applied-ai-018 committed
Commit 8acb2b4 · verified · 1 Parent(s): d515c7b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__init__.py +21 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/composable_quantizer.py +78 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/embedding_quantizer.py +96 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/quantizer.py +158 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/x86_inductor_quantizer.py +1016 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer.py +453 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py +1032 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h +351 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h +181 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h +84 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h +685 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h +200 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h +241 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h +16 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h +15 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h +12 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h +16 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h +575 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h +33 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h +7 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h +68 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h +40 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h +55 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h +457 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h +47 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h +12 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h +857 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h +412 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h +220 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h +1275 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h +322 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h +201 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h +1841 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir_views.h +164 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/named_value.h +84 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/scope.h +220 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h +74 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h +20 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h +39 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h +57 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h +196 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h +136 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h +53 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h +86 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h +112 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h +38 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h +23 -0
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__init__.py ADDED
@@ -0,0 +1,21 @@
+ from .quantizer import (
+     DerivedQuantizationSpec,
+     EdgeOrNode,
+     FixedQParamsQuantizationSpec,
+     QuantizationAnnotation,
+     QuantizationSpec,
+     QuantizationSpecBase,
+     Quantizer,
+     SharedQuantizationSpec,
+ )
+
+ __all__ = [
+     "EdgeOrNode",
+     "Quantizer",
+     "QuantizationSpecBase",
+     "QuantizationSpec",
+     "FixedQParamsQuantizationSpec",
+     "SharedQuantizationSpec",
+     "DerivedQuantizationSpec",
+     "QuantizationAnnotation",
+ ]
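For orientation, a minimal sketch (not part of the commit) of how the re-exported classes are typically combined: a `QuantizationSpec` bundles a dtype, range, scheme, and an observer constructor, and a `QuantizationAnnotation` carries specs for a node. `MinMaxObserver` is a standard observer from `torch.ao.quantization.observer`; the numeric values here are illustrative.

```python
# Illustrative only: build a per-tensor int8 activation spec with the classes
# re-exported by this __init__.py.
import torch
from torch.ao.quantization.observer import MinMaxObserver
from torch.ao.quantization.quantizer import QuantizationAnnotation, QuantizationSpec

act_qspec = QuantizationSpec(
    dtype=torch.int8,
    quant_min=-128,
    quant_max=127,
    qscheme=torch.per_tensor_affine,
    is_dynamic=False,
    observer_or_fake_quant_ctr=MinMaxObserver.with_args(eps=2**-12),
)

# An annotation maps FX input nodes to specs and/or sets an output spec;
# quantizers attach these to node.meta["quantization_annotation"].
annotation = QuantizationAnnotation(output_qspec=act_qspec, _annotated=True)
```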
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (526 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc ADDED
Binary file (5.05 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc ADDED
Binary file (20.4 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/composable_quantizer.py ADDED
@@ -0,0 +1,78 @@
+ from __future__ import annotations
+
+ from typing import Dict, List
+
+ import torch
+
+ from torch.fx import Node
+
+ from .quantizer import QuantizationAnnotation, Quantizer
+
+ __all__ = [
+     "ComposableQuantizer",
+ ]
+
+
+ class ComposableQuantizer(Quantizer):
+     """
+     ComposableQuantizer allows users to combine more than one quantizer into a single quantizer.
+     This allows users to quantize a model with multiple quantizers. E.g., embedding quantization
+     maybe supported by one quantizer while linear layers and other ops might be supported by another
+     quantizer.
+
+     ComposableQuantizer is initialized with a list of `Quantizer` instances.
+     The order of the composition matters since that is the order in which the quantizers will be
+     applies.
+     Example:
+     ```
+     embedding_quantizer = EmbeddingQuantizer()
+     linear_quantizer = MyLinearQuantizer()
+     xnnpack_quantizer = XNNPackQuantizer() # to handle ops not quantized by previous two quantizers
+     composed_quantizer = ComposableQuantizer([embedding_quantizer, linear_quantizer, xnnpack_quantizer])
+     prepared_m = prepare_pt2e(model, composed_quantizer)
+     ```
+     """
+
+     def __init__(self, quantizers: List[Quantizer]):
+         super().__init__()
+         self.quantizers = quantizers
+         self._graph_annotations: Dict[Node, QuantizationAnnotation] = {}
+
+     def _record_and_validate_annotations(
+         self, gm: torch.fx.GraphModule, quantizer: Quantizer
+     ) -> None:
+         for n in gm.graph.nodes:
+             if "quantization_annotation" in n.meta:
+                 # check if the annotation has been changed by
+                 # comparing QuantizationAnnotation object id
+                 if n in self._graph_annotations and (
+                     id(self._graph_annotations[n])
+                     != id(n.meta["quantization_annotation"])
+                 ):
+                     raise RuntimeError(
+                         f"Quantizer {quantizer.__class__.__name__} has changed annotations on node {n}"
+                     )
+                 else:
+                     self._graph_annotations[n] = n.meta["quantization_annotation"]
+             else:
+                 if n in self._graph_annotations:
+                     raise RuntimeError(
+                         f"Quantizer {quantizer.__class__.__name__} has removed annotations on node {n}"
+                     )
+
+     def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+         """just handling global spec for now"""
+         for quantizer in self.quantizers:
+             quantizer.annotate(model)
+             self._record_and_validate_annotations(model, quantizer)
+         return model
+
+     def transform_for_annotation(
+         self, model: torch.fx.GraphModule
+     ) -> torch.fx.GraphModule:
+         for quantizer in self.quantizers:
+             model = quantizer.transform_for_annotation(model)
+         return model
+
+     def validate(self, model: torch.fx.GraphModule) -> None:
+         pass
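The docstring's example above is schematic; below is a hedged end-to-end sketch (not part of the commit) of the same composition using quantizers that ship alongside this file (`EmbeddingQuantizer`, `XNNPACKQuantizer`). The capture helper `capture_pre_autograd_graph` and `prepare_pt2e`/`convert_pt2e` are assumed from the surrounding torch 2.x release; exact capture API names vary across versions.

```python
# Illustrative composition sketch, assuming a PyTorch 2.x PT2E flow.
import torch
from torch._export import capture_pre_autograd_graph  # capture API varies by release
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.composable_quantizer import ComposableQuantizer
from torch.ao.quantization.quantizer.embedding_quantizer import EmbeddingQuantizer
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(16, 8)
        self.fc = torch.nn.Linear(8, 4)

    def forward(self, idx):
        return self.fc(self.emb(idx))


model = TinyModel().eval()
example_inputs = (torch.randint(0, 16, (2, 5)),)

# Order matters: EmbeddingQuantizer annotates first, XNNPACKQuantizer handles the rest.
composed = ComposableQuantizer(
    [EmbeddingQuantizer(), XNNPACKQuantizer().set_global(get_symmetric_quantization_config())]
)

exported = capture_pre_autograd_graph(model, example_inputs)
prepared = prepare_pt2e(exported, composed)
prepared(*example_inputs)  # calibration pass
converted = convert_pt2e(prepared)
```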
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/embedding_quantizer.py ADDED
@@ -0,0 +1,96 @@
+ from __future__ import annotations
+
+ import copy
+ from typing import List, Set
+
+ import torch
+ import torch.nn.functional as F
+ from torch.ao.quantization.observer import PerChannelMinMaxObserver
+ from torch.ao.quantization.quantizer.quantizer import (
+     QuantizationAnnotation,
+     QuantizationSpec,
+     Quantizer,
+ )
+ from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import (
+     OperatorConfig,
+     OperatorPatternType,
+     QuantizationConfig,
+ )
+
+ __all__ = [
+     "get_embedding_operators_config",
+     "EmbeddingQuantizer",
+ ]
+
+
+ def get_embedding_operators_config() -> OperatorConfig:
+     weight_quantization_spec = QuantizationSpec(
+         dtype=torch.uint8,
+         qscheme=torch.per_channel_affine_float_qparams,
+         ch_axis=0,
+         observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(eps=2**-12),
+     )
+     quantization_config = QuantizationConfig(None, None, weight_quantization_spec, None)
+     ops: List[OperatorPatternType] = [[torch.nn.Embedding]]
+     ops.append([F.embedding])
+     supported_config_and_operators = OperatorConfig(
+         config=quantization_config, operators=ops
+     )
+     return copy.deepcopy(supported_config_and_operators)
+
+
+ class EmbeddingQuantizer(Quantizer):
+     def __init__(self):
+         super().__init__()
+
+     @classmethod
+     def get_supported_quantization_configs(cls) -> List[QuantizationConfig]:
+         op_configs: Set[QuantizationConfig] = set({})
+         for spec, _ in cls.get_supported_operators():
+             op_configs.add(spec)
+         return list(op_configs)
+
+     @classmethod
+     def get_supported_operator_for_quantization_config(
+         cls, quantization_config: QuantizationConfig
+     ) -> List[OperatorPatternType]:
+         for config, ops in cls.get_supported_operators():
+             # note: this assumes each entry in cls.supported_spec_and_operators
+             # corresponds to one spec, e.g. we don't have
+             # [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)]
+             # where the first and second entry have the same spec but did not
+             # merge the op list
+             if config == quantization_config:
+                 return ops
+         return []
+
+     def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+         """just handling global spec for now"""
+         self._annotate_embedding_ops(model.graph)
+         return model
+
+     def _annotate_embedding_ops(self, graph: torch.fx.Graph) -> None:
+         embedding_config: OperatorConfig = get_embedding_operators_config()
+         for node in graph.nodes:
+             # Keep node parsing based annotations instead of module partitioners
+             # just as an example of alternate ways of annotating
+             if (
+                 node.op == "call_function"
+                 and node.target == torch.ops.aten.embedding.default
+             ):
+                 if embedding_config.config.weight is None:
+                     raise ValueError(
+                         "Embedding config must have a valid weight quantization spec."
+                     )
+                 node.meta["quantization_annotation"] = QuantizationAnnotation(
+                     input_qspec_map={
+                         node.args[0]: embedding_config.config.weight,
+                     }
+                 )
+
+     def validate(self, model: torch.fx.GraphModule) -> None:
+         pass
+
+     @classmethod
+     def get_supported_operators(cls) -> List[OperatorConfig]:
+         return [get_embedding_operators_config()]
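As a quick check of what `EmbeddingQuantizer.annotate` attaches, a hedged sketch (not part of the commit) that captures a bare `nn.Embedding` and prints the weight qspec added to the `aten.embedding` node. The capture helper is assumed from the surrounding torch 2.x release.

```python
# Illustrative only: run EmbeddingQuantizer.annotate and inspect the annotations.
import torch
from torch._export import capture_pre_autograd_graph  # capture API varies by release
from torch.ao.quantization.quantizer.embedding_quantizer import EmbeddingQuantizer

emb = torch.nn.Embedding(32, 16).eval()
example_inputs = (torch.randint(0, 32, (4,)),)
gm = capture_pre_autograd_graph(emb, example_inputs)

EmbeddingQuantizer().annotate(gm)

for node in gm.graph.nodes:
    ann = node.meta.get("quantization_annotation")
    if ann is not None:
        # Only the weight input of aten.embedding is expected to carry a qspec:
        # uint8, per_channel_affine_float_qparams, ch_axis=0 (see config above).
        print(node.target, ann.input_qspec_map)
```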
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/quantizer.py ADDED
@@ -0,0 +1,158 @@
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass, field
+ from typing import Callable, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from torch import Tensor
+ from torch.ao.quantization import ObserverOrFakeQuantize
+ from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor
+ from torch.fx import Node
+
+ __all__ = [
+     "Quantizer",
+     "QuantizationSpecBase",
+     "QuantizationSpec",
+     "FixedQParamsQuantizationSpec",
+     "EdgeOrNode",
+     "SharedQuantizationSpec",
+     "DerivedQuantizationSpec",
+     "QuantizationAnnotation",
+ ]
+
+
+ class QuantizationSpecBase(ABC):  # noqa: B024
+     """Base class for different types of quantization specs that allows users to
+     specify how to quantize a Tensor (input/output of a Node) in the model
+     """
+
+     pass
+
+
+ @dataclass(eq=True, frozen=True)
+ class QuantizationSpec(QuantizationSpecBase):
+     """Quantization spec for common operators that allows user to specify how to
+     quantize a Tensor, this includes dtype, quant_min, quant_max etc.
+     """
+
+     dtype: torch.dtype
+     # observer or fake_quantize constructor such as
+     # MinMaxObserver, PerChannelHistogramObserver etc.
+     # or we can attach some custom args to them
+     # e.g. MinMaxObserver.with_args(eps=eps)
+     observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor
+     quant_min: Optional[int] = None
+     quant_max: Optional[int] = None
+     qscheme: Optional[torch.qscheme] = None
+     ch_axis: Optional[int] = None
+     is_dynamic: bool = False
+
+     def __post_init__(self):
+         # quant_min must be less than quant_max
+         if (
+             self.quant_min is not None
+             and self.quant_max is not None
+             and self.quant_min > self.quant_max
+         ):
+             raise ValueError(
+                 f"quant_min {self.quant_min} must be <= quant_max {self.quant_max}."
+             )
+
+         # ch_axis must be less than the number of channels
+         # but no way to check here. Just check that it is not < 0.
+         if self.ch_axis is not None and self.ch_axis < 0:
+             raise ValueError("Ch_axis is < 0.")
+
+
+ @dataclass(eq=True, frozen=True)
+ class FixedQParamsQuantizationSpec(QuantizationSpecBase):
+     dtype: torch.dtype
+     scale: float
+     zero_point: int
+     quant_min: Optional[int] = None
+     quant_max: Optional[int] = None
+     qscheme: Optional[torch.qscheme] = None
+
+
+ """
+ The way we refer to other points of quantization in the graph will be either
+ an input edge or an output value
+ input edge is the connection between input node and the node consuming the input, so it's a Tuple[Node, Node]
+ output value is an fx Node
+ """
+ EdgeOrNode = Union[Tuple[Node, Node], Node]
+ EdgeOrNode.__module__ = "torch.ao.quantization.quantizer.quantizer"
+
+
+ @dataclass(eq=True, frozen=True)
+ class SharedQuantizationSpec(QuantizationSpecBase):
+     """
+     Quantization spec for the Tensors whose quantization parameters are shared with other Tensors
+     """
+
+     # the edge or node to share observer or fake quant instances with
+     edge_or_node: EdgeOrNode
+
+
+ @dataclass(eq=True, frozen=True)
+ class DerivedQuantizationSpec(QuantizationSpecBase):
+     """Quantization spec for the Tensors whose quantization parameters are derived from other Tensors"""
+
+     derived_from: List[EdgeOrNode]
+     derive_qparams_fn: Callable[[List[ObserverOrFakeQuantize]], Tuple[Tensor, Tensor]]
+     dtype: torch.dtype
+     quant_min: Optional[int] = None
+     quant_max: Optional[int] = None
+     qscheme: Optional[torch.qscheme] = None
+     ch_axis: Optional[int] = None
+
+
+ @dataclass
+ class QuantizationAnnotation:
+     """How are input arguemnt or output should be quantized,
+     expressed as QuantizationSpec, this corresponds to how a Tensor in the
+     operator Graph is observed (PTQ) or fake quantized (QAT)
+     """
+
+     # a map from torch.fx.Node to a type of QuantizationSpecBase
+     input_qspec_map: Dict[Node, Optional[QuantizationSpecBase]] = field(
+         default_factory=dict
+     )
+
+     # How the output of this node is quantized, expressed as QuantizationSpec
+     # TODO: change the value to QuantizationSpec in a separate PR
+     output_qspec: Optional[QuantizationSpecBase] = None
+
+     # For a Node: node1 and edge: (node1, node2), since they are observing the same
+     # Tensor, we may want to implicitly share observers, this flag allows people to
+     # turn off this behavior for the output of the node
+     allow_implicit_sharing: bool = True
+
+     # whether the node is annotated or not
+     _annotated: bool = False
+
+
+ class Quantizer(ABC):
+     def transform_for_annotation(
+         self, model: torch.fx.GraphModule
+     ) -> torch.fx.GraphModule:
+         """Allows for user defined transforms to run before annotating the graph.
+         This allows quantizer to allow quantizing part of the model that are otherwise not quantizable.
+         For example quantizer can
+         a) decompose a compound operator like scaled dot product attention,
+         into bmm and softmax if quantizer knows how to quantize bmm/softmax but not sdpa
+         or b) transform scalars to tensor to allow quantizing scalares.
+
+         Note: this is an optional method
+         """
+         return model
+
+     # annotate nodes in the graph with observer or fake quant constructors
+     # to convey the desired way of quantization
+     @abstractmethod
+     def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+         pass
+
+     # validate the annotated graph is supported by the backend
+     @abstractmethod
+     def validate(self, model: torch.fx.GraphModule) -> None:
+         pass
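To make the abstract interface concrete, here is a hypothetical minimal subclass (illustration only, not part of the commit): it annotates the output of every `aten.linear` node with a per-tensor int8 spec and performs no backend-specific validation.

```python
# Hypothetical minimal Quantizer: per-tensor int8 observation of linear outputs.
import torch
from torch.ao.quantization.observer import HistogramObserver
from torch.ao.quantization.quantizer.quantizer import (
    QuantizationAnnotation,
    QuantizationSpec,
    Quantizer,
)


class LinearOutputQuantizer(Quantizer):
    def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
        qspec = QuantizationSpec(
            dtype=torch.int8,
            quant_min=-128,
            quant_max=127,
            qscheme=torch.per_tensor_affine,
            observer_or_fake_quant_ctr=HistogramObserver.with_args(eps=2**-12),
        )
        for node in model.graph.nodes:
            if (
                node.op == "call_function"
                and node.target == torch.ops.aten.linear.default
            ):
                node.meta["quantization_annotation"] = QuantizationAnnotation(
                    output_qspec=qspec,
                    _annotated=True,
                )
        return model

    def validate(self, model: torch.fx.GraphModule) -> None:
        # No backend-specific checks in this sketch.
        pass
```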
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/x86_inductor_quantizer.py ADDED
@@ -0,0 +1,1016 @@
1
+ import copy
2
+ import functools
3
+ import itertools
4
+ import operator
5
+ from dataclasses import dataclass
6
+ from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from torch.ao.quantization.fake_quantize import (
11
+ FakeQuantize,
12
+ FusedMovingAvgObsFakeQuantize,
13
+ )
14
+ from torch.ao.quantization.observer import (
15
+ HistogramObserver,
16
+ MovingAverageMinMaxObserver,
17
+ MovingAveragePerChannelMinMaxObserver,
18
+ PerChannelMinMaxObserver,
19
+ PlaceholderObserver,
20
+ )
21
+ from torch.ao.quantization.pt2e.graph_utils import find_sequential_partitions
22
+ from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor
23
+ from torch.ao.quantization.quantizer.quantizer import (
24
+ QuantizationAnnotation,
25
+ QuantizationSpec,
26
+ Quantizer,
27
+ SharedQuantizationSpec,
28
+ )
29
+ from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import (
30
+ _is_annotated,
31
+ get_bias_qspec,
32
+ get_input_act_qspec,
33
+ get_output_act_qspec,
34
+ get_weight_qspec,
35
+ OperatorConfig,
36
+ OperatorPatternType,
37
+ QuantizationConfig,
38
+ )
39
+ from torch.fx import Node
40
+ from torch.fx.passes.utils.source_matcher_utils import (
41
+ get_source_partitions,
42
+ SourcePartition,
43
+ )
44
+
45
+ __all__ = [
46
+ "X86InductorQuantizer",
47
+ "get_default_x86_inductor_quantization_config",
48
+ ]
49
+
50
+
51
+ @dataclass
52
+ class _X86InductorQuantizationAnnotation(QuantizationAnnotation):
53
+ # _is_output_of_quantized_pattern:
54
+ # * Node as output node of a fusion pattern.
55
+ # * The fusion pattern supports int8 data type.
56
+ # * The fusion pattern has inputs annotated to insert observer.
57
+ _is_output_of_quantized_pattern: bool = False
58
+
59
+
60
+ # Operations that:
61
+ # 1. Operations are optimized to run with int8 when int8 input provided.
62
+ # 2. Operations do not support int8 input and produce fp32 output.
63
+ int8_in_int8_out_ops_pt2e: Set = {
64
+ torch.ops.aten.max_pool2d.default,
65
+ torch.ops.aten.cat.default,
66
+ torch.ops.aten.avg_pool2d.default,
67
+ torch.ops.aten.adaptive_avg_pool2d.default,
68
+ torch.ops.aten.flatten.using_ints,
69
+ }
70
+
71
+
72
+ # Operations support the int8 data type and exclude operations such as conv and linear.
73
+ # A superset of int8_in_int8_out_ops_pt2e incorporating additional operators.
74
+ quantizable_ops_pt2e = copy.deepcopy(int8_in_int8_out_ops_pt2e)
75
+
76
+ QUANT_ANNOTATION_KEY = "quantization_annotation"
77
+
78
+
79
+ def _mark_nodes_as_annotated(nodes: List[Node]):
80
+ for node in nodes:
81
+ if node is not None:
82
+ if QUANT_ANNOTATION_KEY not in node.meta:
83
+ node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation()
84
+ node.meta[QUANT_ANNOTATION_KEY]._annotated = True
85
+
86
+
87
+ def _is_node_annotated(_node):
88
+ """
89
+ return True if the node is annotated, otherwise return False
90
+ """
91
+ return (
92
+ QUANT_ANNOTATION_KEY in _node.meta
93
+ and _node.meta[QUANT_ANNOTATION_KEY]._annotated
94
+ )
95
+
96
+
97
+ def _is_any_annotated(nodes: List[Node]):
98
+ """
99
+ Given a list of nodes (that represents an operator pattern),
100
+ check if any of the node is annotated, return True if any of the node
101
+ is annotated, otherwise return False.
102
+ """
103
+ return any(_is_node_annotated(node) for node in nodes)
104
+
105
+
106
+ def _is_all_annotated(nodes: List[Node]):
107
+ """
108
+ Given a list of nodes (that represents an operator pattern),
109
+ return True if all of the node is annotated, otherwise return False.
110
+ """
111
+ return all(_is_node_annotated(node) for node in nodes)
112
+
113
+
114
+ def _is_quantized_op_pt2e(node: torch.fx.Node):
115
+ """
116
+ Used for pt2e flow to check if the node is a quantized node:
117
+ Case1: the node has been annotated as output node of a fusion pattern.
118
+ Case2: the node has been annotated as single quantized node.
119
+ """
120
+ if not _is_any_annotated([node]):
121
+ # The node has not been annotated, directly return False
122
+ return False
123
+ quantization_annotation = node.meta.get(QUANT_ANNOTATION_KEY, None)
124
+ assert isinstance(quantization_annotation, _X86InductorQuantizationAnnotation)
125
+ return quantization_annotation._is_output_of_quantized_pattern
126
+
127
+
128
+ def _supported_quantized_operators() -> Dict[str, List[OperatorPatternType]]:
129
+ # TODO: Add more supported operators here.
130
+ supported_operators: Dict[str, List[OperatorPatternType]] = {
131
+ "conv2d": [
132
+ [torch.nn.Conv2d],
133
+ [F.conv2d],
134
+ ],
135
+ }
136
+
137
+ # Append Conv Optional(Add) Optioinal(ReLU)
138
+ conv_add_relu_options = itertools.product(
139
+ [torch.nn.Conv2d, F.conv2d],
140
+ [torch.add, operator.add, None], # add
141
+ [torch.nn.ReLU, F.relu, None], # relu
142
+ )
143
+ for conv_op, add_op, relu_op in conv_add_relu_options:
144
+ if add_op is None:
145
+ # Append Conv ReLU
146
+ supported_operators["conv2d"].append([conv_op, relu_op]) # type: ignore[list-item]
147
+ elif relu_op is None:
148
+ # Append Conv Add
149
+ supported_operators["conv2d"].append([conv_op, add_op]) # type: ignore[list-item]
150
+ else:
151
+ # Append Conv Add ReLU
152
+ supported_operators["conv2d"].append([conv_op, add_op, relu_op]) # type: ignore[list-item]
153
+
154
+ return copy.deepcopy(supported_operators)
155
+
156
+
157
+ def _get_supported_x86_inductor_config_and_operators() -> List[OperatorConfig]:
158
+ supported_config_and_operators: List[OperatorConfig] = []
159
+ for quantization_config in [
160
+ get_default_x86_inductor_quantization_config(),
161
+ ]:
162
+ ops = _supported_quantized_operators()
163
+ for pattern_list in ops.values():
164
+ supported_config_and_operators.append(
165
+ OperatorConfig(quantization_config, pattern_list)
166
+ )
167
+ return copy.deepcopy(supported_config_and_operators)
168
+
169
+
170
+ @functools.lru_cache
171
+ def get_default_x86_inductor_quantization_config(
172
+ is_qat: bool = False,
173
+ is_dynamic: bool = False,
174
+ ):
175
+ extra_args: Dict[str, Any] = {"eps": 2**-12}
176
+ if is_qat:
177
+ if is_dynamic:
178
+ act_observer_or_fake_quant_ctr = FakeQuantize
179
+ dynamic_quant_observer = MovingAverageMinMaxObserver.with_args(
180
+ averaging_constant=1
181
+ )
182
+ extra_args["observer"] = dynamic_quant_observer
183
+ else:
184
+ act_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize # type: ignore[assignment]
185
+ else:
186
+ if is_dynamic:
187
+ act_observer_or_fake_quant_ctr = PlaceholderObserver # type: ignore[assignment]
188
+ else:
189
+ act_observer_or_fake_quant_ctr = HistogramObserver # type: ignore[assignment]
190
+
191
+ # Copy from x86 default qconfig from torch/ao/quantization/qconfig.py
192
+ act_quantization_spec = QuantizationSpec(
193
+ dtype=torch.uint8,
194
+ quant_min=0,
195
+ quant_max=255, # reduce_range=False
196
+ qscheme=torch.per_tensor_affine,
197
+ is_dynamic=is_dynamic,
198
+ observer_or_fake_quant_ctr=act_observer_or_fake_quant_ctr.with_args(
199
+ **extra_args
200
+ ),
201
+ )
202
+
203
+ weight_observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor = (
204
+ FusedMovingAvgObsFakeQuantize if is_qat else PerChannelMinMaxObserver
205
+ )
206
+
207
+ if is_qat:
208
+ # Only support per channel quant for now
209
+ extra_args["observer"] = MovingAveragePerChannelMinMaxObserver # type: ignore[dict-item]
210
+ weight_quantization_spec = QuantizationSpec(
211
+ dtype=torch.int8,
212
+ quant_min=-128,
213
+ quant_max=127,
214
+ qscheme=torch.per_channel_symmetric,
215
+ ch_axis=0, # 0 corresponding to weight shape = (oc, ic, kh, kw) of conv
216
+ is_dynamic=False,
217
+ observer_or_fake_quant_ctr=weight_observer_or_fake_quant_ctr.with_args(
218
+ **extra_args
219
+ ),
220
+ )
221
+ bias_quantization_spec = None # will use placeholder observer by default
222
+ quantization_config = QuantizationConfig(
223
+ act_quantization_spec,
224
+ act_quantization_spec,
225
+ weight_quantization_spec,
226
+ bias_quantization_spec,
227
+ is_qat,
228
+ )
229
+ return quantization_config
230
+
231
+
232
+ def _get_supported_config_and_operators() -> List[OperatorConfig]:
233
+ return _get_supported_x86_inductor_config_and_operators()
234
+
235
+
236
+ class X86InductorQuantizer(Quantizer):
237
+ supported_config_and_operators = _get_supported_config_and_operators()
238
+
239
+ def __init__(self):
240
+ super().__init__()
241
+ self.global_config: QuantizationConfig = None # type: ignore[assignment]
242
+ self.operator_type_config: Dict[str, Optional[QuantizationConfig]] = {}
243
+
244
+ @classmethod
245
+ def get_supported_quantization_configs(cls) -> List[QuantizationConfig]:
246
+ op_configs: Set[QuantizationConfig] = set({})
247
+ for spec, _ in cls.supported_config_and_operators:
248
+ op_configs.add(spec)
249
+ return list(op_configs)
250
+
251
+ @classmethod
252
+ def get_supported_operator_for_quantization_config(
253
+ cls, quantization_config: Optional[QuantizationConfig]
254
+ ) -> List[OperatorPatternType]:
255
+ if quantization_config is None:
256
+ all_ops = []
257
+ for _, ops in cls.supported_config_and_operators:
258
+ all_ops.extend(ops)
259
+ return all_ops
260
+
261
+ for config, ops in cls.supported_config_and_operators:
262
+ if config == quantization_config:
263
+ return ops
264
+ return []
265
+
266
+ def set_global(self, quantization_config: QuantizationConfig):
267
+ self.global_config = quantization_config
268
+ return self
269
+
270
+ def set_config_for_operator_type(
271
+ self, operator_type: str, quantization_config: QuantizationConfig
272
+ ):
273
+ self.operator_type_config[operator_type] = quantization_config
274
+ return self
275
+
276
+ def _annotate_conv_node_helper(
277
+ self,
278
+ conv_node: torch.fx.Node,
279
+ annotate_output: bool,
280
+ quantization_config: QuantizationConfig,
281
+ ) -> None:
282
+ """Helper function to annotate the conv node"""
283
+ input_qspec_map = {}
284
+ input_node = conv_node.args[0]
285
+ assert isinstance(input_node, Node)
286
+ input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
287
+ weight_node = conv_node.args[1]
288
+ assert isinstance(weight_node, Node)
289
+ input_qspec_map[weight_node] = get_weight_qspec(quantization_config)
290
+ bias_node = None if len(conv_node.args) == 2 else conv_node.args[2]
291
+ if isinstance(bias_node, Node):
292
+ input_qspec_map[bias_node] = get_bias_qspec(quantization_config)
293
+ if annotate_output:
294
+ conv_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
295
+ input_qspec_map=input_qspec_map,
296
+ _annotated=True,
297
+ _is_output_of_quantized_pattern=True,
298
+ )
299
+ else:
300
+ conv_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
301
+ input_qspec_map=input_qspec_map,
302
+ _annotated=True,
303
+ )
304
+
305
+ def _annotate_linear_node_helper(
306
+ self,
307
+ linear_node: torch.fx.Node,
308
+ annotate_output: bool,
309
+ quantization_config: QuantizationConfig,
310
+ ) -> None:
311
+ """Helper function to annotate the linear node"""
312
+ input_qspec_map = {}
313
+ assert linear_node.target in (torch.ops.aten.linear.default,)
314
+ has_bias = len(linear_node.args) == 3
315
+ input_index = 0
316
+ weight_index = 1
317
+ bias_index = 2
318
+
319
+ input_node = linear_node.args[input_index]
320
+ assert isinstance(input_node, Node)
321
+ input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
322
+
323
+ weight_node = linear_node.args[weight_index]
324
+ assert isinstance(weight_node, Node)
325
+ input_qspec_map[weight_node] = get_weight_qspec(quantization_config)
326
+
327
+ bias_node = linear_node.args[bias_index] if has_bias else None
328
+ if isinstance(bias_node, Node):
329
+ input_qspec_map[bias_node] = get_bias_qspec(quantization_config)
330
+
331
+ if annotate_output:
332
+ linear_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
333
+ input_qspec_map=input_qspec_map,
334
+ _annotated=True,
335
+ _is_output_of_quantized_pattern=True,
336
+ )
337
+ else:
338
+ linear_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
339
+ input_qspec_map=input_qspec_map, _annotated=True
340
+ )
341
+
342
+ def _get_output_nodes_of_partitions(
343
+ self,
344
+ partition_list: List[SourcePartition],
345
+ ) -> List[torch.fx.Node]:
346
+ """Helper function to get the output node list from partition list"""
347
+ output_node_list = []
348
+ for partition in partition_list:
349
+ if len(partition.output_nodes) > 1:
350
+ raise ValueError("Input partition has more than one output node")
351
+ output_node = partition.output_nodes[0]
352
+ assert isinstance(output_node, Node)
353
+ output_node_list.append(output_node)
354
+ if len(output_node_list) != len(partition_list):
355
+ raise ValueError(
356
+ "length of output_node_list should equal to length of partition_list"
357
+ )
358
+ return output_node_list
359
+
360
+ def _get_input_idx_for_binary_node(
361
+ self,
362
+ conv_gemm_node: torch.fx.Node,
363
+ binary_node: torch.fx.Node,
364
+ ):
365
+ """Helper function to check conv_gemm and extra input node index
366
+ for binary node fused with conv_gemm.
367
+ """
368
+ conv_gemm_node_idx = None
369
+ extra_input_node_idx = None
370
+ if (binary_node.args[0].op == "call_function") and ( # type: ignore[union-attr]
371
+ binary_node.args[0] == conv_gemm_node
372
+ ):
373
+ conv_gemm_node_idx = 0
374
+ extra_input_node_idx = 1
375
+ elif (binary_node.args[1].op == "call_function") and ( # type: ignore[union-attr]
376
+ binary_node.args[1] == conv_gemm_node
377
+ ):
378
+ conv_gemm_node_idx = 1
379
+ extra_input_node_idx = 0
380
+ extra_input_node = binary_node.args[extra_input_node_idx] # type: ignore[index]
381
+ assert isinstance(extra_input_node, Node)
382
+ return conv_gemm_node_idx, extra_input_node_idx
383
+
384
+ def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
385
+ """just handling global spec for now"""
386
+ if self.global_config and self.global_config.input_activation.is_dynamic: # type: ignore[union-attr]
387
+ model = self._annotate_for_dynamic_quantization_config(model)
388
+ else:
389
+ model = self._annotate_for_static_quantization_config(model)
390
+ return model
391
+
392
+ def _annotate_for_static_quantization_config(
393
+ self, model: torch.fx.GraphModule
394
+ ) -> torch.fx.GraphModule:
395
+ r"""
396
+ High-level description of quantization recipe for X86 Inductor Backend:
397
+ Step 1: Apply quantization recipe for fusion patterns of conv/linear to enable int8 data type actively.
398
+ Step 2: Propagate quantization annotation for patterns besides conv/linear. Go through the pattern in model
399
+ from start to the end. If a pattern supports computation with int8 data type and inputs connected to
400
+ quantized patterns, annotate its inputs as quantized pattern.
401
+ Step 3: Since in step 2, we only annotate the inputs of quantized pattern. For some quantized patterns,
402
+ such as maxpool2d, which only supports output with int8 data type when the input is with int8 data type,
403
+ we need to annotate the output of this pattern.
404
+ """
405
+
406
+ config = self.global_config
407
+
408
+ # Step1: Recipe of fusion patterns like conv/linear.
409
+ if config.is_qat:
410
+ # Annotate QAT specific pattern: mainly due to BN not folded in prepare_qat
411
+ self._annotate_qat_conv2d_fusion_pattern(model, config)
412
+
413
+ self._annotate_conv2d_fusion_pattern(model, config)
414
+
415
+ # Step2: Recipe to propagate annotation for patterns beside conv/linear.
416
+ # Go through all the nodes from start to end.
417
+ # Recipe refer to https://github.com/intel/intel-extension-for-pytorch/blob/
418
+ # 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L538
419
+ for node in model.graph.nodes:
420
+ self._annotation_propagation_quantizable_pattern(node, config)
421
+
422
+ # Step3: For quantizable ops, such as maxpool2d, we need to quantize its output if it is quantized
423
+ # in inputs. So, we can fuse dq-operator-q into a quantized op.
424
+ # Refer to https://github.com/intel/intel-extension-for-pytorch/blob/
425
+ # 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L487
426
+ for node in model.graph.nodes:
427
+ self._annotate_output_for_int8_in_int8_out_pattern(node, config)
428
+
429
+ return model
430
+
431
+ def _annotate_for_dynamic_quantization_config(
432
+ self, model: torch.fx.GraphModule
433
+ ) -> torch.fx.GraphModule:
434
+ config = self.global_config
435
+ self._annotate_linear(model, config)
436
+ return model
437
+
438
+ def _annotate_qat_conv2d_fusion_pattern(
439
+ self, model: torch.fx.GraphModule, config: QuantizationConfig
440
+ ):
441
+ # Annotate QAT Specific patterns
442
+ self._annotate_qat_conv2d_bn_binary_unary(model, config)
443
+ self._annotate_qat_conv2d_bn_binary(model, config)
444
+ self._annotate_qat_conv2d_bn_unary(model, config)
445
+ self._annotate_qat_conv2d_bn(model, config)
446
+
447
+ def _annotate_qat_conv2d_bn_binary_unary(
448
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
449
+ ) -> None:
450
+ fused_partitions = find_sequential_partitions(
451
+ gm, [torch.nn.Conv2d, torch.nn.BatchNorm2d, operator.add, torch.nn.ReLU]
452
+ )
453
+ for fused_partition in fused_partitions:
454
+ (
455
+ conv_partition,
456
+ bn_partition,
457
+ binary_partition,
458
+ unary_partition,
459
+ ) = fused_partition
460
+
461
+ (
462
+ conv_node,
463
+ bn_output_node,
464
+ binary_node,
465
+ unary_node,
466
+ ) = self._get_output_nodes_of_partitions(
467
+ [conv_partition, bn_partition, binary_partition, unary_partition]
468
+ )
469
+ if len(bn_output_node.users) != 1:
470
+ # Conv BN pattern should only has 1 user.
471
+ continue
472
+ (
473
+ bn_output_node_idx,
474
+ extra_input_node_idx,
475
+ ) = self._get_input_idx_for_binary_node(bn_output_node, binary_node)
476
+ if (bn_output_node_idx is None) or (extra_input_node_idx is None):
477
+ continue
478
+ if bn_output_node != binary_node.args[bn_output_node_idx]:
479
+ raise ValueError(f"{bn_output_node} doesn't match input of binary node")
480
+ extra_input_node = binary_node.args[extra_input_node_idx]
481
+
482
+ if (
483
+ conv_node.op != "call_function"
484
+ or conv_node.target != torch.ops.aten.conv2d.default
485
+ ):
486
+ continue
487
+
488
+ if _is_annotated([unary_node, binary_node, bn_output_node, conv_node]):
489
+ continue
490
+
491
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
492
+
493
+ binary_node_input_qspec_map = {}
494
+ binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec(
495
+ quantization_config
496
+ )
497
+ binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
498
+ input_qspec_map=binary_node_input_qspec_map,
499
+ _annotated=True,
500
+ )
501
+ unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
502
+ # TODO<leslie> Remove the annotate of output in QAT when qat util support pattern matcher.
503
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
504
+ _annotated=True,
505
+ _is_output_of_quantized_pattern=True,
506
+ )
507
+ nodes_to_mark_annotated = list(conv_partition.nodes)
508
+ nodes_to_mark_annotated.extend(list(bn_partition.nodes))
509
+ nodes_to_mark_annotated.extend(list(binary_partition.nodes))
510
+ nodes_to_mark_annotated.extend(list(unary_partition.nodes))
511
+ _mark_nodes_as_annotated(nodes_to_mark_annotated)
512
+
513
+ def _annotate_qat_conv2d_bn_binary(
514
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
515
+ ) -> None:
516
+ fused_partitions = find_sequential_partitions(
517
+ gm, [torch.nn.Conv2d, torch.nn.BatchNorm2d, operator.add]
518
+ )
519
+ for fused_partition in fused_partitions:
520
+ conv_partition, bn_partition, binary_partition = fused_partition
521
+ (
522
+ conv_node,
523
+ bn_output_node,
524
+ binary_node,
525
+ ) = self._get_output_nodes_of_partitions(
526
+ [conv_partition, bn_partition, binary_partition]
527
+ )
528
+ if len(bn_output_node.users) != 1:
529
+ # Conv BN pattern should only has 1 user.
530
+ continue
531
+ (
532
+ bn_output_node_idx,
533
+ extra_input_node_idx,
534
+ ) = self._get_input_idx_for_binary_node(bn_output_node, binary_node)
535
+ if (bn_output_node_idx is None) or (extra_input_node_idx is None):
536
+ continue
537
+ if bn_output_node != binary_node.args[bn_output_node_idx]:
538
+ raise ValueError(f"{bn_output_node} doesn't match input of binary node")
539
+
540
+ extra_input_node = binary_node.args[extra_input_node_idx]
541
+
542
+ if (
543
+ conv_node.op != "call_function"
544
+ or conv_node.target != torch.ops.aten.conv2d.default
545
+ ):
546
+ continue
547
+
548
+ if _is_annotated([binary_node, bn_output_node, conv_node]):
549
+ continue
550
+
551
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
552
+
553
+ binary_node_input_qspec_map = {}
554
+ binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec(
555
+ quantization_config
556
+ )
557
+ binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
558
+ input_qspec_map=binary_node_input_qspec_map,
559
+ # TODO<leslie> Remove the annotate of output in QAT when qat util support pattern matcher.
560
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
561
+ _annotated=True,
562
+ _is_output_of_quantized_pattern=True,
563
+ )
564
+ nodes_to_mark_annotated = list(conv_partition.nodes)
565
+ nodes_to_mark_annotated.extend(list(bn_partition.nodes))
566
+ nodes_to_mark_annotated.extend(list(binary_partition.nodes))
567
+ _mark_nodes_as_annotated(nodes_to_mark_annotated)
568
+
569
+ def _annotate_qat_conv2d_bn_unary(
570
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
571
+ ) -> None:
572
+ fused_partitions = []
573
+ unary_patterns = [
574
+ [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.ReLU],
575
+ [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.Hardtanh],
576
+ [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.Hardswish],
577
+ [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.ReLU6],
578
+ ]
579
+ for unary_pattern in unary_patterns:
580
+ partitions = find_sequential_partitions(gm, unary_pattern)
581
+ if partitions:
582
+ # Extend the fused_partitions if partitions is not empty
583
+ fused_partitions.extend(partitions)
584
+
585
+ for fused_partition in fused_partitions:
586
+ conv_partition, bn_partition, unary_partition = fused_partition
587
+ (
588
+ conv_node,
589
+ bn_output_node,
590
+ unary_node,
591
+ ) = self._get_output_nodes_of_partitions(
592
+ [conv_partition, bn_partition, unary_partition]
593
+ )
594
+
595
+ if (
596
+ conv_node.op != "call_function"
597
+ or conv_node.target != torch.ops.aten.conv2d.default
598
+ ):
599
+ continue
600
+
601
+ if _is_annotated([unary_node, bn_output_node, conv_node]):
602
+ continue
603
+
604
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
605
+ unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
606
+ # TODO<leslie> Remove the annotate of output in QAT when qat util support pattern matcher.
607
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
608
+ _annotated=True,
609
+ _is_output_of_quantized_pattern=True,
610
+ )
611
+ nodes_to_mark_annotated = list(conv_partition.nodes)
612
+ nodes_to_mark_annotated.extend(list(bn_partition.nodes))
613
+ nodes_to_mark_annotated.extend(list(unary_partition.nodes))
614
+ _mark_nodes_as_annotated(nodes_to_mark_annotated)
615
+
616
+ def _annotate_qat_conv2d_bn(
617
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
618
+ ) -> None:
619
+ fused_partitions = find_sequential_partitions(
620
+ gm, [torch.nn.Conv2d, torch.nn.BatchNorm2d]
621
+ )
622
+ for fused_partition in fused_partitions:
623
+ conv_partition, bn_partition = fused_partition
624
+ conv_node, bn_output_node = self._get_output_nodes_of_partitions(
625
+ [conv_partition, bn_partition]
626
+ )
627
+
628
+ if (
629
+ conv_node.op != "call_function"
630
+ or conv_node.target != torch.ops.aten.conv2d.default
631
+ ):
632
+ continue
633
+
634
+ if _is_annotated([bn_output_node, conv_node]):
635
+ continue
636
+
637
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
638
+ bn_output_node.meta[
639
+ QUANT_ANNOTATION_KEY
640
+ ] = _X86InductorQuantizationAnnotation(
641
+ # TODO<leslie> Remove the annotate of output in QAT when qat util support pattern matcher.
642
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
643
+ _annotated=True,
644
+ _is_output_of_quantized_pattern=True,
645
+ )
646
+ nodes_to_mark_annotated = list(conv_partition.nodes)
647
+ nodes_to_mark_annotated.extend(list(bn_partition.nodes))
648
+ _mark_nodes_as_annotated(nodes_to_mark_annotated)
649
+
650
+ def _annotate_conv2d_fusion_pattern(
651
+ self, model: torch.fx.GraphModule, config: QuantizationConfig
652
+ ):
653
+ self._annotate_conv2d_binary_unary(model, config)
654
+ self._annotate_conv2d_binary(model, config)
655
+ self._annotate_conv2d_unary(model, config)
656
+ self._annotate_conv2d(model, config)
657
+ self._annotate_linear_unary(model, config)
658
+ self._annotate_linear(model, config)
659
+
660
+ def _annotate_conv2d_binary_unary(
661
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
662
+ ) -> None:
663
+ # Conv2d + add + unary op
664
+ fused_partitions = find_sequential_partitions(
665
+ gm, [torch.nn.Conv2d, operator.add, torch.nn.ReLU]
666
+ )
667
+ for fused_partition in fused_partitions:
668
+ conv_partition, binary_partition, unary_partition = fused_partition
669
+ conv_node, binary_node, unary_node = self._get_output_nodes_of_partitions(
670
+ [conv_partition, binary_partition, unary_partition]
671
+ )
672
+ if len(conv_node.users) != 1:
673
+ # Conv Node should only has 1 user node
674
+ continue
675
+ conv_node_idx, extra_input_node_idx = self._get_input_idx_for_binary_node(
676
+ conv_node, binary_node
677
+ )
678
+ if (conv_node_idx is None) or (extra_input_node_idx is None):
679
+ continue
680
+ if conv_node != binary_node.args[conv_node_idx]:
681
+ raise ValueError(f"{conv_node} doesn't match input of binary node")
682
+ extra_input_node = binary_node.args[extra_input_node_idx]
683
+ if (
684
+ conv_node.op != "call_function"
685
+ or conv_node.target != torch.ops.aten.conv2d.default
686
+ ):
687
+ # No conv node found to be fused with add
688
+ continue
689
+ if _is_annotated([unary_node, binary_node, conv_node]):
690
+ continue
691
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
692
+ binary_node_input_qspec_map = {}
693
+ binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec(
694
+ quantization_config
695
+ )
696
+ binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
697
+ input_qspec_map=binary_node_input_qspec_map,
698
+ _annotated=True,
699
+ )
700
+ unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
701
+ _annotated=True,
702
+ _is_output_of_quantized_pattern=True,
703
+ )
704
+
705
+ def _annotate_conv2d_binary(
706
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
707
+ ) -> None:
708
+ # Conv2d + add
709
+ fused_partitions = find_sequential_partitions(
710
+ gm, [torch.nn.Conv2d, operator.add]
711
+ )
712
+ for fused_partition in fused_partitions:
713
+ conv_partition, binary_partition = fused_partition
714
+ conv_node, binary_node = self._get_output_nodes_of_partitions(
715
+ [conv_partition, binary_partition]
716
+ )
717
+ if len(conv_node.users) != 1:
718
+ # Conv Node should only has 1 user node
719
+ continue
720
+ conv_node_idx, extra_input_node_idx = self._get_input_idx_for_binary_node(
721
+ conv_node, binary_node
722
+ )
723
+ if (conv_node_idx is None) or (extra_input_node_idx is None):
724
+ continue
725
+ if conv_node != binary_node.args[conv_node_idx]:
726
+ raise ValueError(f"{conv_node} doesn't match input of binary node")
727
+ extra_input_node = binary_node.args[extra_input_node_idx]
728
+ assert isinstance(conv_node, Node)
729
+ if (
730
+ conv_node.op != "call_function"
731
+ or conv_node.target != torch.ops.aten.conv2d.default
732
+ ):
733
+ # No conv node found to be fused with add
734
+ continue
735
+ if _is_annotated([binary_node, conv_node]):
736
+ continue
737
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
738
+ binary_node_input_qspec_map = {}
739
+ binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec(
740
+ quantization_config
741
+ )
742
+ binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
743
+ input_qspec_map=binary_node_input_qspec_map,
744
+ _annotated=True,
745
+ _is_output_of_quantized_pattern=True,
746
+ )
747
+
748
+ def _annotate_conv2d_unary(
749
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
750
+ ) -> None:
751
+ fused_partitions = []
752
+ unary_patterns = [
753
+ [torch.nn.Conv2d, torch.nn.ReLU],
754
+ [torch.nn.Conv2d, torch.nn.Hardtanh],
755
+ [torch.nn.Conv2d, torch.nn.Hardswish],
756
+ [torch.nn.Conv2d, torch.nn.ReLU6],
757
+ ]
758
+ for unary_pattern in unary_patterns:
759
+ partitions = find_sequential_partitions(gm, unary_pattern)
760
+ if partitions:
761
+ # Extend the fused_partitions if partitions is not empty
762
+ fused_partitions.extend(partitions)
763
+
764
+ for fused_partition in fused_partitions:
765
+ conv_partition, unary_partition = fused_partition
766
+ conv_node, unary_node = self._get_output_nodes_of_partitions(
767
+ [conv_partition, unary_partition]
768
+ )
769
+ if (
770
+ conv_node.op != "call_function"
771
+ or conv_node.target != torch.ops.aten.conv2d.default
772
+ ):
773
+ continue
774
+ if _is_annotated([unary_node, conv_node]):
775
+ continue
776
+ self._annotate_conv_node_helper(conv_node, False, quantization_config)
777
+ unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
778
+ _annotated=True,
779
+ _is_output_of_quantized_pattern=True,
780
+ )
781
+
782
+ def _annotate_conv2d(
783
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
784
+ ) -> None:
785
+ conv_partitions = get_source_partitions(
786
+ gm.graph, [torch.nn.Conv2d, torch.nn.functional.conv2d]
787
+ )
788
+ conv_partitions = list(itertools.chain.from_iterable(conv_partitions.values()))
789
+ for conv_partition in conv_partitions:
790
+ if len(conv_partition.output_nodes) > 1:
791
+ raise ValueError("conv partition has more than one output node")
792
+ conv_node = conv_partition.output_nodes[0]
793
+ if (
794
+ conv_node.op != "call_function"
795
+ or conv_node.target != torch.ops.aten.conv2d.default
796
+ ):
797
+ raise ValueError(f"{conv_node} is not an aten conv2d operator")
798
+ # skip annotation if it is already annotated
799
+ if _is_annotated([conv_node]):
800
+ continue
801
+ self._annotate_conv_node_helper(conv_node, True, quantization_config)
802
+
803
+ def _annotate_maxpool2d(
804
+ self, node: Node, quantization_config: QuantizationConfig
805
+ ) -> None:
806
+ if node.target is not torch.ops.aten.max_pool2d.default:
807
+ return
808
+ maxpool_node = node
809
+ if _is_any_annotated(
810
+ [
811
+ maxpool_node,
812
+ ]
813
+ ):
814
+ return
815
+ input_node = maxpool_node.args[0]
816
+ assert isinstance(input_node, Node)
817
+ input_qspec_map = {}
818
+ input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
819
+ maxpool_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
820
+ input_qspec_map=input_qspec_map,
821
+ _annotated=True,
822
+ _is_output_of_quantized_pattern=True,
823
+ )
824
+
825
+ def _annotate_cat(
826
+ self, node: Node, quantization_config: QuantizationConfig
827
+ ) -> None:
828
+ cat_node = node
829
+ input_nodes = cat_node.args[0]
830
+ assert isinstance(input_nodes, Sequence)
831
+ first_input_node = input_nodes[0]
832
+ input_qspec_map = {}
833
+ assert isinstance(first_input_node, Node)
834
+ assert isinstance(cat_node, Node)
835
+ input_qspec_map[first_input_node] = get_input_act_qspec(quantization_config)
836
+ share_qparams_with_input_act0_qspec = SharedQuantizationSpec(
837
+ (first_input_node, cat_node)
838
+ )
839
+
840
+ for input_node in input_nodes[1:]:
841
+ if input_node not in input_qspec_map:
842
+ # There has the case of cat same nodes: torch.cat([input0, input0], 1)
843
+ assert isinstance(input_node, Node)
844
+ input_qspec_map[input_node] = share_qparams_with_input_act0_qspec
845
+
846
+ cat_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
847
+ input_qspec_map=input_qspec_map,
848
+ _annotated=True,
849
+ _is_output_of_quantized_pattern=True,
850
+ )
851
+
852
+ def _annotation_propagation_quantizable_pattern(
853
+ self, node: Node, quantization_config: QuantizationConfig
854
+ ) -> None:
855
+ # Propagate annotation to quantizable patterns.
856
+ if (
857
+ (node.target in quantizable_ops_pt2e)
858
+ and (not _is_any_annotated([node]))
859
+ and (node.op == "call_function")
860
+ ):
861
+
862
+ def is_all_inputs_connected_to_quantized_op(input_nodes):
863
+ # Ensure all the inputs connect to fusion pattern or quantized node
864
+ for input_node in input_nodes:
865
+ if not _is_quantized_op_pt2e(input_node):
866
+ return False
867
+ return True
868
+
869
+ if node.target is torch.ops.aten.max_pool2d.default:
870
+ # Recipe of maxpool2d: check input arg[0] of maxpool2d is quantized or not
871
+ input_nodes_to_check = [node.all_input_nodes[0]]
872
+ if not is_all_inputs_connected_to_quantized_op(input_nodes_to_check):
873
+ return
874
+ self._annotate_maxpool2d(node, quantization_config)
875
+ return
876
+ elif node.target is torch.ops.aten.cat.default:
877
+ input_nodes_to_check = node.all_input_nodes
878
+ if not is_all_inputs_connected_to_quantized_op(input_nodes_to_check):
879
+ return
880
+ self._annotate_cat(node, quantization_config)
881
+ else:
882
+ input_node = node.all_input_nodes[0]
883
+ if not is_all_inputs_connected_to_quantized_op(
884
+ [
885
+ input_node,
886
+ ]
887
+ ):
888
+ return
889
+ input_qspec_map = {}
890
+ input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
891
+ node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
892
+ input_qspec_map=input_qspec_map,
893
+ _annotated=True,
894
+ _is_output_of_quantized_pattern=True,
895
+ )
896
+ return
897
+
898
+ def _annotate_output_share_observer_as_input(
899
+ self, input_node: Node, source_node: Node
900
+ ):
901
+ source_node_quantization_annotation = (
902
+ source_node.meta[QUANT_ANNOTATION_KEY]
903
+ if QUANT_ANNOTATION_KEY in source_node.meta
904
+ else None
905
+ )
906
+ if (
907
+ source_node_quantization_annotation
908
+ and source_node_quantization_annotation._is_output_of_quantized_pattern
909
+ ):
910
+ edge_or_node = (input_node, source_node)
911
+ source_node_quantization_annotation.output_qspec = SharedQuantizationSpec(
912
+ edge_or_node
913
+ )
914
+ return
915
+
916
+ def _annotate_output_for_int8_in_int8_out_pattern(
917
+ self, node: Node, quantization_config: QuantizationConfig
918
+ ) -> None:
919
+ r"""
920
+ Check and insert observer at output of node in int8_in_int8_out_ops_pt2e if needed.
921
+ Recipe refers to https://github.com/intel/intel-extension-for-pytorch/blob/
922
+ 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_utils.py#L495
923
+ """
924
+ edge_or_node: Tuple[Node, Node]
925
+ if (node.target in int8_in_int8_out_ops_pt2e) and (_is_any_annotated([node])):
926
+ if node.target == torch.ops.aten.max_pool2d.default:
927
+ maxpool_node = node
928
+ if not _is_all_annotated(
929
+ [
930
+ maxpool_node,
931
+ ]
932
+ ):
933
+ return
934
+ # Get the quantization_annotation of the maxpool node
935
+ maxpool_node_quantization_annotation = (
936
+ maxpool_node.meta[QUANT_ANNOTATION_KEY]
937
+ if QUANT_ANNOTATION_KEY in maxpool_node.meta
938
+ else None
939
+ )
940
+ if (
941
+ maxpool_node_quantization_annotation
942
+ and maxpool_node_quantization_annotation._is_output_of_quantized_pattern
943
+ ):
944
+ # Annotate the output_qspec of the maxpool node
945
+ input_act = maxpool_node.args[0]
946
+ assert isinstance(input_act, Node)
947
+ assert isinstance(maxpool_node, Node)
948
+ edge_or_node = (input_act, maxpool_node)
949
+ maxpool_node_quantization_annotation.output_qspec = (
950
+ SharedQuantizationSpec(edge_or_node)
951
+ )
952
+ else:
953
+ input_node = node.all_input_nodes[0]
954
+ self._annotate_output_share_observer_as_input(input_node, node)
955
+ return
956
+
957
+ def _annotate_linear(
958
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
959
+ ) -> None:
960
+ linear_partitions = get_source_partitions(
961
+ gm.graph, [torch.nn.Linear, torch.nn.functional.linear]
962
+ )
963
+ linear_partitions = list(
964
+ itertools.chain.from_iterable(linear_partitions.values())
965
+ )
966
+ for partition in linear_partitions:
967
+ if len(partition.output_nodes) > 1:
968
+ raise ValueError(
969
+ "Linear partition cannot have more than one output node"
970
+ )
971
+ linear_node = partition.output_nodes[0]
972
+ if linear_node.op != "call_function" or linear_node.target not in (
973
+ torch.ops.aten.linear.default,
974
+ ):
975
+ raise ValueError(f"{linear_node} is not an aten linear operator")
976
+ # skip annotation if it is already annotated
977
+ if _is_annotated([linear_node]):
978
+ continue
979
+ self._annotate_linear_node_helper(linear_node, True, quantization_config)
980
+
981
+ def _annotate_linear_unary(
982
+ self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
983
+ ) -> None:
984
+ postop_list = [
985
+ torch.nn.ReLU,
986
+ torch.nn.LeakyReLU,
987
+ torch.nn.Tanh,
988
+ ]
989
+ fused_partitions: List[tuple] = []
990
+ for postop in postop_list:
991
+ fused_partitions = fused_partitions + find_sequential_partitions(
992
+ gm, [torch.nn.Linear, postop]
993
+ )
994
+ for fused_partition in fused_partitions:
995
+ linear_partition, unary_partition = fused_partition
996
+ linear_node, unary_node = self._get_output_nodes_of_partitions(
997
+ [linear_partition, unary_partition]
998
+ )
999
+ if linear_node.op != "call_function" or linear_node.target not in (
1000
+ torch.ops.aten.linear.default,
1001
+ ):
1002
+ continue
1003
+ if _is_annotated([unary_node, linear_node]):
1004
+ continue
1005
+ self._annotate_linear_node_helper(linear_node, False, quantization_config)
1006
+ unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation(
1007
+ _annotated=True,
1008
+ _is_output_of_quantized_pattern=True,
1009
+ )
1010
+
1011
+ def validate(self, model: torch.fx.GraphModule) -> None:
1012
+ pass
1013
+
1014
+ @classmethod
1015
+ def get_supported_operators(cls) -> List[OperatorConfig]:
1016
+ return cls.supported_config_and_operators
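For reference, a minimal sketch of how X86InductorQuantizer is typically driven through the PT2E prepare/convert flow. The toy model, input shapes, and the capture entry point used here are illustrative assumptions; the exact export API varies across PyTorch releases.

import torch
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.x86_inductor_quantizer import (
    X86InductorQuantizer,
    get_default_x86_inductor_quantization_config,
)

# Toy model and example inputs (illustrative only).
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 3, 32, 32),)

# Export to an ATen-level FX graph, then let the quantizer annotate it.
exported = capture_pre_autograd_graph(model, example_inputs)
quantizer = X86InductorQuantizer()
quantizer.set_global(get_default_x86_inductor_quantization_config())

prepared = prepare_pt2e(exported, quantizer)   # inserts observers based on the annotations
prepared(*example_inputs)                      # calibration pass
converted = convert_pt2e(prepared)             # reference quantized graph
optimized = torch.compile(converted)           # lower with Inductor for x86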
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer.py ADDED
@@ -0,0 +1,453 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ import functools
5
+
6
+ from typing import Any, Callable, Dict, List, Optional, Set
7
+
8
+ import torch
9
+ import torch._dynamo as torchdynamo
10
+ import torch.nn.functional as F
11
+ from torch.ao.quantization.fake_quantize import (
12
+ FakeQuantize,
13
+ FusedMovingAvgObsFakeQuantize,
14
+ )
15
+ from torch.ao.quantization.observer import (
16
+ HistogramObserver,
17
+ MinMaxObserver,
18
+ MovingAverageMinMaxObserver,
19
+ MovingAveragePerChannelMinMaxObserver,
20
+ PerChannelMinMaxObserver,
21
+ PlaceholderObserver,
22
+ )
23
+
24
+ from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor
25
+
26
+ from torch.ao.quantization.quantizer import QuantizationSpec, Quantizer
27
+
28
+ from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import (
29
+ _convert_scalars_to_attrs,
30
+ OP_TO_ANNOTATOR,
31
+ OperatorConfig,
32
+ OperatorPatternType,
33
+ propagate_annotation,
34
+ QuantizationConfig,
35
+ )
36
+
37
+ from torch.fx import Node
38
+
39
+
40
+ __all__ = [
41
+ "XNNPACKQuantizer",
42
+ "get_symmetric_quantization_config",
43
+ ]
44
+
45
+
46
+ def _get_dynamo_graph(function: Callable, inputs) -> torch.fx.Graph:
47
+ gm, _ = torchdynamo.export(function, aten_graph=True)(*inputs)
48
+ gm.graph.eliminate_dead_code()
49
+ return gm.graph
50
+
51
+
52
+ def _get_linear_patterns(input_size: List[int]):
53
+ in_channels = input_size[-1]
54
+ out_channels = 8 # hard coding but this should not matter
55
+ weight = torch.ones((out_channels, in_channels))
56
+ bias = torch.ones((out_channels,))
57
+ act = torch.ones(input_size)
58
+
59
+ def linear_op(act, weight, bias=None):
60
+ return F.linear(act, weight, bias)
61
+
62
+ pattern_w_bias = _get_dynamo_graph(linear_op, (act, weight, bias))
63
+ pattern_wo_bias = _get_dynamo_graph(linear_op, (act, weight))
64
+ return [pattern_w_bias, pattern_wo_bias]
65
+
66
+
67
+ def _supported_symmetric_quantized_operators() -> Dict[str, List[OperatorPatternType]]:
68
+ supported_operators: Dict[str, List[OperatorPatternType]] = {
69
+ # Both conv and linear should be able to handle relu + hardtanh fusion since
70
+ # those are clamp ops
71
+ "conv2d": [
72
+ [torch.nn.Conv2d, torch.nn.ReLU],
73
+ [torch.nn.Conv2d, F.relu],
74
+ [F.conv2d, torch.nn.ReLU],
75
+ [F.conv2d, F.relu],
76
+ ],
77
+ "linear": [[torch.nn.Linear], [F.linear]],
78
+ "add": [[torch.add]],
79
+ "max_pool2d": [[torch.nn.MaxPool2d], [F.max_pool2d]],
80
+ "adaptive_avg_pool2d": [
81
+ [torch.nn.AdaptiveAvgPool2d],
82
+ [F.adaptive_avg_pool2d],
83
+ ],
84
+ }
85
+ return copy.deepcopy(supported_operators)
86
+
87
+
88
+ def _get_supported_symmetric_config_and_operators() -> List[OperatorConfig]:
89
+ supported_config_and_operators: List[OperatorConfig] = []
90
+ for quantization_config in [
91
+ get_symmetric_quantization_config(),
92
+ get_symmetric_quantization_config(is_qat=True),
93
+ get_symmetric_quantization_config(is_per_channel=True),
94
+ get_symmetric_quantization_config(is_per_channel=True, is_qat=True),
95
+ ]:
96
+ ops = _supported_symmetric_quantized_operators()
97
+ for pattern_list in ops.values():
98
+ supported_config_and_operators.append(
99
+ OperatorConfig(quantization_config, pattern_list)
100
+ )
101
+ return copy.deepcopy(supported_config_and_operators)
102
+
103
+
104
+ @functools.lru_cache
105
+ def get_symmetric_quantization_config(
106
+ is_per_channel: bool = False,
107
+ is_qat: bool = False,
108
+ is_dynamic: bool = False,
109
+ act_qmin: int = -128,
110
+ act_qmax: int = 127,
111
+ weight_qmin: int = -127,
112
+ weight_qmax: int = 127,
113
+ ):
114
+ extra_args: Dict[str, Any] = {"eps": 2**-12}
115
+ if is_qat:
116
+ if is_dynamic:
117
+ act_observer_or_fake_quant_ctr = FakeQuantize
118
+ dynamic_quant_observer = MovingAverageMinMaxObserver.with_args(
119
+ averaging_constant=1
120
+ )
121
+ extra_args["observer"] = dynamic_quant_observer
122
+ else:
123
+ act_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize # type: ignore[assignment]
124
+ else:
125
+ if is_dynamic:
126
+ act_observer_or_fake_quant_ctr = PlaceholderObserver # type: ignore[assignment]
127
+ else:
128
+ act_observer_or_fake_quant_ctr = HistogramObserver # type: ignore[assignment]
129
+
130
+ act_quantization_spec = QuantizationSpec(
131
+ dtype=torch.int8,
132
+ quant_min=act_qmin,
133
+ quant_max=act_qmax,
134
+ qscheme=torch.per_tensor_affine,
135
+ is_dynamic=is_dynamic,
136
+ observer_or_fake_quant_ctr=act_observer_or_fake_quant_ctr.with_args(
137
+ **extra_args,
138
+ ),
139
+ )
140
+ weight_qscheme = (
141
+ torch.per_channel_symmetric if is_per_channel else torch.per_tensor_symmetric
142
+ )
143
+ weight_observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor = (
144
+ MinMaxObserver
145
+ )
146
+ if is_qat:
147
+ # TODO: qat + per channel?
148
+ weight_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize
149
+ elif is_per_channel:
150
+ weight_observer_or_fake_quant_ctr = PerChannelMinMaxObserver
151
+
152
+ extra_args: Dict[str, Any] = {"eps": 2**-12}
153
+ if is_qat:
154
+ if weight_qscheme == torch.per_tensor_symmetric:
155
+ extra_args["observer"] = MovingAverageMinMaxObserver
156
+ else:
157
+ extra_args["observer"] = MovingAveragePerChannelMinMaxObserver # type: ignore[dict-item]
158
+ weight_quantization_spec = QuantizationSpec(
159
+ dtype=torch.int8,
160
+ quant_min=weight_qmin,
161
+ quant_max=weight_qmax,
162
+ qscheme=weight_qscheme,
163
+ ch_axis=0,
164
+ is_dynamic=False,
165
+ observer_or_fake_quant_ctr=weight_observer_or_fake_quant_ctr.with_args(
166
+ **extra_args
167
+ ),
168
+ )
169
+
170
+ bias_quantization_spec = None
171
+ if is_dynamic:
172
+ quantization_config = QuantizationConfig(
173
+ act_quantization_spec,
174
+ None,
175
+ weight_quantization_spec,
176
+ bias_quantization_spec,
177
+ is_qat,
178
+ )
179
+ else:
180
+ quantization_config = QuantizationConfig(
181
+ act_quantization_spec,
182
+ act_quantization_spec,
183
+ weight_quantization_spec,
184
+ bias_quantization_spec,
185
+ is_qat,
186
+ )
187
+ return quantization_config
188
+
189
+
190
+ def _get_supported_config_and_operators() -> List[OperatorConfig]:
191
+ return _get_supported_symmetric_config_and_operators()
192
+
193
+
194
+ def _get_module_name_filter(module_name: str):
195
+ """Get the module_name_filter function for a given module name. The filter accepts
196
+ a node and checks whether the node comes from a module with the given module name.
197
+
198
+ For example:
199
+ node: linear_op = call_function[...](...) # comes from a module with name blocks.sub.linear1
200
+
201
+
202
+ >> module_name_filter = _get_module_name_filter("blocks.sub")
203
+ >> print(module_name_filter(node))
204
+ True # the node is from "blocks.sub" based on the fully qualified name "blocks.sub.linear1"
205
+ """
206
+
207
+ def module_name_filter(n: Node) -> bool:
208
+ # example: {
209
+ # 'L__self___sub': ("L['self'].sub", <class '....Sub'>),
210
+ # 'L__self___sub_linear': ("L['self'].sub.linear", <class 'torch.nn.modules.linear.Linear'>)
211
+ # }
212
+ # get_attr nodes don't have nn_module_stack?
213
+ nn_module_stack = n.meta.get("nn_module_stack", {})
214
+ names = [n[len("L['self'].") :] for n, klass in nn_module_stack.values()]
215
+ return module_name in names
216
+
217
+ return module_name_filter
218
+
219
+
220
+ def _get_module_type_filter(tp: Callable):
221
+ """Get the module_type_filter function for a given module type. The filter accepts
222
+ a node and checks whether the node comes from a module with the given module type.
223
+
224
+ For example:
225
+ node: linear_op = call_function[...](...) # comes from a module with type Block -> Sub -> Linear
226
+
227
+
228
+ >> module_type_filter = _get_module_type_filter(Sub) # submodule with type `Sub`, under the `Block` submodule
229
+ >> print(module_type_filter(node))
230
+ True # the node is from the submodule `Sub` (same for `Block` and `Linear` as well)
231
+ """
232
+
233
+ def module_type_filter(n: Node) -> bool:
234
+ # example: {
235
+ # 'L__self___sub': ("L['self'].sub", <class '....Sub'>),
236
+ # 'L__self___sub_linear': ("L['self'].sub.linear", <class 'torch.nn.modules.linear.Linear'>)
237
+ # }
238
+ nn_module_stack = n.meta.get("nn_module_stack", {})
239
+ types = [t for _, t in nn_module_stack.values()]
240
+ return tp in types
241
+
242
+ return module_type_filter
243
+
244
+
245
+ def _get_not_module_type_or_name_filter(
246
+ tp_list: List[Callable], module_name_list: List[str]
247
+ ) -> Callable[[Node], bool]:
248
+ module_type_filters = [_get_module_type_filter(tp) for tp in tp_list]
249
+ module_name_list_filters = [_get_module_name_filter(m) for m in module_name_list]
250
+
251
+ def not_module_type_or_name_filter(n: Node) -> bool:
252
+ return not any(f(n) for f in module_type_filters + module_name_list_filters)
253
+
254
+ return not_module_type_or_name_filter
255
+
256
+
257
+ class XNNPACKQuantizer(Quantizer):
258
+ supported_config_and_operators = _get_supported_config_and_operators()
259
+ STATIC_QAT_ONLY_OPS = [
260
+ "conv_bn_relu",
261
+ "conv_bn",
262
+ ]
263
+
264
+ # static quantization ops (both PTQ and QAT)
265
+ # Preserve the order so that fusions come before singular ops
266
+ STATIC_OPS = [
267
+ "linear_relu",
268
+ "linear",
269
+ "conv_relu",
270
+ "conv",
271
+ "adaptive_avg_pool2d",
272
+ # TODO: move this to BoltNNQuantizer?
273
+ "gru_io_only",
274
+ "max_pool2d",
275
+ "add_relu",
276
+ "add",
277
+ "mul_relu",
278
+ "mul",
279
+ "cat",
280
+ ]
281
+
282
+ DYNAMIC_OPS = [
283
+ "linear",
284
+ ]
285
+
286
+ def __init__(self):
287
+ super().__init__()
288
+ self.global_config: Optional[QuantizationConfig] = None
289
+ self.operator_type_config: Dict[
290
+ torch._ops.OpOverloadPacket, Optional[QuantizationConfig]
291
+ ] = {}
292
+ self.module_type_config: Dict[Callable, Optional[QuantizationConfig]] = {}
293
+ self.module_name_config: Dict[str, Optional[QuantizationConfig]] = {}
294
+
295
+ @classmethod
296
+ def get_supported_quantization_configs(cls) -> List[QuantizationConfig]:
297
+ op_configs: Set[QuantizationConfig] = set({})
298
+ for spec, _ in cls.supported_config_and_operators:
299
+ op_configs.add(spec)
300
+ return list(op_configs)
301
+
302
+ @classmethod
303
+ def get_supported_operator_for_quantization_config(
304
+ cls, quantization_config: Optional[QuantizationConfig]
305
+ ) -> List[OperatorPatternType]:
306
+ if quantization_config is None:
307
+ all_ops = []
308
+ for _, ops in cls.supported_config_and_operators:
309
+ all_ops.extend(ops)
310
+ return all_ops
311
+
312
+ for config, ops in cls.supported_config_and_operators:
313
+ # note: this assumes each entry in cls.supported_config_and_operators
314
+ # corresponds to one spec, e.g. we don't have
315
+ # [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)]
316
+ # where the first and second entry have the same spec but did not
317
+ # merge the op list
318
+ if config == quantization_config:
319
+ return ops
320
+ return []
321
+
322
+ def set_global(self, quantization_config: QuantizationConfig) -> XNNPACKQuantizer:
323
+ self.global_config = quantization_config
324
+ return self
325
+
326
+ def set_operator_type(
327
+ self,
328
+ operator_type: torch._ops.OpOverloadPacket,
329
+ quantization_config: QuantizationConfig,
330
+ ) -> XNNPACKQuantizer:
331
+ self.operator_type_config[operator_type] = quantization_config
332
+ return self
333
+
334
+ def set_module_type(
335
+ self, module_type: Callable, quantization_config: QuantizationConfig
336
+ ):
337
+ """Set quantization_config for a submodule with type: `module_type`, for example:
338
+ quantizer.set_module_type(Sub) or quantizer.set_module_type(nn.Linear). It will quantize all supported operator/operator
339
+ patterns in the submodule with this module type with the given `quantization_config`
340
+ """
341
+ self.module_type_config[module_type] = quantization_config
342
+ return self
343
+
344
+ def set_module_name(
345
+ self, module_name: str, quantization_config: Optional[QuantizationConfig]
346
+ ):
347
+ """Set quantization_config for a submodule with name: `module_name`, for example:
348
+ quantizer.set_module_name("blocks.sub"). It will quantize all supported operator/operator
349
+ patterns in the submodule with this module name with the given `quantization_config`
350
+ """
351
+ assert (
352
+ quantization_config is not None
353
+ ), " quantization_config == None is not supported yet"
354
+ self.module_name_config[module_name] = quantization_config
355
+ return self
356
+
357
+ def transform_for_annotation(
358
+ self, model: torch.fx.GraphModule
359
+ ) -> torch.fx.GraphModule:
360
+ """Transforms scalar values to tensor attributes"""
361
+ return _convert_scalars_to_attrs(model)
362
+
363
+ def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
364
+ """just handling global spec for now"""
365
+ # hacked for handling dynamic linear quant. will fix later.
366
+ if self.global_config and self.global_config.input_activation.is_dynamic: # type: ignore[union-attr]
367
+ model = self._annotate_for_dynamic_quantization_config(model)
368
+ else:
369
+ model = self._annotate_for_static_quantization_config(model)
370
+ propagate_annotation(model)
371
+ return model
372
+
373
+ def _annotate_all_static_patterns(
374
+ self,
375
+ model: torch.fx.GraphModule,
376
+ quantization_config: Optional[QuantizationConfig],
377
+ filter_fn: Optional[Callable[[Node], bool]] = None,
378
+ ) -> torch.fx.GraphModule:
379
+ # TODO: implement the support for None to be canceling out previous annotations
380
+ if quantization_config is None:
381
+ return model
382
+
383
+ if quantization_config.is_qat:
384
+ for op in self.STATIC_QAT_ONLY_OPS:
385
+ OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn)
386
+ for op in self.STATIC_OPS:
387
+ OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn)
388
+ return model
389
+
390
+ def _annotate_all_dynamic_patterns(
391
+ self,
392
+ model: torch.fx.GraphModule,
393
+ quantization_config: Optional[QuantizationConfig],
394
+ filter_fn: Optional[Callable[[Node], bool]] = None,
395
+ ) -> torch.fx.GraphModule:
396
+ # TODO: implement the support for None to be canceling out previous annotations
397
+ if quantization_config is None:
398
+ return model
399
+
400
+ for op in self.DYNAMIC_OPS:
401
+ OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn)
402
+ return model
403
+
404
+ def _annotate_for_static_quantization_config(
405
+ self, model: torch.fx.GraphModule
406
+ ) -> torch.fx.GraphModule:
407
+ module_name_list = list(self.module_name_config.keys())
408
+ for module_name, config in self.module_name_config.items():
409
+ self._annotate_all_static_patterns(
410
+ model, config, _get_module_name_filter(module_name)
411
+ )
412
+
413
+ tp_list = list(self.module_type_config.keys())
414
+ for module_type, config in self.module_type_config.items():
415
+ self._annotate_all_static_patterns(
416
+ model, config, _get_module_type_filter(module_type)
417
+ )
418
+
419
+ self._annotate_all_static_patterns(
420
+ model,
421
+ self.global_config,
422
+ _get_not_module_type_or_name_filter(tp_list, module_name_list),
423
+ )
424
+ return model
425
+
426
+ def _annotate_for_dynamic_quantization_config(
427
+ self, model: torch.fx.GraphModule
428
+ ) -> torch.fx.GraphModule:
429
+ module_name_list = list(self.module_name_config.keys())
430
+ for module_name, config in self.module_name_config.items():
431
+ self._annotate_all_dynamic_patterns(
432
+ model, config, _get_module_name_filter(module_name)
433
+ )
434
+
435
+ tp_list = list(self.module_type_config.keys())
436
+ for module_type, config in self.module_type_config.items():
437
+ self._annotate_all_dynamic_patterns(
438
+ model, config, _get_module_type_filter(module_type)
439
+ )
440
+
441
+ self._annotate_all_dynamic_patterns(
442
+ model,
443
+ self.global_config,
444
+ _get_not_module_type_or_name_filter(tp_list, module_name_list),
445
+ )
446
+ return model
447
+
448
+ def validate(self, model: torch.fx.GraphModule) -> None:
449
+ pass
450
+
451
+ @classmethod
452
+ def get_supported_operators(cls) -> List[OperatorConfig]:
453
+ return cls.supported_config_and_operators
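For reference, a minimal sketch of how XNNPACKQuantizer is typically used with the PT2E prepare/convert APIs, including the module-type / module-name scoping described above. The toy model and the capture entry point are illustrative assumptions and version dependent.

import torch
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

# Toy model and example inputs (illustrative only).
model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(2, 16),)

quantizer = XNNPACKQuantizer()
# Global config: symmetric int8 activations, per-channel int8 weights.
quantizer.set_global(get_symmetric_quantization_config(is_per_channel=True))
# Narrower scopes (module type / module name) take precedence over the global config.
quantizer.set_module_type(torch.nn.Linear, get_symmetric_quantization_config())

exported = capture_pre_autograd_graph(model, example_inputs)
prepared = prepare_pt2e(exported, quantizer)   # annotate() runs here, then observers are inserted
prepared(*example_inputs)                      # calibration
converted = convert_pt2e(prepared)             # reference quantized model for XNNPACK lowering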
llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py ADDED
@@ -0,0 +1,1032 @@
1
+ import itertools
2
+ import operator
3
+ from dataclasses import dataclass
4
+ from typing import Callable, Dict, List, NamedTuple, Optional
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch._subclasses import FakeTensor
9
+ from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix
10
+ from torch.ao.quantization.pt2e.export_utils import _WrapperModule
11
+ from torch.ao.quantization.pt2e.graph_utils import find_sequential_partitions
12
+ from torch.ao.quantization.pt2e.utils import (
13
+ _conv1d_bn_example_inputs,
14
+ _conv2d_bn_example_inputs,
15
+ get_aten_graph_module,
16
+ )
17
+ from torch.ao.quantization.quantizer import (
18
+ QuantizationAnnotation,
19
+ QuantizationSpec,
20
+ QuantizationSpecBase,
21
+ SharedQuantizationSpec,
22
+ )
23
+
24
+ from torch.ao.quantization.quantizer.utils import (
25
+ _annotate_input_qspec_map,
26
+ _annotate_output_qspec,
27
+ )
28
+ from torch.fx import Node
29
+ from torch.fx.passes.utils.matcher_with_name_node_map_utils import (
30
+ SubgraphMatcherWithNameNodeMap,
31
+ )
32
+ from torch.fx.passes.utils.source_matcher_utils import get_source_partitions
33
+
34
+
35
+ __all__ = [
36
+ "OperatorConfig",
37
+ "OperatorPatternType",
38
+ "QuantizationConfig",
39
+ "get_input_act_qspec",
40
+ "get_output_act_qspec",
41
+ "get_weight_qspec",
42
+ "get_bias_qspec",
43
+ "OP_TO_ANNOTATOR",
44
+ "propagate_annotation",
45
+ ]
46
+
47
+
48
+ # In the absence of a better name, just winging it with QuantizationConfig
49
+ @dataclass(eq=True, frozen=True)
50
+ class QuantizationConfig:
51
+ input_activation: Optional[QuantizationSpec]
52
+ output_activation: Optional[QuantizationSpec]
53
+ weight: Optional[QuantizationSpec]
54
+ bias: Optional[QuantizationSpec]
55
+ # TODO: remove, since we can use observer_or_fake_quant_ctr to express this
56
+ is_qat: bool = False
57
+
58
+
59
+ OperatorPatternType = List[Callable]
60
+ OperatorPatternType.__module__ = (
61
+ "torch.ao.quantization.quantizer.xnnpack_quantizer_utils"
62
+ )
63
+
64
+ AnnotatorType = Callable[
65
+ [
66
+ torch.fx.GraphModule,
67
+ Optional[QuantizationConfig],
68
+ Optional[Callable[[Node], bool]],
69
+ ],
70
+ Optional[List[List[Node]]],
71
+ ]
72
+ OP_TO_ANNOTATOR: Dict[str, AnnotatorType] = {}
73
+
74
+
75
+ def register_annotator(op: str):
76
+ def decorator(annotator: AnnotatorType):
77
+ OP_TO_ANNOTATOR[op] = annotator
78
+
79
+ return decorator
80
+
81
+
82
+ class OperatorConfig(NamedTuple):
83
+ # fix List[str] with List[List[Union[nn.Module, FunctionType, BuiltinFunctionType]]]
84
+ # Basically we are mapping a quantization config to some list of patterns.
85
+ # a pattern is defined as a list of nn module, function or builtin function names
86
+ # e.g. [nn.Conv2d, torch.relu, torch.add]
87
+ # We have not resolved whether fusion should be considered an internal detail of the
88
+ # quantizer, in which case it would not need to be communicated to the user.
89
+ # Note this pattern is not very informative since it does not
90
+ # tell us the graph structure resulting from the list of ops.
91
+ config: QuantizationConfig
92
+ operators: List[OperatorPatternType]
93
+
94
+
95
+ def _is_annotated(nodes: List[Node]):
96
+ """
97
+ Given a list of nodes (that represents an operator pattern),
98
+ check whether any of the nodes is annotated; return True if at least one node
99
+ is annotated, otherwise return False
100
+ """
101
+ annotated = False
102
+ for node in nodes:
103
+ annotated = annotated or (
104
+ "quantization_annotation" in node.meta
105
+ and node.meta["quantization_annotation"]._annotated
106
+ )
107
+ return annotated
108
+
109
+
110
+ def _mark_nodes_as_annotated(nodes: List[Node]):
111
+ for node in nodes:
112
+ if node is not None:
113
+ if "quantization_annotation" not in node.meta:
114
+ node.meta["quantization_annotation"] = QuantizationAnnotation()
115
+ node.meta["quantization_annotation"]._annotated = True
116
+
117
+
118
+ def get_input_act_qspec(quantization_config: Optional[QuantizationConfig]):
119
+ if quantization_config is None:
120
+ return None
121
+ if quantization_config.input_activation is None:
122
+ return None
123
+ quantization_spec: QuantizationSpec = quantization_config.input_activation
124
+ assert quantization_spec.qscheme in [
125
+ torch.per_tensor_affine,
126
+ torch.per_tensor_symmetric,
127
+ ]
128
+ return quantization_spec
129
+
130
+
131
+ def get_output_act_qspec(quantization_config: Optional[QuantizationConfig]):
132
+ if quantization_config is None:
133
+ return None
134
+ if quantization_config.output_activation is None:
135
+ return None
136
+ quantization_spec: QuantizationSpec = quantization_config.output_activation
137
+ assert quantization_spec.qscheme in [
138
+ torch.per_tensor_affine,
139
+ torch.per_tensor_symmetric,
140
+ ]
141
+ return quantization_spec
142
+
143
+
144
+ def get_weight_qspec(quantization_config: Optional[QuantizationConfig]):
145
+ if quantization_config is None:
146
+ return None
147
+ assert quantization_config is not None
148
+ if quantization_config.weight is None:
149
+ return None
150
+ quantization_spec: QuantizationSpec = quantization_config.weight
151
+ if quantization_spec.qscheme not in [
152
+ torch.per_tensor_symmetric,
153
+ torch.per_channel_symmetric,
154
+ ]:
155
+ raise ValueError(
156
+ f"Unsupported quantization_spec {quantization_spec} for weight"
157
+ )
158
+ return quantization_spec
159
+
160
+
161
+ def get_bias_qspec(quantization_config: Optional[QuantizationConfig]):
162
+ if quantization_config is None:
163
+ return None
164
+ assert quantization_config is not None
165
+ if quantization_config.bias is None:
166
+ return None
167
+ quantization_spec: QuantizationSpec = quantization_config.bias
168
+ assert (
169
+ quantization_spec.dtype == torch.float
170
+ ), "Only float dtype is supported for bias right now"
171
+ return quantization_spec
172
+
173
+
174
+ @register_annotator("linear")
175
+ def _annotate_linear(
176
+ gm: torch.fx.GraphModule,
177
+ quantization_config: Optional[QuantizationConfig],
178
+ filter_fn: Optional[Callable[[Node], bool]] = None,
179
+ ) -> Optional[List[List[Node]]]:
180
+ annotated_partitions = []
181
+ input_act_qspec = get_input_act_qspec(quantization_config)
182
+ output_act_qspec = get_output_act_qspec(quantization_config)
183
+ weight_qspec = get_weight_qspec(quantization_config)
184
+ bias_qspec = get_bias_qspec(quantization_config)
185
+ for node in gm.graph.nodes:
186
+ if node.op != "call_function" or node.target != torch.ops.aten.linear.default:
187
+ continue
188
+ if filter_fn and not filter_fn(node):
189
+ continue
190
+ act_node = node.args[0]
191
+ weight_node = node.args[1]
192
+ bias_node = None
193
+ if len(node.args) > 2:
194
+ bias_node = node.args[2]
195
+
196
+ if _is_annotated([node]) is False: # type: ignore[list-item]
197
+ _annotate_input_qspec_map(
198
+ node,
199
+ act_node,
200
+ input_act_qspec,
201
+ )
202
+ _annotate_input_qspec_map(
203
+ node,
204
+ weight_node,
205
+ weight_qspec,
206
+ )
207
+ nodes_to_mark_annotated = [node, weight_node]
208
+ if bias_node:
209
+ _annotate_input_qspec_map(
210
+ node,
211
+ bias_node,
212
+ bias_qspec,
213
+ )
214
+ nodes_to_mark_annotated.append(bias_node)
215
+ _annotate_output_qspec(node, output_act_qspec)
216
+ _mark_nodes_as_annotated(nodes_to_mark_annotated)
217
+ annotated_partitions.append(nodes_to_mark_annotated)
218
+
219
+ return annotated_partitions
220
+
221
+
222
+ @register_annotator("linear_relu")
223
+ def _annotate_linear_relu(
224
+ gm: torch.fx.GraphModule,
225
+ quantization_config: Optional[QuantizationConfig],
226
+ filter_fn: Optional[Callable[[Node], bool]] = None,
227
+ ) -> Optional[List[List[Node]]]:
228
+ annotated_partitions = []
229
+ input_act_qspec = get_input_act_qspec(quantization_config)
230
+ output_act_qspec = get_output_act_qspec(quantization_config)
231
+ weight_qspec = get_weight_qspec(quantization_config)
232
+ bias_qspec = get_bias_qspec(quantization_config)
233
+ for node in gm.graph.nodes:
234
+ if node.op != "call_function" or node.target not in [
235
+ torch.ops.aten.relu.default,
236
+ torch.ops.aten.relu_.default,
237
+ ]:
238
+ continue
239
+ relu_node = node
240
+ maybe_linear_node = node.args[0]
241
+ if (
242
+ not isinstance(maybe_linear_node, Node)
243
+ or maybe_linear_node.op != "call_function"
244
+ or maybe_linear_node.target != torch.ops.aten.linear.default
245
+ ):
246
+ continue
247
+
248
+ linear_node = maybe_linear_node
249
+ input_qspec_map = {}
250
+ input_act = linear_node.args[0]
251
+ assert isinstance(input_act, Node)
252
+ input_qspec_map[input_act] = input_act_qspec
253
+
254
+ weight = linear_node.args[1]
255
+ assert isinstance(weight, Node)
256
+ input_qspec_map[weight] = weight_qspec
257
+
258
+ # adding weight node to the partition as well
259
+ partition = [relu_node, linear_node, weight]
260
+ bias = linear_node.args[2] if len(linear_node.args) > 2 else None
261
+ if isinstance(bias, Node):
262
+ input_qspec_map[bias] = bias_qspec
263
+ partition.append(bias)
264
+
265
+ if _is_annotated(partition):
266
+ continue
267
+
268
+ if filter_fn and any(not filter_fn(n) for n in partition):
269
+ continue
270
+
271
+ linear_node.meta["quantization_annotation"] = QuantizationAnnotation(
272
+ input_qspec_map=input_qspec_map,
273
+ _annotated=True,
274
+ )
275
+ relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
276
+ output_qspec=output_act_qspec,
277
+ _annotated=True,
278
+ )
279
+ _mark_nodes_as_annotated(partition)
280
+ annotated_partitions.append(partition)
281
+ return annotated_partitions
282
+
283
+
284
+ @register_annotator("conv")
285
+ def _annotate_conv(
286
+ gm: torch.fx.GraphModule,
287
+ quantization_config: Optional[QuantizationConfig],
288
+ filter_fn: Optional[Callable[[Node], bool]] = None,
289
+ ) -> Optional[List[List[Node]]]:
290
+ annotated_partitions = []
291
+ for n in gm.graph.nodes:
292
+ if n.op != "call_function" or n.target not in [
293
+ torch.ops.aten.conv1d.default,
294
+ torch.ops.aten.conv2d.default,
295
+ ]:
296
+ continue
297
+ conv_node = n
298
+
299
+ input_qspec_map = {}
300
+ input_act = conv_node.args[0]
301
+ assert isinstance(input_act, Node)
302
+ input_qspec_map[input_act] = get_input_act_qspec(quantization_config)
303
+
304
+ weight = conv_node.args[1]
305
+ assert isinstance(weight, Node)
306
+ input_qspec_map[weight] = get_weight_qspec(quantization_config)
307
+
308
+ # adding weight node to the partition as well
309
+ partition = [conv_node, conv_node.args[1]]
310
+
311
+ bias = conv_node.args[2] if len(conv_node.args) > 2 else None
312
+ if isinstance(bias, Node):
313
+ input_qspec_map[bias] = get_bias_qspec(quantization_config)
314
+ partition.append(bias)
315
+
316
+ if _is_annotated(partition):
317
+ continue
318
+
319
+ if filter_fn and any(not filter_fn(n) for n in partition):
320
+ continue
321
+
322
+ conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
323
+ input_qspec_map=input_qspec_map,
324
+ output_qspec=get_output_act_qspec(quantization_config),
325
+ _annotated=True,
326
+ )
327
+ _mark_nodes_as_annotated(partition)
328
+ annotated_partitions.append(partition)
329
+ return annotated_partitions
330
+
331
+
332
+ @register_annotator("conv_relu")
333
+ def _annotate_conv_relu(
334
+ gm: torch.fx.GraphModule,
335
+ quantization_config: Optional[QuantizationConfig],
336
+ filter_fn: Optional[Callable[[Node], bool]] = None,
337
+ ) -> Optional[List[List[Node]]]:
338
+ annotated_partitions = []
339
+ for n in gm.graph.nodes:
340
+ if n.op != "call_function" or n.target not in [
341
+ torch.ops.aten.relu.default,
342
+ torch.ops.aten.relu_.default,
343
+ ]:
344
+ continue
345
+ relu_node = n
346
+ maybe_conv_node = n.args[0]
347
+ if (
348
+ not isinstance(maybe_conv_node, Node)
349
+ or maybe_conv_node.op != "call_function"
350
+ or maybe_conv_node.target
351
+ not in [
352
+ torch.ops.aten.conv1d.default,
353
+ torch.ops.aten.conv2d.default,
354
+ ]
355
+ ):
356
+ continue
357
+ conv_node = maybe_conv_node
358
+
359
+ input_qspec_map = {}
360
+ input_act = conv_node.args[0]
361
+ assert isinstance(input_act, Node)
362
+ input_qspec_map[input_act] = get_input_act_qspec(quantization_config)
363
+
364
+ weight = conv_node.args[1]
365
+ assert isinstance(weight, Node)
366
+ input_qspec_map[weight] = get_weight_qspec(quantization_config)
367
+
368
+ # adding weight node to the partition as well
369
+ partition = [relu_node, conv_node, conv_node.args[1]]
370
+ bias = conv_node.args[2] if len(conv_node.args) > 2 else None
371
+ if isinstance(bias, Node):
372
+ input_qspec_map[bias] = get_bias_qspec(quantization_config)
373
+ partition.append(bias)
374
+
375
+ if _is_annotated(partition):
376
+ continue
377
+
378
+ if filter_fn and any(not filter_fn(n) for n in partition):
379
+ continue
380
+
381
+ conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
382
+ input_qspec_map=input_qspec_map, _annotated=True
383
+ )
384
+ relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
385
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
386
+ _annotated=True,
387
+ )
388
+ _mark_nodes_as_annotated(partition)
389
+ annotated_partitions.append(partition)
390
+ return annotated_partitions
391
+
392
+
393
+ @register_annotator("conv_bn")
394
+ def _annotate_conv_bn(
395
+ gm: torch.fx.GraphModule,
396
+ quantization_config: Optional[QuantizationConfig],
397
+ filter_fn: Optional[Callable[[Node], bool]] = None,
398
+ ) -> Optional[List[List[Node]]]:
399
+ """
400
+ Find conv + batchnorm partitions
401
+ Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.
402
+ """
403
+ return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=False)
404
+
405
+
406
+ @register_annotator("conv_bn_relu")
407
+ def _annotate_conv_bn_relu(
408
+ gm: torch.fx.GraphModule,
409
+ quantization_config: Optional[QuantizationConfig],
410
+ filter_fn: Optional[Callable[[Node], bool]] = None,
411
+ ) -> Optional[List[List[Node]]]:
412
+ """
413
+ Find conv + batchnorm + relu partitions
414
+ Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.
415
+ """
416
+ return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=True)
417
+
418
+
419
+ def _do_annotate_conv_bn(
420
+ gm: torch.fx.GraphModule,
421
+ quantization_config: Optional[QuantizationConfig],
422
+ filter_fn: Optional[Callable[[Node], bool]],
423
+ has_relu: bool,
424
+ ) -> List[List[Node]]:
425
+ """
426
+ Given a function that takes in a `conv_fn` and returns a conv-bn[-relu] pattern,
427
+ return a list of annotated partitions.
428
+
429
+ The output of the pattern must include a dictionary from string name to node
430
+ for the following names: "input", "conv", "weight", "bias", and "output".
431
+ """
432
+
433
+ def get_pattern(conv_fn: Callable, relu_is_inplace: bool):
434
+ def _conv_bn(x, conv_weight, conv_bias, bn_weight, bn_bias, bn_rm, bn_rv):
435
+ conv = conv_fn(x, conv_weight, conv_bias)
436
+ bn = F.batch_norm(conv, bn_rm, bn_rv, bn_weight, bn_bias, training=True)
437
+ if has_relu:
438
+ output = F.relu_(bn) if relu_is_inplace else F.relu(bn)
439
+ else:
440
+ output = bn
441
+ return output, {
442
+ "input": x,
443
+ "conv": conv,
444
+ "weight": conv_weight,
445
+ "bias": conv_bias,
446
+ "output": output,
447
+ }
448
+
449
+ return _WrapperModule(_conv_bn)
450
+
451
+ # Needed for matching, otherwise the matches get filtered out due to unused
452
+ # nodes returned by batch norm
453
+ gm.graph.eliminate_dead_code()
454
+ gm.recompile()
455
+
456
+ matches = []
457
+ combinations = [
458
+ (F.conv1d, _conv1d_bn_example_inputs),
459
+ (F.conv2d, _conv2d_bn_example_inputs),
460
+ ]
461
+
462
+ # Add `is_cuda` and `relu_is_inplace` dimensions
463
+ combinations = itertools.product(
464
+ combinations,
465
+ [True, False] if torch.cuda.is_available() else [False], # is_cuda
466
+ [True, False] if has_relu else [False], # relu_is_inplace
467
+ )
468
+
469
+ # Match against all conv dimensions and cuda variants
470
+ for (conv_fn, example_inputs), is_cuda, relu_is_inplace in combinations:
471
+ pattern = get_pattern(conv_fn, relu_is_inplace)
472
+ pattern = get_aten_graph_module(pattern, example_inputs, is_cuda)
473
+ pattern.graph.eliminate_dead_code()
474
+ pattern.recompile()
475
+ matcher = SubgraphMatcherWithNameNodeMap(pattern, ignore_literals=True)
476
+ matches.extend(matcher.match(gm.graph))
477
+
478
+ # Annotate nodes returned in the matches
479
+ annotated_partitions = []
480
+ for match in matches:
481
+ name_node_map = match.name_node_map
482
+ input_node = name_node_map["input"]
483
+ conv_node = name_node_map["conv"]
484
+ weight_node = name_node_map["weight"]
485
+ bias_node = name_node_map["bias"]
486
+ output_node = name_node_map["output"]
487
+
488
+ # TODO: annotate the uses of input, weight, and bias separately instead
489
+ # of assuming they come from a single conv node. This is not possible today
490
+ # because input may have multiple users, and we can't rely on the conv node
491
+ # always being the first user. This was the case in models with skip
492
+ # connections like resnet18
493
+
494
+ # Validate conv args
495
+ if conv_node.args[0] is not input_node:
496
+ raise ValueError("Conv arg did not contain input node ", input_node)
497
+ if conv_node.args[1] is not weight_node:
498
+ raise ValueError("Conv arg did not contain weight node ", weight_node)
499
+ if len(conv_node.args) > 2 and conv_node.args[2] is not bias_node:
500
+ raise ValueError("Conv arg did not contain bias node ", bias_node)
501
+
502
+ # Skip if the partition is already annotated or is filtered out by the user
503
+ partition = [conv_node, weight_node]
504
+ if bias_node is not None:
505
+ partition.append(bias_node)
506
+ if _is_annotated(partition):
507
+ continue
508
+ if filter_fn and any(not filter_fn(n) for n in partition):
509
+ continue
510
+
511
+ # Annotate conv inputs and pattern output
512
+ input_qspec_map = {}
513
+ input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
514
+ input_qspec_map[weight_node] = get_weight_qspec(quantization_config)
515
+ if bias_node is not None:
516
+ input_qspec_map[bias_node] = get_bias_qspec(quantization_config)
517
+ conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
518
+ input_qspec_map=input_qspec_map,
519
+ _annotated=True,
520
+ )
521
+ output_node.meta["quantization_annotation"] = QuantizationAnnotation(
522
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type]
523
+ _annotated=True,
524
+ )
525
+ _mark_nodes_as_annotated(partition)
526
+ annotated_partitions.append(partition)
527
+ return annotated_partitions
528
+
529
+
530
+ @register_annotator("gru_io_only")
531
+ def _annotate_gru_io_only(
532
+ gm: torch.fx.GraphModule,
533
+ quantization_config: Optional[QuantizationConfig],
534
+ filter_fn: Optional[Callable[[Node], bool]] = None,
535
+ ) -> Optional[List[List[Node]]]:
536
+ gru_partitions = get_source_partitions(gm.graph, [torch.nn.GRU], filter_fn)
537
+ gru_partitions = list(itertools.chain.from_iterable(gru_partitions.values()))
538
+ annotated_partitions = []
539
+ for gru_partition in gru_partitions:
540
+ annotated_partitions.append(gru_partition.nodes)
541
+ output_nodes = gru_partition.output_nodes
542
+ input_nodes = gru_partition.input_nodes
543
+ # skip annotation if it is already annotated
544
+ if _is_annotated(input_nodes + output_nodes):
545
+ continue
546
+ # inside each GRU partition, we should be able to annotate each linear
547
+ # subgraph
548
+ input_qspec_map: Dict[Node, QuantizationSpecBase] = {}
549
+ input_act = input_nodes[0]
550
+ input_act_user = next(iter(input_act.users.keys()))
551
+ assert isinstance(input_act, Node)
552
+ assert isinstance(input_act_user, Node)
553
+ input_act_user.meta["quantization_annotation"] = QuantizationAnnotation(
554
+ input_qspec_map={
555
+ input_act: get_input_act_qspec(quantization_config),
556
+ },
557
+ _annotated=True,
558
+ )
559
+
560
+ hidden_state = input_nodes[1]
561
+ hidden_state_user = next(iter(hidden_state.users.keys()))
562
+ assert isinstance(hidden_state, Node)
563
+ assert isinstance(hidden_state_user, Node)
564
+ hidden_state_user.meta["quantization_annotation"] = QuantizationAnnotation(
565
+ input_qspec_map={
566
+ hidden_state: get_input_act_qspec(quantization_config),
567
+ },
568
+ _annotated=True,
569
+ )
570
+
571
+ assert len(output_nodes) == 2, "expecting GRU to have two outputs"
572
+ for output in output_nodes:
573
+ output.meta["quantization_annotation"] = QuantizationAnnotation(
574
+ output_qspec=get_output_act_qspec(quantization_config),
575
+ _annotated=True,
576
+ )
577
+ nodes_to_mark_annotated = list(gru_partition.nodes)
578
+ _mark_nodes_as_annotated(nodes_to_mark_annotated)
579
+ return annotated_partitions
580
+
581
+
582
+ @register_annotator("max_pool2d")
583
+ def _annotate_max_pool2d(
584
+ gm: torch.fx.GraphModule,
585
+ quantization_config: Optional[QuantizationConfig],
586
+ filter_fn: Optional[Callable[[Node], bool]] = None,
587
+ ) -> Optional[List[List[Node]]]:
588
+ module_partitions = get_source_partitions(
589
+ gm.graph, [torch.nn.MaxPool2d, torch.nn.functional.max_pool2d], filter_fn
590
+ )
591
+ maxpool_partitions = list(itertools.chain.from_iterable(module_partitions.values()))
592
+ annotated_partitions = []
593
+ for maxpool_partition in maxpool_partitions:
594
+ annotated_partitions.append(maxpool_partition.nodes)
595
+ output_node = maxpool_partition.output_nodes[0]
596
+ maxpool_node = None
597
+ for n in maxpool_partition.nodes:
598
+ if n.target == torch.ops.aten.max_pool2d.default:
599
+ maxpool_node = n
600
+ assert (
601
+ maxpool_node is not None
602
+ ), ("XNNPACKQuantizer only works with torch.ops.aten.max_pool2d.default, "
603
+ "please make sure you are exporting the model correctly")
604
+ if _is_annotated([output_node, maxpool_node]): # type: ignore[list-item]
605
+ continue
606
+
607
+ input_act = maxpool_node.args[0] # type: ignore[union-attr]
608
+ assert isinstance(input_act, Node)
609
+
610
+ # only annotate maxpool when the output of the input node is annotated
611
+ if (
612
+ "quantization_annotation" not in input_act.meta
613
+ or not input_act.meta["quantization_annotation"]._annotated
614
+ or input_act.meta["quantization_annotation"].output_qspec is None
615
+ ):
616
+ continue
617
+ # input and output of maxpool will share quantization parameter with input of maxpool
618
+ act_qspec = SharedQuantizationSpec(input_act)
619
+ # act_qspec = get_act_qspec(quantization_config)
620
+ maxpool_node.meta["quantization_annotation"] = QuantizationAnnotation( # type: ignore[union-attr]
621
+ input_qspec_map={
622
+ input_act: act_qspec,
623
+ },
624
+ _annotated=True,
625
+ )
626
+ output_node.meta["quantization_annotation"] = QuantizationAnnotation(
627
+ output_qspec=act_qspec,
628
+ _annotated=True,
629
+ )
630
+ return annotated_partitions
631
+
632
+
633
+ @register_annotator("adaptive_avg_pool2d")
634
+ def _annotate_adaptive_avg_pool2d(
635
+ gm: torch.fx.GraphModule,
636
+ quantization_config: Optional[QuantizationConfig],
637
+ filter_fn: Optional[Callable[[Node], bool]] = None,
638
+ ) -> Optional[List[List[Node]]]:
639
+ """Always annotate adaptive_avg_pool2d op"""
640
+ module_partitions = get_source_partitions(
641
+ gm.graph, [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d], filter_fn
642
+ )
643
+ partitions = list(itertools.chain.from_iterable(module_partitions.values()))
644
+ annotated_partitions = []
645
+ for partition in partitions:
646
+ pool_node = partition.output_nodes[0]
647
+ if (
648
+ pool_node.op != "call_function"
649
+ or pool_node.target != torch.ops.aten.adaptive_avg_pool2d.default
650
+ ):
651
+ raise ValueError(f"{pool_node} is not an aten adaptive_avg_pool2d operator")
652
+
653
+ if _is_annotated([pool_node]):
654
+ continue
655
+
656
+ annotated_partitions.append(partition.nodes)
657
+ input_act = pool_node.args[0]
658
+ assert isinstance(input_act, Node)
659
+
660
+ # only annotate input output sharing operator
661
+ # when the output of the input node is annotated
662
+ if (
663
+ "quantization_annotation" not in input_act.meta
664
+ or not input_act.meta["quantization_annotation"]._annotated
665
+ or input_act.meta["quantization_annotation"].output_qspec is None
666
+ ):
667
+ input_act_qspec = get_input_act_qspec(quantization_config)
668
+ else:
669
+ input_act_qspec = SharedQuantizationSpec(input_act)
670
+
671
+ # output sharing with input
672
+ output_act_qspec = SharedQuantizationSpec((input_act, pool_node))
673
+ pool_node.meta["quantization_annotation"] = QuantizationAnnotation(
674
+ input_qspec_map={
675
+ input_act: input_act_qspec,
676
+ },
677
+ output_qspec=output_act_qspec,
678
+ _annotated=True,
679
+ )
680
+ return annotated_partitions
681
+
682
+
683
+ def _is_input_large_scalar(node: Node, gm: torch.fx.GraphModule):
684
+ """Check if the input is a large scalar value, so that we can skip quantization for the node,
685
+ since the histc op (in HistogramObserver) only works for values up to a certain upper bound
686
+ """
687
+ if node.op == "get_attr":
688
+ tensor = getattr(gm, node.target) # type: ignore[arg-type]
689
+ # torch.histc works until this upper bound
690
+ HISTC_UPPER_BOUND = 3.4028235e15
691
+ return tensor.numel() == 1 and abs(tensor.item()) > HISTC_UPPER_BOUND
692
+ return False
693
+
694
+
695
+ def _is_input_non_float_tensor(node: Node):
696
+ """Check if the input is not a float tensor, so that we can skip quantization for the node
697
+ since observers only work with float Tensors
698
+ """
699
+ if "val" not in node.meta or not isinstance(node.meta["val"], FakeTensor):
700
+ return True
701
+ return node.meta["val"].dtype != torch.float32
702
+
703
+
704
+ @register_annotator("add_relu")
705
+ def _annotate_add_relu(
706
+ gm: torch.fx.GraphModule,
707
+ quantization_config: Optional[QuantizationConfig],
708
+ filter_fn: Optional[Callable[[Node], bool]] = None,
709
+ ) -> Optional[List[List[Node]]]:
710
+ fused_partitions = find_sequential_partitions(
711
+ gm, [torch.add, torch.nn.ReLU], filter_fn=filter_fn
712
+ )
713
+ annotated_partitions = []
714
+ for fused_partition in fused_partitions:
715
+ add_partition, relu_partition = fused_partition
716
+ annotated_partitions.append(add_partition.nodes + relu_partition.nodes)
717
+ if len(relu_partition.output_nodes) > 1:
718
+ raise ValueError("Relu partition has more than one output node")
719
+ relu_node = relu_partition.output_nodes[0]
720
+ if len(add_partition.output_nodes) > 1:
721
+ raise ValueError("add partition has more than one output node")
722
+ add_node = add_partition.output_nodes[0]
723
+
724
+ if _is_annotated([relu_node, add_node]):
725
+ continue
726
+
727
+ input_act_qspec = get_input_act_qspec(quantization_config)
728
+ output_act_qspec = get_output_act_qspec(quantization_config)
729
+
730
+ input_qspec_map = {}
731
+ input_act0 = add_node.args[0]
732
+ if isinstance(input_act0, Node):
733
+ if _is_input_large_scalar(input_act0, gm):
734
+ continue
735
+ if _is_input_non_float_tensor(input_act0):
736
+ continue
737
+ input_qspec_map[input_act0] = input_act_qspec
738
+
739
+ input_act1 = add_node.args[1]
740
+ if isinstance(input_act1, Node):
741
+ if _is_input_large_scalar(input_act1, gm):
742
+ continue
743
+ if _is_input_non_float_tensor(input_act1):
744
+ continue
745
+ input_qspec_map[input_act1] = input_act_qspec
746
+
747
+ add_node.meta["quantization_annotation"] = QuantizationAnnotation(
748
+ input_qspec_map=input_qspec_map,
749
+ _annotated=True,
750
+ )
751
+ relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
752
+ output_qspec=output_act_qspec,
753
+ _annotated=True,
754
+ )
755
+ return annotated_partitions
756
+
757
+
758
+ @register_annotator("add")
759
+ def _annotate_add(
760
+ gm: torch.fx.GraphModule,
761
+ quantization_config: Optional[QuantizationConfig],
762
+ filter_fn: Optional[Callable[[Node], bool]] = None,
763
+ ) -> Optional[List[List[Node]]]:
764
+ add_partitions = get_source_partitions(
765
+ gm.graph, [operator.add, torch.add, operator.iadd], filter_fn
766
+ )
767
+ add_partitions = list(itertools.chain.from_iterable(add_partitions.values()))
768
+ annotated_partitions = []
769
+ for add_partition in add_partitions:
770
+ annotated_partitions.append(add_partition.nodes)
771
+ add_node = add_partition.output_nodes[0]
772
+ if _is_annotated([add_node]):
773
+ continue
774
+
775
+ input_act_qspec = get_input_act_qspec(quantization_config)
776
+ output_act_qspec = get_output_act_qspec(quantization_config)
777
+
778
+ input_qspec_map = {}
779
+ input_act0 = add_node.args[0]
780
+ if isinstance(input_act0, Node):
781
+ if _is_input_large_scalar(input_act0, gm):
782
+ continue
783
+ if _is_input_non_float_tensor(input_act0):
784
+ continue
785
+ input_qspec_map[input_act0] = input_act_qspec
786
+
787
+ input_act1 = add_node.args[1]
788
+ if isinstance(input_act1, Node):
789
+ if _is_input_large_scalar(input_act1, gm):
790
+ continue
791
+ if _is_input_non_float_tensor(input_act1):
792
+ continue
793
+ input_qspec_map[input_act1] = input_act_qspec
794
+
795
+ add_node.meta["quantization_annotation"] = QuantizationAnnotation(
796
+ input_qspec_map=input_qspec_map,
797
+ output_qspec=output_act_qspec,
798
+ _annotated=True,
799
+ )
800
+ return annotated_partitions
801
+
802
+
803
+ @register_annotator("mul_relu")
804
+ def _annotate_mul_relu(
805
+ gm: torch.fx.GraphModule,
806
+ quantization_config: Optional[QuantizationConfig],
807
+ filter_fn: Optional[Callable[[Node], bool]] = None,
808
+ ) -> Optional[List[List[Node]]]:
809
+ fused_partitions = find_sequential_partitions(
810
+ gm, [torch.mul, torch.nn.ReLU], filter_fn=filter_fn
811
+ )
812
+ annotated_partitions = []
813
+ for fused_partition in fused_partitions:
814
+ mul_partition, relu_partition = fused_partition
815
+ annotated_partitions.append(mul_partition.nodes + relu_partition.nodes)
816
+ if len(relu_partition.output_nodes) > 1:
817
+ raise ValueError("Relu partition has more than one output node")
818
+ relu_node = relu_partition.output_nodes[0]
819
+ if len(mul_partition.output_nodes) > 1:
820
+ raise ValueError("mul partition has more than one output node")
821
+ mul_node = mul_partition.output_nodes[0]
822
+
823
+ if _is_annotated([relu_node, mul_node]):
824
+ continue
825
+
826
+ input_act_qspec = get_input_act_qspec(quantization_config)
827
+ output_act_qspec = get_output_act_qspec(quantization_config)
828
+
829
+ input_qspec_map = {}
830
+ input_act0 = mul_node.args[0]
831
+ if isinstance(input_act0, Node):
832
+ if _is_input_large_scalar(input_act0, gm):
833
+ continue
834
+ if _is_input_non_float_tensor(input_act0):
835
+ continue
836
+ input_qspec_map[input_act0] = input_act_qspec
837
+
838
+ input_act1 = mul_node.args[1]
839
+ if isinstance(input_act1, Node):
840
+ if _is_input_large_scalar(input_act1, gm):
841
+ continue
842
+ if _is_input_non_float_tensor(input_act1):
843
+ continue
844
+ input_qspec_map[input_act1] = input_act_qspec
845
+
846
+ mul_node.meta["quantization_annotation"] = QuantizationAnnotation(
847
+ input_qspec_map=input_qspec_map,
848
+ _annotated=True,
849
+ )
850
+ relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
851
+ output_qspec=output_act_qspec,
852
+ _annotated=True,
853
+ )
854
+ return annotated_partitions
855
+
856
+
857
+ @register_annotator("mul")
858
+ def _annotate_mul(
859
+ gm: torch.fx.GraphModule,
860
+ quantization_config: Optional[QuantizationConfig],
861
+ filter_fn: Optional[Callable[[Node], bool]] = None,
862
+ ) -> Optional[List[List[Node]]]:
863
+ mul_partitions = get_source_partitions(
864
+ gm.graph, ["mul", "mul_", operator.mul, torch.mul, operator.imul], filter_fn
865
+ )
866
+ mul_partitions = list(itertools.chain.from_iterable(mul_partitions.values()))
867
+ annotated_partitions = []
868
+ for mul_partition in mul_partitions:
869
+ annotated_partitions.append(mul_partition.nodes)
870
+ mul_node = mul_partition.output_nodes[0]
871
+ if _is_annotated([mul_node]):
872
+ continue
873
+
874
+ input_act_qspec = get_input_act_qspec(quantization_config)
875
+ output_act_qspec = get_output_act_qspec(quantization_config)
876
+
877
+ input_qspec_map = {}
878
+ input_act0 = mul_node.args[0]
879
+ if isinstance(input_act0, Node):
880
+ if _is_input_large_scalar(input_act0, gm):
881
+ continue
882
+ if _is_input_non_float_tensor(input_act0):
883
+ continue
884
+ input_qspec_map[input_act0] = input_act_qspec
885
+
886
+ input_act1 = mul_node.args[1]
887
+ if isinstance(input_act1, Node):
888
+ if _is_input_large_scalar(input_act1, gm):
889
+ continue
890
+ if _is_input_non_float_tensor(input_act1):
891
+ continue
892
+ input_qspec_map[input_act1] = input_act_qspec
893
+
894
+ mul_node.meta["quantization_annotation"] = QuantizationAnnotation(
895
+ input_qspec_map=input_qspec_map,
896
+ output_qspec=output_act_qspec,
897
+ _annotated=True,
898
+ )
899
+ return annotated_partitions
900
+
901
+
902
+ # TODO: remove Optional in return type, fix annotated_partitions logic
903
+ @register_annotator("cat")
904
+ def _annotate_cat(
905
+ gm: torch.fx.GraphModule,
906
+ quantization_config: Optional[QuantizationConfig],
907
+ filter_fn: Optional[Callable[[Node], bool]] = None,
908
+ ) -> Optional[List[List[Node]]]:
909
+ cat_partitions = get_source_partitions(gm.graph, [torch.cat], filter_fn)
910
+ cat_partitions = list(itertools.chain.from_iterable(cat_partitions.values()))
911
+ annotated_partitions = []
912
+ for cat_partition in cat_partitions:
913
+ cat_node = cat_partition.output_nodes[0]
914
+ if _is_annotated([cat_node]):
915
+ continue
916
+
917
+ if cat_node.target != torch.ops.aten.cat.default:
918
+ # TODO: change this to AnnotationException
919
+ raise Exception(
920
+ f"Expected cat node: torch.ops.aten.cat.default, but found {cat_node.target}"
921
+ " please check if you are calling the correct capture API"
922
+ )
923
+
924
+ annotated_partitions.append(cat_partition.nodes)
925
+
926
+ input_act_qspec = get_input_act_qspec(quantization_config)
927
+ inputs = cat_node.args[0]
928
+
929
+ input_qspec_map = {}
930
+ input_act0 = inputs[0]
931
+ if isinstance(input_act0, Node):
932
+ input_qspec_map[input_act0] = input_act_qspec
933
+
934
+ shared_with_input0_qspec = SharedQuantizationSpec((input_act0, cat_node))
935
+ for input_act in inputs[1:]:
936
+ input_qspec_map[input_act] = shared_with_input0_qspec
937
+
938
+ output_act_qspec = shared_with_input0_qspec
939
+
940
+ cat_node.meta["quantization_annotation"] = QuantizationAnnotation(
941
+ input_qspec_map=input_qspec_map,
942
+ output_qspec=output_act_qspec,
943
+ _annotated=True,
944
+ )
945
+ return annotated_partitions
946
+
947
+
948
+ def _is_share_obs_or_fq_op(op: Callable) -> bool:
949
+ return op in [
950
+ torch.ops.aten.hardtanh.default,
951
+ torch.ops.aten.hardtanh_.default,
952
+ torch.ops.aten.mean.default,
953
+ torch.ops.aten.mean.dim,
954
+ torch.ops.aten.permute.default,
955
+ torch.ops.aten.permute_copy.default,
956
+ torch.ops.aten.squeeze.dim,
957
+ torch.ops.aten.squeeze_copy.dim,
958
+ # TODO: remove?
959
+ torch.ops.aten.adaptive_avg_pool2d.default,
960
+ torch.ops.aten.view_copy.default,
961
+ torch.ops.aten.view.default,
962
+ torch.ops.aten.slice_copy.Tensor,
963
+ torch.ops.aten.flatten.using_ints,
964
+ ]
965
+
966
+
967
+ def propagate_annotation(model: torch.fx.GraphModule) -> None:
968
+ for n in model.graph.nodes:
969
+ if n.op != "call_function" or not _is_share_obs_or_fq_op(n.target):
970
+ continue
971
+
972
+ prev_node = n.args[0]
973
+ if not isinstance(prev_node, Node):
974
+ continue
975
+
976
+ quantization_annotation = prev_node.meta.get("quantization_annotation", None)
977
+ if not quantization_annotation:
978
+ continue
979
+
980
+ output_qspec = quantization_annotation.output_qspec
981
+ if not output_qspec:
982
+ continue
983
+
984
+ # make sure current node is not annotated
985
+ if (
986
+ "quantization_annotation" in n.meta
987
+ and n.meta["quantization_annotation"]._annotated
988
+ ):
989
+ continue
990
+
991
+ shared_qspec = SharedQuantizationSpec(prev_node)
992
+ # propagate the previous output_qspec to the current node
993
+ n.meta["quantization_annotation"] = QuantizationAnnotation(
994
+ input_qspec_map={
995
+ prev_node: shared_qspec,
996
+ },
997
+ output_qspec=shared_qspec,
998
+ _annotated=True,
999
+ )
1000
+
1001
+
1002
+ # TODO: make the list of ops customizable
1003
+ def _convert_scalars_to_attrs(model: torch.fx.GraphModule) -> torch.fx.GraphModule:
1004
+ for n in model.graph.nodes:
1005
+ if n.op != "call_function" or n.target not in [
1006
+ torch.ops.aten.add.Tensor,
1007
+ torch.ops.aten.mul.Tensor,
1008
+ ]:
1009
+ continue
1010
+ args = list(n.args)
1011
+ new_args = []
1012
+ for i in range(len(args)):
1013
+ if isinstance(args[i], torch.fx.Node):
1014
+ new_args.append(args[i])
1015
+ continue
1016
+ prefix = "_tensor_constant_"
1017
+ get_new_attr_name = get_new_attr_name_with_prefix(prefix)
1018
+ tensor_constant_name = get_new_attr_name(model)
1019
+ float_tensor = torch.tensor(float(args[i]))
1020
+ model.register_buffer(tensor_constant_name, float_tensor)
1021
+ fake_mode = n.meta["val"].fake_mode
1022
+ with model.graph.inserting_before(n):
1023
+ get_attr_node = model.graph.create_node(
1024
+ "get_attr", tensor_constant_name, (), {}
1025
+ )
1026
+ get_attr_node.meta["val"] = fake_mode.from_tensor(
1027
+ float_tensor, static_shapes=True
1028
+ )
1029
+ new_args.append(get_attr_node)
1030
+ n.args = tuple(new_args)
1031
+ model.recompile()
1032
+ return model
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h ADDED
@@ -0,0 +1,351 @@
1
+ #pragma once
2
+ #include <ATen/core/function.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <torch/csrc/jit/api/function_impl.h>
5
+ #include <torch/csrc/jit/frontend/name_mangler.h>
6
+ #include <torch/csrc/jit/frontend/source_range.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+ #include <torch/csrc/jit/runtime/graph_executor.h>
9
+
10
+ #include <torch/csrc/Export.h>
11
+
12
+ #include <ATen/core/function_schema.h>
13
+ #include <ATen/core/qualified_name.h>
14
+ #include <c10/util/ArrayRef.h>
15
+ #include <c10/util/Optional.h>
16
+
17
+ #include <functional>
18
+ #include <memory>
19
+ #include <mutex>
20
+ #include <ostream>
21
+ #include <string>
22
+ #include <unordered_map>
23
+ #include <vector>
24
+
25
+ namespace torch::jit {
26
+
27
+ struct Def;
28
+ struct Property;
29
+ struct ClassDef;
30
+ struct SugaredValue;
31
+ struct Resolver;
32
+
33
+ using ResolverPtr = std::shared_ptr<Resolver>;
34
+ struct Self {
35
+ virtual ~Self() = default;
36
+ virtual std::shared_ptr<SugaredValue> makeSugared(Value* v) const = 0;
37
+ virtual ClassTypePtr getClassType() const = 0;
38
+ };
39
+
40
+ // A CompilationUnit is a list of named Functions
41
+ // with helper methods to iterate the list or invoke the function.
42
+ // Classes have a CompilationUnit holding the class methods,
43
+ // and Modules have a CompilationUnit holding the Functions that
44
+ // are used to implement their Methods
45
+
46
+ struct TORCH_API CompilationUnit {
47
+ enum class FunctionType { Method, Hook, PreHook };
48
+ // constructor that takes a set of functions to compile using the native
49
+ // resolver
50
+ explicit CompilationUnit(const std::string& source);
51
+ CompilationUnit() = default;
52
+
53
+ CompilationUnit& operator=(CompilationUnit&&) = default;
54
+ CompilationUnit(CompilationUnit&&) = default;
55
+ CompilationUnit& operator=(const CompilationUnit&) = delete;
56
+ CompilationUnit(const CompilationUnit&) = delete;
57
+
58
+ Function* find_function(const c10::QualifiedName& name) const {
59
+ auto it = dict_.find(name);
60
+ if (it == dict_.end()) {
61
+ return nullptr;
62
+ }
63
+ return functions_[it->second].get();
64
+ }
65
+
66
+ Function& get_function(const c10::QualifiedName& name) const {
67
+ if (auto r = find_function(name)) {
68
+ return *r;
69
+ }
70
+ TORCH_CHECK(false, "attempted to get undefined function ", name.name());
71
+ }
72
+
73
+ void set_optimized(bool o) {
74
+ TORCH_WARN(
75
+ "CompilationUnit::set_optimized() is deprecated and has no effect. "
76
+ "Please use setGraphExecutorOptimize()");
77
+ }
78
+
79
+ bool is_optimized() const {
80
+ TORCH_WARN(
81
+ "CompilationUnit::is_optimized() is deprecated and always returns true. "
82
+ "Please use getGraphExecutorOptimize()");
83
+ return true;
84
+ }
85
+
86
+ // for historic reasons, these are defined in ir_emitter.cpp
87
+ // Returns the list of Functions just defined.
88
+ std::vector<Function*> define(
89
+ const c10::optional<c10::QualifiedName>& prefix,
90
+ const std::vector<Property>& properties,
91
+ const std::vector<ResolverPtr>& propResolvers,
92
+ const std::vector<Def>& definitions,
93
+ const std::vector<ResolverPtr>&
94
+ defResolvers, /* determines how we handle free
95
+ variables in each definition*/
96
+ // if non-null, the first argument to each def, is bound to this value
97
+ const Self* self,
98
+ // see [name mangling]
99
+ bool shouldMangle = false,
100
+ c10::optional<size_t> operator_set_version = c10::nullopt);
101
+
102
+ void define_hooks(
103
+ const c10::optional<c10::QualifiedName>& prefix,
104
+ const std::vector<Def>& hookDefs,
105
+ const std::vector<ResolverPtr>& hookResolvers,
106
+ const std::vector<Def>& preHookDefs,
107
+ const std::vector<ResolverPtr>& preHookResolvers,
108
+ const Self* self,
109
+ bool shouldMangle = false);
110
+
111
+ // same as above but parse the definitions from source
112
+ // Returns the list of Functions just defined.
113
+ std::vector<Function*> define(
114
+ // prefix namespace to put all the defined functions into
115
+ const c10::optional<c10::QualifiedName>& prefix,
116
+ const std::string& source,
117
+ const ResolverPtr& resolver,
118
+ const Self* self);
119
+
120
+ void define_interface(
121
+ const c10::QualifiedName& qualifiedName,
122
+ const ClassDef& classDef,
123
+ ResolverPtr rcb,
124
+ bool is_module = false);
125
+
126
+ Function* create_function(
127
+ c10::QualifiedName name,
128
+ std::shared_ptr<Graph> graph,
129
+ bool shouldMangle = false) {
130
+ if (shouldMangle) {
131
+ name = mangle(name);
132
+ }
133
+ auto fn = std::make_unique<GraphFunction>(
134
+ std::move(name), std::move(graph), nullptr);
135
+ auto ret = fn.get();
136
+ register_function(std::move(fn));
137
+ return ret;
138
+ }
139
+
140
+ std::vector<Function*> get_functions() const {
141
+ return fmap(functions_, [](const std::unique_ptr<Function>& fn) {
142
+ return fn.get();
143
+ });
144
+ }
145
+
146
+ /// Run a method from this compilation.
147
+ ///
148
+ /// For example:
149
+ /// @code
150
+ /// IValue output = module->run("relu_script", a, b);
151
+ /// @endcode
152
+ ///
153
+ /// To get a compile a module from a source string, see torch::jit::compile
154
+ ///
155
+ /// @param method_name The name of the method to run
156
+ /// @param args Arguments to be passed to the method
157
+ /// @return An IValue containing the return value (or values if it is a tuple)
158
+ /// from the method
159
+ template <typename... Types>
160
+ IValue run_method(const c10::QualifiedName& method_name, Types&&... args) {
161
+ return get_function(method_name)({IValue(std::forward<Types>(args))...});
162
+ }
163
+
164
+ void drop_all_functions() {
165
+ dict_.clear();
166
+ functions_.clear();
167
+ }
168
+
169
+ /**
170
+ * Register a class as being owned by this compilation unit.
171
+ */
172
+ void register_type(c10::NamedTypePtr namedType) {
173
+ // TODO: class types cannot be redefined because we have no way right now
174
+ // of invalidating their methods. NamedTuples are fine though, since they
175
+ // don't have methods.
176
+ TORCH_CHECK(
177
+ 0 == classDict_.count(*namedType->name()),
178
+ "class '",
179
+ namedType->name()->qualifiedName(),
180
+ "' already defined.");
181
+ classes_.push_back(std::move(namedType));
182
+ classDict_[*classes_.back()->name()] = classes_.size() - 1;
183
+ };
184
+
185
+ c10::ClassTypePtr get_class(const c10::QualifiedName& name) const {
186
+ auto type = get_type(name);
187
+ if (!type) {
188
+ return nullptr;
189
+ }
190
+ return type->cast<c10::ClassType>();
191
+ }
192
+
193
+ c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const {
194
+ auto type = get_type(name);
195
+ if (!type) {
196
+ return nullptr;
197
+ }
198
+ return type->cast<c10::InterfaceType>();
199
+ }
200
+
201
+ c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const {
202
+ for (const auto& cls : classes_) {
203
+ if (cls->name()->qualifiedName() == name.qualifiedName()) {
204
+ return cls->expect<TupleType>();
205
+ }
206
+ }
207
+ return nullptr;
208
+ }
209
+
210
+ c10::NamedTypePtr get_type(const c10::QualifiedName& name) const {
211
+ auto it = classDict_.find(name);
212
+ if (it == classDict_.end()) {
213
+ return nullptr;
214
+ }
215
+ return classes_[it->second];
216
+ }
217
+
218
+ // For testing: clear all Python-defined classes to ensure that unit tests
219
+ // have isolation.
220
+ void _clear_python_cu() {
221
+ // Delete all the associated class methods
222
+ for (const auto& type : classes_) {
223
+ if (auto cls = type->cast<ClassType>()) {
224
+ for (auto method : cls->methods()) {
225
+ // Tombstone the method in the compilation unit.
226
+ // Don't erase because the dict_
227
+ auto it = dict_.find(method->qualname());
228
+ if (it != dict_.end()) {
229
+ functions_[it->second] = nullptr;
230
+ // Erase in our big lookup table
231
+ dict_.erase(it);
232
+ }
233
+ }
234
+ // Classes can have multiple pointers to the same hook,
235
+ // need to make sure to not delete it twice
236
+ std::unordered_set<Function*> hooks_to_delete;
237
+ for (const auto& hook : cls->getForwardHooks()) {
238
+ hooks_to_delete.insert(hook);
239
+ }
240
+ for (const auto& pre_hook : cls->getForwardPreHooks()) {
241
+ hooks_to_delete.insert(pre_hook);
242
+ }
243
+ for (const auto& hook : hooks_to_delete) {
244
+ // Tombstone the hook in the compilation unit.
245
+ auto it = dict_.find(hook->qualname());
246
+ if (it != dict_.end()) {
247
+ functions_[it->second] = nullptr;
248
+ // Erase in our big lookup table
249
+ dict_.erase(it);
250
+ }
251
+ }
252
+ }
253
+ }
254
+ classes_.clear();
255
+ classDict_.clear();
256
+ }
257
+
258
+ // [Internal Only] Remove method.
259
+ // Note Used for freezing.
260
+ void unsafeRemoveMethod(const c10::QualifiedName& method_name) {
261
+ auto it = dict_.find(method_name);
262
+ TORCH_CHECK(
263
+ it != dict_.end(),
264
+ "method '",
265
+ method_name.qualifiedName(),
266
+ "' does not exist.");
267
+ functions_[it->second] = nullptr;
268
+ dict_.erase(it);
269
+ }
270
+
271
+ // [name mangling] All code objects must have a unique qualified name in a
272
+ // CompilationUnit. In Python, sometimes functions won't have unique qualified
273
+ // name (for example, nested functions). So we mangle Python functions to
274
+ // ensure that they are uniquely named.
275
+ //
276
+ // We also use mangling to distinguish different Module instances. Since each
277
+ // Module is a singleton class instance, different instances of the same
278
+ // Python Module will have different types but the same qualified name.
279
+ c10::QualifiedName mangle(const c10::QualifiedName& name) const {
280
+ auto mangled = name;
281
+ while (get_type(mangled) || find_function(mangled)) {
282
+ mangled = mangler_.mangle(mangled);
283
+ }
284
+ return mangled;
285
+ }
286
+
287
+ private:
288
+ std::unique_ptr<Function> define(
289
+ const c10::optional<c10::QualifiedName>& prefix,
290
+ const Def& def,
291
+ const ResolverPtr& resolver,
292
+ const Self* self,
293
+ const std::unordered_map<std::string, Function*>& function_table,
294
+ bool shouldMangle = false,
295
+ FunctionType type = FunctionType::Method,
296
+ c10::optional<size_t> version = c10::nullopt) const;
297
+
298
+ // Define a property on \p self.
299
+ struct PropertyPair;
300
+ PropertyPair define_property(
301
+ const c10::optional<c10::QualifiedName>& prefix,
302
+ const Property& prop,
303
+ const ResolverPtr& resolver,
304
+ const Self* self,
305
+ const std::unordered_map<std::string, Function*>& function_table,
306
+ bool shouldMangle = false) const;
307
+
308
+ Function& register_function(std::unique_ptr<Function> fn) {
309
+ TORCH_CHECK(
310
+ 0 == dict_.count(fn->qualname().qualifiedName()),
311
+ "method '",
312
+ fn->qualname().qualifiedName(),
313
+ "' already defined.");
314
+ functions_.emplace_back(std::move(fn));
315
+ dict_[functions_.back()->qualname()] = functions_.size() - 1;
316
+ return *functions_.back();
317
+ }
318
+ std::vector<std::unique_ptr<Function>> functions_;
319
+ // for fast lookup
320
+ std::unordered_map<c10::QualifiedName, size_t> dict_;
321
+ std::unordered_map<c10::QualifiedName, size_t> classDict_;
322
+
323
+ // [class ownership] Right now there are two relationships between classes
324
+ // and compilation units:
325
+ // 1. Classes have compilation units internally that hold their methods.
326
+ // 2. On load, the TypePtrs of any imported classes are owned by the main
327
+ // module's compilation unit.
328
+ std::vector<c10::NamedTypePtr> classes_;
329
+
330
+ mutable NameMangler mangler_;
331
+ };
332
+
333
+ // An owning pointer to a Function. Just a pair of a raw Function ptr and its
334
+ // owning CU. We need this because pybind requires a ref-counted way to refer to
335
+ // Functions.
336
+ struct StrongFunctionPtr {
337
+ StrongFunctionPtr(std::shared_ptr<CompilationUnit> cu, Function* function)
338
+ : cu_(std::move(cu)), function_(function) {
339
+ TORCH_INTERNAL_ASSERT(cu_);
340
+ TORCH_INTERNAL_ASSERT(function_);
341
+ }
342
+ std::shared_ptr<CompilationUnit> cu_;
343
+ Function* function_;
344
+ };
345
+
346
+ namespace script {
347
+ // We once had a `script::` namespace that was deleted. This is for backcompat
348
+ // of the public API; new code should not use this type alias.
349
+ using CompilationUnit = ::torch::jit::CompilationUnit;
350
+ } // namespace script
351
+ } // namespace torch::jit
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h ADDED
@@ -0,0 +1,181 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/runtime/graph_executor.h>
6
+
7
+ namespace torch::jit {
8
+
9
+ struct TORCH_API GraphFunction : public Function {
10
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
11
+ GraphFunction(
12
+ c10::QualifiedName name,
13
+ std::shared_ptr<Graph> graph,
14
+ std::function<void(GraphFunction&)> function_creator,
15
+ c10::optional<ExecutorExecutionMode> executor_execution_mode =
16
+ c10::nullopt)
17
+ : name_(std::move(name)),
18
+ graph_(std::move(graph)),
19
+ executor_execution_mode_(executor_execution_mode),
20
+ function_creator_(std::move(function_creator)) {}
21
+
22
+ bool isGraphFunction() const override {
23
+ return true;
24
+ }
25
+
26
+ void run(Stack& stack) override;
27
+
28
+ std::function<void(GraphFunction&)> function_creator() const {
29
+ return function_creator_;
30
+ }
31
+
32
+ c10::intrusive_ptr<c10::ivalue::Future> runAsync(
33
+ Stack& stack,
34
+ TaskLauncher taskLauncher = at::launch) override;
35
+
36
+ std::shared_ptr<Graph> graph() const {
37
+ return graph_;
38
+ }
39
+
40
+ std::shared_ptr<Graph> optimized_graph() const;
41
+
42
+ const c10::QualifiedName& qualname() const override {
43
+ return name_;
44
+ }
45
+
46
+ // private/unstable api. sets the initial execution mode
47
+ // will not affect executor if there is an existing executor
48
+ // created for this function
49
+ void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) {
50
+ executor_execution_mode_ = mode;
51
+ }
52
+ // private/unstable api. sets flag of whether or not to ignore amp.
53
+ // will not affect executor if there is an existing executor
54
+ // created for this function
55
+ void _set_ignore_amp(bool ignore_amp) {
56
+ force_no_amp_ = ignore_amp;
57
+ }
58
+
59
+ // if this isn't yet defined, run its method_creator function
60
+ void ensure_defined() override;
61
+
62
+ size_t num_inputs() const override {
63
+ return graph()->inputs().size();
64
+ }
65
+
66
+ Function& setSchema(FunctionSchema schema) override {
67
+ schema_ = std::make_unique<FunctionSchema>(std::move(schema));
68
+ return *this;
69
+ }
70
+
71
+ const FunctionSchema& getSchema() const override;
72
+
73
+ GraphExecutorState getDebugState() {
74
+ return get_executor().getDebugState();
75
+ }
76
+
77
+ bool is_optimized() const {
78
+ TORCH_WARN(
79
+ "GraphFunction::is_optimized() is deprecated and always returns true. "
80
+ "Please use getGraphExecutorOptimize()");
81
+ return true;
82
+ }
83
+
84
+ void check_single_output() {
85
+ TORCH_CHECK(
86
+ graph()->outputs().size() == 1,
87
+ "Method (but not graphs in general) require a single output. Use None/Tuple for 0 or 2+ outputs");
88
+ }
89
+
90
+ GraphExecutor& get_executor() {
91
+ ensure_defined();
92
+ std::lock_guard<std::recursive_mutex> lock(compile_mutex);
93
+ auto& executor = executors_[currentSpecialization()];
94
+ if (executor) {
95
+ return *executor;
96
+ }
97
+ check_single_output();
98
+ const std::string& name = name_.name();
99
+ std::shared_ptr<Graph> opt_graph = optimized_graph();
100
+ if (!executor_execution_mode_) {
101
+ executor = GraphExecutor(opt_graph, name);
102
+ } else {
103
+ executor = GraphExecutor(opt_graph, name, *executor_execution_mode_);
104
+ }
105
+ return *executor;
106
+ }
107
+
108
+ using Function::call;
109
+ bool call(
110
+ Stack& stack,
111
+ c10::optional<size_t> bailOut,
112
+ c10::function_ref<void(const Code&)> f) override {
113
+ f(get_executor().getPlanFor(stack, bailOut).code);
114
+ return true;
115
+ }
116
+
117
+ void clear_optimized_graphs() {
118
+ optimized_graphs_.fill(nullptr);
119
+ }
120
+
121
+ private:
122
+ enum SpecializationKey {
123
+ AutocastOff,
124
+ CpuAutocastOn,
125
+ GpuAutocastOn,
126
+ CpuGpuAutocastOn,
127
+
128
+ // This provides the number of specializations
129
+ // (Must be last entry)
130
+ TotalCount
131
+ };
132
+
133
+ SpecializationKey currentSpecialization() const;
134
+
135
+ private:
136
+ c10::QualifiedName name_;
137
+ // The original, non-optimized graph
138
+ std::shared_ptr<Graph> graph_; // for debugging and for inlining
139
+
140
+ // allows users to specify Simple/Profiling Executor for function
141
+ // TODO: add more executors
142
+ mutable c10::optional<ExecutorExecutionMode> executor_execution_mode_;
143
+
144
+ // if invoked on a graph that has already traced through amp
145
+ // don't invoke amp pass
146
+ mutable bool force_no_amp_ = false;
147
+ // Optimized graph, computed lazily. Used for inlining.
148
+ mutable std::array<std::shared_ptr<Graph>, SpecializationKey::TotalCount>
149
+ optimized_graphs_;
150
+
151
+ // GraphFunctions are invokable from multiple threads, so this lock needs to
152
+ // be held when we're initializing graph executor for the first time or
153
+ // computing the optimized graph. We're using reentrant mutex so that we don't
154
+ // need to worry about causing a deadlock by calling one method from another
155
+ // (e.g. optimized_graph() from get_executor()).
156
+ mutable std::recursive_mutex compile_mutex;
157
+
158
+ // executor_[0] - autocast off
159
+ // executor_[1] - autocast cpu on
160
+ // executor_[2] - autocast gpu on
161
+ // executor_[3] - autocast cpu & gpu on
162
+ std::array<c10::optional<GraphExecutor>, SpecializationKey::TotalCount>
163
+ executors_;
164
+
165
+ // an optional function that actually creates the method when
166
+ // ensure_defined() is called. This is used by the compiler so
167
+ // that it can construct methods out of order
168
+ std::function<void(GraphFunction&)> function_creator_;
169
+
170
+ // if absent, then we generate a default schema based on the graph
171
+ // mutable because getSchema caches the default schema if one is requested
172
+ // before a call to setSchema
173
+ mutable std::unique_ptr<FunctionSchema> schema_;
174
+ };
175
+
176
+ // Short hands for dynamic_cast<GraphFunction*>.
177
+ TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
178
+ TORCH_API GraphFunction& toGraphFunction(Function&);
179
+ TORCH_API const GraphFunction& toGraphFunction(const Function&);
180
+
181
+ } // namespace torch::jit
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h ADDED
@@ -0,0 +1,84 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <torch/csrc/api/include/torch/imethod.h>
7
+ #include <torch/csrc/jit/api/function_impl.h>
8
+
9
+ namespace torch::jit {
10
+
11
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
12
+
13
+ // A method in a module, e.g. f in:
14
+ //
15
+ // class M(ScriptModule):
16
+ // @script_method
17
+ // def f(self, x):
18
+ // ...
19
+ // Note: because Method/Module are exposed to python these
20
+ // classes use python method naming conventions
21
+ struct TORCH_API Method : public torch::IMethod {
22
+ Method(ObjectPtr owner, Function* function);
23
+
24
+ // the module that contains this method.
25
+ Module owner() const;
26
+ // the raw objectptr that owns this method, for when the method is owned by a
27
+ // torchbind object.
28
+ ObjectPtr raw_owner() const;
29
+ void run(Stack& stack);
30
+ void run(Stack&& stack) {
31
+ run(stack);
32
+ }
33
+
34
+ c10::IValue operator()(
35
+ std::vector<c10::IValue> stack,
36
+ const Kwargs& kwargs = Kwargs()) const override;
37
+
38
+ // Run method async. Invocation on this function would invokes a JIT
39
+ // interpreter that executes ops inline, one by one, on caller's thread. A
40
+ // model can utilize async op, i.e. `fork`, to launch an asynchronous task
41
+ // which will be launched on provided `taskLauncher`.
42
+ c10::intrusive_ptr<c10::ivalue::Future> run_async(
43
+ std::vector<c10::IValue> stack,
44
+ const Kwargs& kwargs = Kwargs(),
45
+ TaskLauncher taskLauncher = at::launch);
46
+
47
+ std::shared_ptr<Graph> graph() const {
48
+ return toGraphFunction(*function_).graph();
49
+ }
50
+
51
+ const std::string& name() const override {
52
+ return function_->name();
53
+ }
54
+
55
+ size_t num_inputs() const {
56
+ return function_->num_inputs();
57
+ }
58
+
59
+ GraphExecutor& get_executor() {
60
+ return toGraphFunction(*function_).get_executor();
61
+ }
62
+
63
+ Function& function() const {
64
+ return *function_;
65
+ }
66
+
67
+ private:
68
+ void setArgumentNames(std::vector<std::string>&) const override;
69
+
70
+ // Methods are uniquely owned by a single module. This raw pointer allows
71
+ // looking up the module.
72
+ ObjectPtr owner_;
73
+
74
+ // Underlying unbound function
75
+ Function* function_;
76
+ };
77
+
78
+ namespace script {
79
+ // We once had a `script::` namespace that was deleted. This is for backcompat
80
+ // of the public API; new code should not use this type alias.
81
+ using Method = ::torch::jit::Method;
82
+ } // namespace script
83
+
84
+ } // namespace torch::jit
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h ADDED
@@ -0,0 +1,685 @@
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <torch/csrc/autograd/variable.h>
4
+ #include <torch/csrc/jit/api/object.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+ #include <torch/csrc/jit/ir/named_value.h>
8
+ #include <torch/csrc/jit/runtime/argument_spec.h>
9
+ #include <torch/csrc/jit/runtime/graph_executor.h>
10
+
11
+ #include <torch/csrc/Export.h>
12
+ #include <torch/csrc/api/include/torch/ordered_dict.h>
13
+ #include <torch/csrc/jit/api/compilation_unit.h>
14
+
15
+ #include <ATen/core/function_schema.h>
16
+ #include <ATen/core/qualified_name.h>
17
+ #include <c10/util/ArrayRef.h>
18
+ #include <c10/util/Optional.h>
19
+ #include <c10/util/irange.h>
20
+
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <mutex>
24
+ #include <ostream>
25
+ #include <string>
26
+ #include <unordered_map>
27
+ #include <unordered_set>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ // This file contains classes which assist in desugaring Python style
32
+ // modules and their methods into flattened graphs which don't have any
33
+ // function calls.
34
+
35
+ namespace torch::jit {
36
+
37
+ using ::c10::Argument;
38
+ using ::c10::FunctionSchema;
39
+ using ::c10::QualifiedName;
40
+ // Map which stores filename to content.
41
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
42
+
43
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
44
+
45
+ struct Module;
46
+
47
+ template <typename T>
48
+ struct slot_list_impl;
49
+
50
+ template <typename T>
51
+ struct Named {
52
+ std::string name;
53
+ T value;
54
+ };
55
+
56
+ using NameModule = Named<Module>;
57
+ using NameValue = Named<IValue>;
58
+ using NameTensor = Named<at::Tensor>;
59
+
60
+ namespace detail {
61
+ struct TORCH_API ModulePolicy;
62
+ struct TORCH_API ParameterPolicy;
63
+ struct TORCH_API AttributePolicy;
64
+ struct TORCH_API BufferPolicy;
65
+ template <typename P>
66
+ struct NamedPolicy;
67
+ } // namespace detail
68
+
69
+ using module_list = slot_list_impl<detail::ModulePolicy>;
70
+ using named_module_list =
71
+ slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>;
72
+
73
+ using parameter_list = slot_list_impl<detail::ParameterPolicy>;
74
+ using named_parameter_list =
75
+ slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>;
76
+
77
+ using attribute_list = slot_list_impl<detail::AttributePolicy>;
78
+ using named_attribute_list =
79
+ slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>;
80
+
81
+ using buffer_list = slot_list_impl<detail::BufferPolicy>;
82
+ using named_buffer_list =
83
+ slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>;
84
+
85
+ using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;
86
+
87
+ struct TORCH_API Module : public Object {
88
+ explicit Module(c10::QualifiedName class_name);
89
+ Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
90
+ Module() = default;
91
+ Module(const Module&) = default;
92
+ Module& operator=(const Module&) = default;
93
+ Module(Module&&) noexcept = default;
94
+ Module& operator=(Module&&) noexcept = default;
95
+ Module(
96
+ c10::QualifiedName,
97
+ std::shared_ptr<CompilationUnit> cu,
98
+ bool shouldMangle = false);
99
+ Module(ModulePtr module_value) : Object(std::move(module_value)) {}
100
+ ~Module() = default;
101
+
102
+ void set_optimized(bool o) {
103
+ TORCH_WARN(
104
+ "Module::set_optimized() is deprecated and has no effect. "
105
+ "Please use setGraphExecutorOptimize()");
106
+ }
107
+
108
+ bool is_optimized() const {
109
+ TORCH_WARN(
110
+ "Module::is_optimized() is deprecated and always returns true. "
111
+ "Please use getGraphExecutorOptimize()");
112
+ return true;
113
+ }
114
+
115
+ IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) {
116
+ return get_method("forward")(std::move(inputs), kwargs);
117
+ }
118
+
119
+ // In script modules, buffers are Tensor attributes that are _not_ registered
120
+ // as parameters. This is different from nn.Module, where there is a special
121
+ // register_buffer method. With this simplification, we only need to track
122
+ // whether a slot is a parameter to be able to classify it.
123
+ void register_buffer(const std::string& name, at::Tensor v) {
124
+ bool is_param = false;
125
+ bool is_buffer = true;
126
+ std::lock_guard<std::mutex> lock(*register_mutex_);
127
+ type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer);
128
+ _ivalue()->setAttr(name, std::move(v));
129
+ }
130
+
131
+ void register_parameter(
132
+ const std::string& name,
133
+ at::Tensor v,
134
+ bool is_buffer) {
135
+ std::lock_guard<std::mutex> lock(*register_mutex_);
136
+ type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer);
137
+ _ivalue()->setAttr(name, std::move(v));
138
+ }
139
+
140
+ void register_attribute(
141
+ const std::string& name,
142
+ const TypePtr& t,
143
+ IValue v,
144
+ bool is_param = false,
145
+ bool is_buffer = false) {
146
+ type()->addOrCheckAttribute(name, t, is_param, is_buffer);
147
+ _ivalue()->setAttr(name, std::move(v));
148
+ }
149
+
150
+ void register_module(const std::string& name, const Module& module) {
151
+ type()->addOrCheckAttribute(name, module.type());
152
+ _ivalue()->setAttr(name, module._ivalue());
153
+ }
154
+
155
+ void apply(const std::function<void(Module&)>& fn);
156
+
157
+ buffer_list buffers(bool recurse = true) const;
158
+ named_buffer_list named_buffers(bool recurse = true) const;
159
+
160
+ module_list children() const; // direct modules
161
+ named_module_list named_children() const;
162
+ module_list modules() const; // all modules, including this one, recursively
163
+ named_module_list named_modules() const;
164
+
165
+ // all tensors involved in gradient optimization
166
+ parameter_list parameters(bool recurse = true) const;
167
+ named_parameter_list named_parameters(bool recurse = true) const;
168
+
169
+ // all members of the object, similar to iterating over dir(obj) in python
170
+ attribute_list attributes(bool recurse = true) const;
171
+ named_attribute_list named_attributes(bool recurse = true) const;
172
+
173
+ void dump(
174
+ bool print_method_bodies,
175
+ bool print_attr_values,
176
+ bool print_param_values) const;
177
+
178
+ std::string dump_to_str(
179
+ bool print_method_bodies,
180
+ bool print_attr_values,
181
+ bool print_param_values) const;
182
+
183
+ /// Enables "training" mode.
184
+ void train(bool on = true);
185
+ /// Calls train(false) to enable "eval" mode.
186
+ /// Do not override this method, override `train()` instead.
187
+ void eval() {
188
+ train(/*on=*/false);
189
+ }
190
+ /// True if the module is in training mode.
191
+ bool is_training() const {
192
+ return attr("training", true).toBool();
193
+ }
194
+
195
+ /// Recursively casts all parameters to the given `dtype` and `device`.
196
+ ///
197
+ /// If `non_blocking` is true and the source is in pinned memory and
198
+ /// destination is on the GPU or vice versa, the copy is performed
199
+ /// asynchronously with respect to the host. Otherwise, the argument has no
200
+ /// effect.
201
+ void to(at::Device device, at::ScalarType dtype, bool non_blocking = false);
202
+
203
+ /// Recursively casts all parameters to the given dtype.
204
+ ///
205
+ /// If `non_blocking` is true and the source is in pinned memory and
206
+ /// destination is on the GPU or vice versa, the copy is performed
207
+ /// asynchronously with respect to the host. Otherwise, the argument has no
208
+ /// effect.
209
+ void to(at::ScalarType dtype, bool non_blocking = false);
210
+
211
+ /// Recursively moves all parameters to the given device.
212
+ ///
213
+ /// If `non_blocking` is true and the source is in pinned memory and
214
+ /// destination is on the GPU or vice versa, the copy is performed
215
+ /// asynchronously with respect to the host. Otherwise, the argument has no
216
+ /// effect.
217
+ void to(at::Device device, bool non_blocking = false);
218
+
219
+ void save(
220
+ std::ostream& out,
221
+ const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
222
+
223
+ void save(
224
+ const std::string& filename,
225
+ const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
226
+
227
+ void _save_for_mobile(
228
+ std::ostream& out,
229
+ const ExtraFilesMap& extra_files = ExtraFilesMap(),
230
+ bool save_mobile_debug_info = false,
231
+ bool use_flatbuffer = false) const;
232
+
233
+ void _save_for_mobile(
234
+ const std::string& filename,
235
+ const ExtraFilesMap& extra_files = ExtraFilesMap(),
236
+ bool save_mobile_debug_info = false,
237
+ bool use_flatbuffer = false) const;
238
+
239
+ Module copy() const;
240
+
241
+ Module deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
242
+
243
+ // Clones both the underlying `ClassType` and the module instance (data); this
244
+ // function creates a new `ClassType` and returns a new instance that has the
245
+ // same data as the current instance but with the new type, shared ClassType
246
+ // will be preserved as well
247
+ Module clone(bool inplace = false) const;
248
+
249
+ // Clones both the underlying `ClassType` and the module instance (data); this
250
+ // function creates a new `ClassType` and returns a new instance that has the
251
+ // same data as the current instance but with the new type, shared ClassType
252
+ // will be preserved as well. Also allows the caller to specify a set of
253
+ // method and attribute names to not clone.
254
+ Module clone(
255
+ bool inplace,
256
+ const std::unordered_set<std::string>& ignored_method,
257
+ const std::unordered_set<std::string>& ignored_attributes) const;
258
+
259
+ void clone_method(const Module& orig, const std::string& name);
260
+
261
+ IValue operator()(std::vector<IValue> inputs);
262
+
263
+ template <typename... Types>
264
+ IValue create_class(const c10::QualifiedName& name, Types&&... args) const {
265
+ return create_class(name, {IValue(std::forward<Types>(args))...});
266
+ }
267
+
268
+ IValue create_class(const c10::QualifiedName& name, Stack stack) const;
269
+
270
+ inline bool operator==(const Module& y) const noexcept {
271
+ return _ivalue() == y._ivalue();
272
+ }
273
+
274
+ void set_delete_memory(std::shared_ptr<char> delete_mem) {
275
+ mem_to_delete_ = std::move(delete_mem);
276
+ }
277
+
278
+ // A set of functions to maintain input shapes through torch.jit.save and
279
+ // torch.jit.load. It only works on tensors and lists/dicts of tensors
280
+ // because tracing is only supported by these types.
281
+ void store_traced_inputs(std::string func_name, std::vector<IValue> inputs) {
282
+ if (inputs.size() == 0) {
283
+ return;
284
+ }
285
+ auto c10_inputs = c10::impl::GenericList(AnyType::get());
286
+ for (IValue& value : inputs) {
287
+ // Not checking whether this is a traceable type as that is already checked
288
+ // higher up in the stack and changing that would require a larger
289
+ // restructuring.
290
+ c10_inputs.emplace_back(std::move(value));
291
+ }
292
+ traced_inputs_.insert_or_assign(func_name, c10_inputs);
293
+ }
294
+
295
+ c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs()
296
+ const {
297
+ return traced_inputs_;
298
+ }
299
+
300
+ private:
301
+ Module clone_impl(
302
+ std::unordered_map<TypePtr, TypePtr>& type_remap,
303
+ bool inplace,
304
+ IValue::HashAliasedIValueMap memo,
305
+ const std::unordered_set<std::string>& ignored_methods,
306
+ const std::unordered_set<std::string>& ignored_attributes) const;
307
+
308
+ void clone_method(
309
+ const Module& orig,
310
+ const Function& method,
311
+ const std::unordered_map<TypePtr, TypePtr>& type_remap);
312
+
313
+ c10::QualifiedName getNameForMethod(std::string basename) const {
314
+ return QualifiedName(*type()->name(), std::move(basename));
315
+ }
316
+
317
+ void to_impl(
318
+ const c10::optional<at::Device>& device,
319
+ const c10::optional<at::ScalarType>& dtype,
320
+ bool non_blocking);
321
+
322
+ // Extra handle for the module to delete when the module itself is deleted
323
+ std::shared_ptr<char> mem_to_delete_;
324
+
325
+ // Map of function names to the traced inputs that they have been traced with
326
+ c10::Dict<std::string, c10::impl::GenericList> traced_inputs_;
327
+
328
+ // Mutex to keep registering buffers or parameters thread-safe.
329
+ std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>();
330
+ };
331
+
332
+ // C++ equivalent api of `torch.jit.freeze`. See documentation there for
333
+ // details.
334
+ TORCH_API Module freeze(
335
+ const Module& module,
336
+ const c10::optional<std::vector<std::string>>& preserved_attrs =
337
+ c10::nullopt,
338
+ bool optimize_numerics = true);
339
+
340
+ // C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation
341
+ // there for details.
342
+ TORCH_API Module optimize_for_inference(
343
+ Module& module,
344
+ const std::vector<std::string>& other_methods = {});
345
+
346
+ enum class FusionBehavior { STATIC, DYNAMIC };
347
+
348
+ using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>;
349
+ // clang-format off
350
+ /*
351
+ Sets the type and number of specializations that can occur during fusion.
352
+
353
+ Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC
354
+ and depth is an integer.
355
+
356
+ Behavior - static vs dynamic:
357
+ In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
358
+ based on some initial profiling runs.
359
+ In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
360
+ shapes are possible.
361
+
362
+ In both cases, we also recompile on new striding behavior, device, or dtype.
363
+
364
+ Behavior - fallback functions & depth:
365
+ When an input doesn't match the format required by the specialized compiled op, it will run
366
+ a fallback function. Fallback functions are recursively compiled and specialized based
367
+ on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
368
+ limit the number of specializations that can be compiled, before giving up on recompiling and
369
+ falling back to a completely un-fused, un-specialized implementation.
370
+
371
+ The list of (type, depth) pairs controls the type of specializations and the number of
372
+ specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first
373
+ two specializations will use static fusions, the following two specializations will use
374
+ dynamic fusion, and any inputs that satisfy none of the 4 options will run an
375
+ unfused implementation.
376
+
377
+ NB: in the future, as more fusion backends are added there may be more granular
378
+ apis for specific fusers.
379
+ */
380
+ // clang-format on
381
+ TORCH_API FusionStrategy getFusionStrategy();
382
+ // returns previous strategy
383
+ TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
384
+
385
+ namespace detail {
386
+
387
+ struct TORCH_API SlotCursor {
388
+ Module module_;
389
+ int64_t i_; // slot offset, -1 indicates the module itself
390
+ };
391
+
392
+ } // namespace detail
393
+
394
+ // This iterator allows the (optionally recursive) enumeration of
395
+ // the members of a Module. It performs a depth-first pre-order
396
+ // traversal of the module. The Policy template parameter determines
397
+ // which slots of the object should be included. For instance,
398
+ // when iterating parameters, we return the parameter tensors,
399
+ // but skip modules, buffers, and other attributes.
400
+ // See ModulePolicy for comments about Policy object's API.
401
+ template <typename Policy>
402
+ struct slot_iterator_impl {
403
+ using SlotCursor = detail::SlotCursor;
404
+ using value_type = typename Policy::value_type;
405
+ slot_iterator_impl(
406
+ Module root,
407
+ bool recurse, // if true, do a depth-first search, otherwise, just look at
408
+ // slots of root
409
+ bool return_module) // if true include root itself as the first thing
410
+ // visited (used in modules())
411
+ : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
412
+ recurse_(recurse) {
413
+ // advance iterator to first valid element (or the end, if empty)
414
+ while_not_valid_next();
415
+ }
416
+ // an empty cursors_ represents the end of iteration
417
+ slot_iterator_impl() : recurse_(false) {}
418
+ value_type operator*() const {
419
+ return Policy::create(cursors_, cur());
420
+ }
421
+ value_type operator->() const {
422
+ return **this;
423
+ }
424
+ slot_iterator_impl& operator++() {
425
+ next_valid();
426
+ return *this;
427
+ }
428
+ slot_iterator_impl operator++(int) {
429
+ // this is really expensive, should we delete it so people don't use it
430
+ // instead of prefix?
431
+ slot_iterator_impl old = *this;
432
+ ++(*this);
433
+ return old;
434
+ }
435
+
436
+ private:
437
+ // return_module() is a corner case where instead of returning a submodule
438
+ // of root, we are returning root itself, because we are iterating modules(),
439
+ // which contains the root module itself.
440
+ // It is represented with a single SlotCursor whose index is -1.
441
+ bool return_module() const {
442
+ return top().i_ == -1;
443
+ }
444
+ const SlotCursor& top() const {
445
+ return cursors_.back();
446
+ }
447
+ SlotCursor& top() {
448
+ return cursors_.back();
449
+ }
450
+ IValue cur() const {
451
+ return return_module() ? top().module_._ivalue()
452
+ : top().module_._ivalue()->getSlot(top().i_);
453
+ }
454
+
455
+ // advance to the next slot in a depth first pre-order traversal of the
456
+ // module's slots. This function does not guarantee the next slot is a
457
+ // valid element of the iteration. That is done by valid().
458
+ // invariant: !cursors_.empty()
459
+ void next() {
460
+ // we just returned the module itself, advance i_ to 0 so we are now
461
+ // at the first slot of the module.
462
+ if (return_module()) {
463
+ ++top().i_;
464
+ return;
465
+ }
466
+ // the last traversal action advanced beyond the number of slots in the
467
+ // module so continue the iteration in the parent.
468
+ if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) {
469
+ cursors_.pop_back();
470
+ if (!cursors_.empty()) {
471
+ ++top().i_;
472
+ }
473
+ return;
474
+ }
475
+ // if the current thing is a module, we have to scan it for recursive
476
+ // traversals. We do this by adding a new SlotCursor to track the traversal.
477
+ if (recurse_ &&
478
+ top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) {
479
+ cursors_.emplace_back(SlotCursor{cur().toModule(), 0});
480
+ return;
481
+ }
482
+ // common case: advance to the next slot.
483
+ ++top().i_;
484
+ }
485
+ // is the current position of the iterator a valid one?
486
+ // otherwise, we have to continue advancing.
487
+ bool valid() const {
488
+ return top().i_ <
489
+ int64_t(top().module_._ivalue()->type()->numAttributes()) &&
490
+ Policy::valid(
491
+ top().module_._ivalue()->type(),
492
+ top().i_,
493
+ top().module_._ivalue()->getSlot(top().i_));
494
+ }
495
+ void while_not_valid_next() {
496
+ // advance iteration until we are either at the end (cursors_.empty())
497
+ // or in a valid state. return_module() is a special case,
498
+ // and is always considered valid, regardless of Policy, because it is
499
+ // only true when we are iterating modules.
500
+ while (!cursors_.empty() && !return_module() && !valid()) {
501
+ next();
502
+ }
503
+ }
504
+ void next_valid() {
505
+ // avoid crashing if this is empty
506
+ if (cursors_.empty()) {
507
+ return;
508
+ }
509
+ // advance to next element, which is maybe not valid
510
+ next();
511
+ while_not_valid_next();
512
+ }
513
+
514
+ std::vector<SlotCursor> cursors_;
515
+ bool recurse_;
516
+
517
+ friend inline bool operator!=(
518
+ const slot_iterator_impl<Policy>& a,
519
+ const slot_iterator_impl<Policy>& b) {
520
+ // we are finished iteration when we have no more iteration SlotCursors.
521
+ // end is always an empty iterator with no cursors.
522
+ return (a.cursors_.empty() != b.cursors_.empty());
523
+ }
524
+ };
525
+
526
+ // This type represents lists of parameters, attributes, and
527
+ // submodules contained in the module. It is abstract because
528
+ // they are not stored directly in std::vectors but inside the
529
+ // module's IValue object itself.
530
+ template <typename Policy>
531
+ struct slot_list_impl {
532
+ using iterator = slot_iterator_impl<Policy>;
533
+ using const_iterator = slot_iterator_impl<Policy>;
534
+ using value_type = typename iterator::value_type;
535
+ slot_iterator_impl<Policy> begin() const {
536
+ return slot_iterator_impl<Policy>(module_, recurse_, return_module_);
537
+ }
538
+ slot_iterator_impl<Policy> end() const {
539
+ return slot_iterator_impl<Policy>();
540
+ }
541
+ size_t size() const {
542
+ if (!size_) {
543
+ size_ = size_t(0);
544
+ // NOLINTNEXTLINE(clang-diagnostic-unused-variable)
545
+ for (const value_type& s : *(this)) {
546
+ (void)s; // Suppress unused variable warning
547
+ ++*size_;
548
+ }
549
+ }
550
+ return *size_;
551
+ }
552
+
553
+ slot_list_impl(Module module, bool recurse, bool return_module)
554
+ : module_(std::move(module)),
555
+ recurse_(recurse),
556
+ return_module_(return_module),
557
+ size_(c10::nullopt) {
558
+ if (!recurse && !return_module && Policy::all_slots) {
559
+ size_ = module_.num_slots();
560
+ }
561
+ }
562
+
563
+ private:
564
+ Module module_;
565
+ bool recurse_;
566
+ bool return_module_;
567
+ // size of this list, cached on first request
568
+ // when we need to filter the slot list
569
+ mutable c10::optional<size_t> size_;
570
+ friend struct Module;
571
+ };
572
+
573
+ namespace detail {
574
+
575
+ // slot_iterator_impl always iterate over all the slots in a module,
576
+ // the Policy template argument determines slots should be returned and their
577
+ // types
578
+ struct TORCH_API ModulePolicy {
579
+ // the type of the value being returned
580
+ using value_type = Module;
581
+
582
+ // the logic for creating the type being returned, given the raw IValue
583
+ // of that object.
584
+ static value_type create(
585
+ const std::vector<detail::SlotCursor>& cursors,
586
+ IValue v) {
587
+ return Module(std::move(v).toObject());
588
+ }
589
+ // is slot i in typ something that this iterator should return, otherwise,
590
+ // we skip it.
591
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
592
+ return typ->getAttribute(i)->is_module();
593
+ }
594
+ // are we going to return everything? If so, we can optimize the calculation
595
+ // of the size of the list.
596
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
597
+ };
598
+
599
+ struct TORCH_API ParameterPolicy {
600
+ using value_type = at::Tensor;
601
+ static value_type create(
602
+ const std::vector<detail::SlotCursor>& cursors,
603
+ IValue v) {
604
+ return std::move(v).toTensor();
605
+ }
606
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
607
+ return typ->is_parameter(i) && v.isTensor();
608
+ }
609
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
610
+ };
611
+
612
+ struct TORCH_API BufferPolicy {
613
+ using value_type = at::Tensor;
614
+ static value_type create(
615
+ const std::vector<detail::SlotCursor>& cursors,
616
+ IValue v) {
617
+ return std::move(v).toTensor();
618
+ }
619
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
620
+ return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
621
+ typ->is_buffer(i);
622
+ }
623
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
624
+ };
625
+
626
+ struct TORCH_API AttributePolicy {
627
+ using value_type = IValue;
628
+ static value_type create(
629
+ const std::vector<detail::SlotCursor>& cursors,
630
+ IValue v) {
631
+ return v;
632
+ }
633
+ static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
634
+ return true;
635
+ }
636
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
637
+ };
638
+
639
+ // take a Policy object, and make a version of it that returns the slot,
640
+ // along with the fully qualified name of that slot. This is used for the named_
641
+ // variants like named_parameters().
642
+ template <typename Policy>
643
+ struct NamedPolicy {
644
+ using value_type = Named<typename Policy::value_type>;
645
+ static value_type create(
646
+ const std::vector<detail::SlotCursor>& cursors,
647
+ IValue v) {
648
+ std::string name;
649
+ if (cursors.size() == 1) {
650
+ name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back());
651
+ } else {
652
+ std::ostringstream ss;
653
+ for (const auto i : c10::irange(cursors.size())) {
654
+ if (i > 0) {
655
+ ss << ".";
656
+ }
657
+ ss << nameFragment(cursors[i]);
658
+ }
659
+ name = ss.str();
660
+ }
661
+ return value_type{std::move(name), Policy::create(cursors, std::move(v))};
662
+ }
663
+ static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) {
664
+ return Policy::valid(t, i, v);
665
+ }
666
+ static constexpr bool all_slots = Policy::all_slots;
667
+
668
+ private:
669
+ static std::string nameFragment(const detail::SlotCursor& f) {
670
+ return f.module_.type()->getAttributeName(f.i_);
671
+ }
672
+ };
673
+
674
+ } // namespace detail
675
+
676
+ TORCH_API bool& getInlineEverythingMode();
677
+
678
+ namespace script {
679
+ // We once had a `script::` namespace that was deleted. This is for backcompat
680
+ // of the public API; new code should not use this type alias.
681
+ using Module = ::torch::jit::Module;
682
+ using ExtraFilesMap = ::torch::jit::ExtraFilesMap;
683
+ } // namespace script
684
+
685
+ } // namespace torch::jit
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h ADDED
@@ -0,0 +1,200 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/functional.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/api/method.h>
7
+
8
+ #include <utility>
9
+
10
+ namespace torch::jit {
11
+
12
+ struct Resolver;
13
+ using ResolverPtr = std::shared_ptr<Resolver>;
14
+
15
+ using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
16
+
17
+ // Throw this in C++ land if `attr` fails. This will be converted to a Python
18
+ // AttributeError by the Python binding code
19
+ class ObjectAttributeError : public std::runtime_error {
20
+ public:
21
+ ObjectAttributeError(const std::string& what) : std::runtime_error(what) {}
22
+ };
23
+
24
+ struct TORCH_API Object {
25
+ Object() = default;
26
+ Object(const Object&) = default;
27
+ Object& operator=(const Object&) = default;
28
+ Object(Object&&) noexcept = default;
29
+ Object& operator=(Object&&) noexcept = default;
30
+ Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
31
+ Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
32
+ Object(
33
+ c10::QualifiedName,
34
+ std::shared_ptr<CompilationUnit> cu,
35
+ bool shouldMangle = false);
36
+
37
+ ObjectPtr _ivalue() const {
38
+ TORCH_INTERNAL_ASSERT(_ivalue_);
39
+ return _ivalue_;
40
+ }
41
+
42
+ c10::ClassTypePtr type() const {
43
+ return _ivalue()->type();
44
+ }
45
+
46
+ struct Property {
47
+ std::string name;
48
+ Method getter_func;
49
+ c10::optional<Method> setter_func;
50
+ };
51
+
52
+ void setattr(const std::string& name, c10::IValue v) {
53
+ if (_ivalue()->type()->hasConstant(name)) {
54
+ TORCH_CHECK(
55
+ false,
56
+ "Can't set constant '",
57
+ name,
58
+ "' which has value:",
59
+ _ivalue()->type()->getConstant(name));
60
+ } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) {
61
+ const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot);
62
+ TORCH_CHECK(
63
+ v.type()->isSubtypeOf(*expected),
64
+ "Expected a value of type '",
65
+ expected->repr_str(),
66
+ "' for field '",
67
+ name,
68
+ "', but found '",
69
+ v.type()->repr_str(),
70
+ "'");
71
+ _ivalue()->setSlot(*slot, std::move(v));
72
+ } else {
73
+ TORCH_CHECK(false, "Module has no attribute '", name, "'");
74
+ }
75
+ }
76
+
77
+ c10::IValue attr(const std::string& name) const {
78
+ if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
79
+ return _ivalue()->getSlot(*r);
80
+ }
81
+ if (auto r = _ivalue()->type()->findConstantSlot(name)) {
82
+ return _ivalue()->type()->getConstant(*r);
83
+ }
84
+ std::stringstream err;
85
+ err << _ivalue()->type()->repr_str() << " does not have a field with name '"
86
+ << name.c_str() << "'";
87
+ throw ObjectAttributeError(err.str());
88
+ }
89
+
90
+ c10::IValue attr(const std::string& name, c10::IValue or_else) const {
91
+ if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
92
+ return _ivalue()->getSlot(*r);
93
+ }
94
+ if (auto r = _ivalue()->type()->findConstantSlot(name)) {
95
+ return _ivalue()->type()->getConstant(*r);
96
+ }
97
+ return or_else;
98
+ }
99
+
100
+ bool hasattr(const std::string& name) const {
101
+ return _ivalue()->type()->hasAttribute(name) ||
102
+ _ivalue()->type()->hasConstant(name);
103
+ }
104
+
105
+ // each object owns its methods. The reference returned here
106
+ // is guaranteed to stay valid until this module has been destroyed
107
+ Method get_method(const std::string& name) const {
108
+ if (auto method = find_method(name)) {
109
+ return *method;
110
+ }
111
+ AT_ERROR("Method '", name, "' is not defined.");
112
+ }
113
+
114
+ const std::vector<Method> get_methods() const {
115
+ return c10::fmap(type()->methods(), [&](Function* func) {
116
+ return Method(_ivalue(), func);
117
+ });
118
+ }
119
+
120
+ bool has_property(const std::string& name) const {
121
+ for (const auto& prop : type()->properties()) {
122
+ if (prop.name == name) {
123
+ return true;
124
+ }
125
+ }
126
+ return false;
127
+ }
128
+
129
+ const Property get_property(const std::string& name) const {
130
+ for (const auto& prop : type()->properties()) {
131
+ if (prop.name == name) {
132
+ c10::optional<Method> setter = c10::nullopt;
133
+ if (prop.setter) {
134
+ setter = Method(_ivalue(), prop.setter);
135
+ }
136
+ return Property{
137
+ prop.name, Method(_ivalue(), prop.getter), std::move(setter)};
138
+ }
139
+ }
140
+ AT_ERROR("Property '", name, "' is not defined.");
141
+ }
142
+
143
+ const std::vector<Property> get_properties() const {
144
+ return c10::fmap(type()->properties(), [&](ClassType::Property prop) {
145
+ c10::optional<Method> setter = c10::nullopt;
146
+ if (prop.setter) {
147
+ setter = Method(_ivalue(), prop.setter);
148
+ }
149
+ return Property{
150
+ std::move(prop.name),
151
+ Method(_ivalue(), prop.getter),
152
+ std::move(setter)};
153
+ });
154
+ }
155
+
156
+ c10::optional<Method> find_method(const std::string& basename) const;
157
+
158
+ /// Run a method from this module.
159
+ ///
160
+ /// For example:
161
+ /// @code
162
+ /// IValue output = module->run("relu_script", a, b);
163
+ /// @endcode
164
+ ///
165
+ /// To compile a module from a source string, see torch::jit::compile
166
+ ///
167
+ /// @param method_name The name of the method to run
168
+ /// @param args Arguments to be passed to the method
169
+ /// @return An IValue containing the return value (or values if it is a tuple)
170
+ /// from the method
171
+ template <typename... Types>
172
+ IValue run_method(const std::string& method_name, Types&&... args) {
173
+ return get_method(method_name)({IValue(std::forward<Types>(args))...});
174
+ }
175
+
176
+ // so that C++ users can easily add methods
177
+ void define(const std::string& src, const ResolverPtr& resolver = nullptr);
178
+
179
+ size_t num_slots() const {
180
+ return _ivalue()->slots().size();
181
+ }
182
+
183
+ // shallow copy the object
184
+ Object copy() const;
185
+
186
+ // Copies all the attributes of the object recursively without creating new
187
+ // `ClassType`, including deepcopy of Tensors
188
+ Object deepcopy() const;
189
+
190
+ private:
191
+ // mutable because we lazily initialize in module_object.
192
+ mutable ObjectPtr _ivalue_;
193
+ };
194
+
195
+ namespace script {
196
+ // We once had a `script::` namespace that was deleted. This is for backcompat
197
+ // of the public API; new code should not use this type alias.
198
+ using Object = ::torch::jit::Object;
199
+ } // namespace script
200
+ } // namespace torch::jit
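The `Object` API added above (`attr`, `setattr`, `hasattr`, `run_method`, `define`) is what `torch::jit::Module` inherits, so it is most easily exercised through a loaded scripted module. A minimal sketch follows; the file name `model.pt`, the `weight` attribute, and the `forward` method are assumptions for illustration, and `torch/script.h`, `torch::jit::load`, and `torch::ones` come from the public LibTorch API rather than this header.

#include <torch/script.h> // public LibTorch header; torch::jit::Module derives from Object
#include <iostream>

int main() {
  // Load a previously scripted module (path and attribute names are assumed).
  torch::jit::Module m = torch::jit::load("model.pt");

  // attr() with a default avoids ObjectAttributeError for missing fields.
  c10::IValue w = m.attr("weight", /*or_else=*/c10::IValue());

  // setattr() follows the slot rules shown above: the new value must be a
  // subtype of the declared attribute type, and constants cannot be set.
  if (m.hasattr("weight")) {
    m.setattr("weight", w);
  }

  // run_method() packs its variadic arguments into IValues for the named method.
  c10::IValue out = m.run_method("forward", torch::ones({2, 3}));
  std::cout << out << "\n";
}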
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h ADDED
@@ -0,0 +1,241 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+ #include <torch/csrc/jit/python/pybind_utils.h>
6
+ #include <memory>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT };
14
+ class ConcreteModuleType;
15
+
16
+ // You can think of an nn.Module as a template that corresponds to a family of
17
+ // JIT types. The template "arguments" are things like the constant values.
18
+ // e.g.
19
+ // class M(nn.Module):
20
+ // __constants__ = ["const"]
21
+ // ...
22
+ //
23
+ // Is similar to writing the following in C++:
24
+ //
25
+ // template<TConst>
26
+ // class M {
27
+ // ...
28
+ // }
29
+ //
30
+ // We need to consider each different member of the type family a different JIT
31
+ // type because, e.g. different constant values lead to different versions of
32
+ // the same method.
33
+ //
34
+ // ConcreteModuleType corresponds to a single member of the type family, with
35
+ // all template arguments fully specified. Two Modules that share a
36
+ // ConcreteModuleType can share a JIT type, and vice versa.
37
+ //
38
+ // Why not just use a JIT type to represent concrete types? Because constants,
39
+ // function attributes, etc. are currently not representable in the type system,
40
+ // so this acts a non-first-class way of tracking concrete types.
41
+ //
42
+ // ConcreteModuleType is also the source of truth for servicing all
43
+ // ModuleValue::attr calls. This is so we can guarantee that if two Module's
44
+ // share a JIT type (and thus a ConcreteModuleType), then they behave the same
45
+ // way when you access attributes on them.
46
+
47
+ // ConcreteModuleType has two phases.
48
+ // 1. Creation: First we build it up, during the ScriptModule conversion
49
+ // process. This is represented by ConcreteModuleTypeBuilder.
50
+ // ...then the converter calls ConcreteModuleTypeBuilder::build(), producing
51
+ // a
52
+ // ConcreteModuleType ready for querying.
53
+ // 2. Querying: We use ConcreteModuleType as a source of truth for
54
+ // ModuleValue::attr calls during method compilation.
55
+
56
+ // Represents a concrete type during in the process for construction. We use
57
+ // this to decide whether we can share types between modules.
58
+ class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder {
59
+ public:
60
+ explicit ConcreteModuleTypeBuilder(py::object pyClass) {
61
+ TORCH_INTERNAL_ASSERT(pyClass);
62
+ pyClass_ = std::move(pyClass);
63
+ }
64
+
65
+ void addConstant(std::string name, py::object value);
66
+ void addConstant(std::string name, IValue value);
67
+ void addAttribute(
68
+ std::string name,
69
+ const TypePtr& type,
70
+ bool isParameter,
71
+ bool isBuffer);
72
+ void addFunctionAttribute(
73
+ std::string name,
74
+ const TypePtr& type,
75
+ py::object pyFunction);
76
+
77
+ void addModule(std::string name, std::shared_ptr<ConcreteModuleType> meta);
78
+
79
+ void addForwardHook(py::object hook);
80
+ void addForwardPreHook(py::object pre_hook);
81
+
82
+ void addOverload(
83
+ std::string methodName,
84
+ std::vector<std::string> overloadedMethodNames);
85
+ void addBuiltinFunction(std::string name, const std::string& symbol_name);
86
+ void addFailedAttribute(std::string name, std::string failureReason);
87
+ void addIgnoredAttribute(std::string name);
88
+ void setIterableModuleKind(IterableModuleKind kind);
89
+
90
+ // If a ConcreteModuleType is poisoned, it will never compare equal to any
91
+ // other concrete type
92
+ void setPoisoned();
93
+
94
+ std::shared_ptr<ConcreteModuleType> build() const {
95
+ return std::make_shared<ConcreteModuleType>(*this);
96
+ }
97
+
98
+ // This determines whether two modules can share a type. The container structs
99
+ // used by ConcreteModuleType have been defined such that operator==
100
+ // implements a meaningful comparison in that context.
101
+ bool equals(const ConcreteModuleTypeBuilder& other) const;
102
+
103
+ struct FunctionAttribute {
104
+ FunctionTypePtr function_;
105
+ py::object pyFunction_;
106
+
107
+ friend bool operator==(
108
+ const FunctionAttribute& lhs,
109
+ const FunctionAttribute& rhs) {
110
+ // Functions are not first class, so we can't do type comparison like a
111
+ // regular attribute. So we do a pointer equality check on the actual
112
+ // Python function object.
113
+ return lhs.pyFunction_.is(rhs.pyFunction_);
114
+ }
115
+ };
116
+
117
+ struct Attribute {
118
+ Attribute(TypePtr type, bool isParam, bool isBuffer)
119
+ : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {}
120
+
121
+ friend bool operator==(const Attribute& lhs, const Attribute& rhs) {
122
+ return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_;
123
+ }
124
+ TypePtr type_;
125
+ bool isParam_;
126
+ bool isBuffer_;
127
+ };
128
+
129
+ struct ModuleInfo {
130
+ ModuleInfo(std::string name, std::shared_ptr<ConcreteModuleType> meta)
131
+ : name_(std::move(name)), meta_(std::move(meta)) {}
132
+
133
+ friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs);
134
+
135
+ std::string name_;
136
+ std::shared_ptr<ConcreteModuleType> meta_;
137
+ };
138
+
139
+ private:
140
+ ConcreteModuleTypeBuilder() = default;
141
+ ClassTypePtr createTypeFromThis() const;
142
+
143
+ // If true, this type will never compare equally to anything else. This is
144
+ // used if we want to ensure that this type is not shared (for example, if it
145
+ // came from a traced module)
146
+ bool isPoisoned_ = false;
147
+
148
+ // The value of any constants defined by the module.
149
+ std::unordered_map<std::string, IValue> constants_;
150
+ // The types of any attributes
151
+ OrderedDict<std::string, Attribute> attributes_;
152
+ // Overloads, in the same format as `__overloads__` in Python
153
+ std::unordered_map<std::string, std::vector<std::string>> overloads_;
154
+ // Any attributes we failed to convert to TorchScript, along with a hint as to
155
+ // why
156
+ std::unordered_map<std::string, std::string> failedAttributes_;
157
+ // Any attributes that were marked as ignored. They cannot be used in
158
+ // TorchScript but can still be used in ignored function in Python.
159
+ std::unordered_set<std::string> ignoredAttributes_;
160
+ // Any function attributes. These are special right now because functions are
161
+ // not first-class in the type system.
162
+ std::unordered_map<std::string, FunctionAttribute> functionAttributes_;
163
+ // Function attributes that are calls to builtin functions. These get
164
+ // de-sugared directly into the corresponding aten:: call. The map is
165
+ // attribute name -> aten symbol name
166
+ std::unordered_map<std::string, c10::Symbol> builtinFunctions_;
167
+ // The concrete types of any submodules
168
+ std::vector<ModuleInfo> modules_;
169
+ // Hooks to be called before/after forward when the module
170
+ // is called directly. Used to ensure modules have different types
171
+ // when they have different python hooks
172
+ // Actual hooks are added to ClassType directly during compilation
173
+ std::vector<py::object> forwardHooks_;
174
+ std::vector<py::object> forwardPreHooks_;
175
+
176
+ // If something is a ModuleDict/ModuleList, it means:
177
+ // 1. The order of the submodules matters for comparing the type
178
+ // 2. The compiler is allowed to treat it like a dict/tuple
179
+ IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE;
180
+
181
+ // The original `nn.Module` class that we derived this ScriptModule from.
182
+ py::object pyClass_;
183
+
184
+ // NOTE: If you ever add any more state to this struct, you need to make sure
185
+ // operator== still makes sense!
186
+ friend ConcreteModuleType;
187
+ };
188
+
189
+ // Represents a finalized concrete type, used to service ModuleValue::attr calls
190
+ // during method compilation.
191
+ class VISIBILITY_HIDDEN ConcreteModuleType {
192
+ public:
193
+ explicit ConcreteModuleType(ConcreteModuleTypeBuilder data);
194
+
195
+ static std::shared_ptr<ConcreteModuleType> fromJitType(TypePtr type);
196
+
197
+ TypePtr getJitType() const;
198
+ c10::optional<py::object> getPyClass() const;
199
+ IterableModuleKind getIterableModuleKind() const;
200
+ c10::optional<std::vector<std::string>> findOverloads(
201
+ const std::string& name) const;
202
+ c10::optional<Function*> findFunctionAttribute(const std::string& name) const;
203
+ c10::optional<c10::Symbol> findBuiltinFunction(const std::string& name) const;
204
+ std::shared_ptr<ConcreteModuleType> findSubmoduleConcreteType(
205
+ const std::string& name) const;
206
+ c10::optional<std::string> findFailedAttribute(const std::string& name) const;
207
+ bool isIgnoredAttribute(const std::string& name) const;
208
+
209
+ // These getters are only here to return things as types that can be
210
+ // automatically converted by pybind.
211
+ std::unordered_map<std::string, py::object> getConstantsPy() const;
212
+ std::unordered_map<std::string, std::pair<TypePtr, bool>> getAttributesPy()
213
+ const;
214
+ std::vector<std::pair<std::string, std::shared_ptr<ConcreteModuleType>>>
215
+ getModulesPy() const;
216
+
217
+ bool equals(const ConcreteModuleType& other) const {
218
+ if (jitType_ == other.jitType_) {
219
+ // If the computed types are the same, these modules can (obviously) share
220
+ // a type.
221
+ return true;
222
+ }
223
+
224
+ return data_.equals(other.data_);
225
+ }
226
+ bool equals(const ConcreteModuleTypeBuilder& other) const {
227
+ return data_.equals(other);
228
+ }
229
+
230
+ void dump() const;
231
+
232
+ private:
233
+ ConcreteModuleType() = default;
234
+
235
+ // The JIT type derived from this ConcreteModuleType.
236
+ ConcreteModuleTypeBuilder data_;
237
+ TypePtr jitType_;
238
+ };
239
+
240
+ } // namespace jit
241
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ // Convert a graph with Loads & Stores into SSA form
13
+ TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <cstddef>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API size_t ComputeEditDistance(
10
+ const char* word1,
11
+ const char* word2,
12
+ size_t maxEditDistance);
13
+
14
+ } // namespace jit
15
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+ #include <functional>
3
+ #include <memory>
4
+ #include <string>
5
+
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
13
+ TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h ADDED
@@ -0,0 +1,575 @@
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/frontend/parser_constants.h>
6
+ #include <torch/csrc/jit/frontend/source_range.h>
7
+ #include <torch/csrc/jit/frontend/strtod.h>
8
+ #include <algorithm>
9
+ #include <clocale>
10
+ #include <cstdlib>
11
+ #include <memory>
12
+ #include <sstream>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ C10_CLANG_DIAGNOSTIC_PUSH()
17
+ #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
18
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
19
+ #endif
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+
24
+ // single character tokens are just the character itself '+'
25
+ // multi-character tokens need an entry here
26
+ // if the third entry is not the empty string, it is used
27
+ // in the lexer to match this token.
28
+
29
+ // These kinds are also used in Tree.h as the kind of the AST node.
30
+ // Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the
31
+ // lexer.
32
+
33
+ #define TC_FORALL_TOKEN_KINDS(_) \
34
+ _(TK_EOF, "eof", "") \
35
+ _(TK_WHITESPACE, "whitespace", "") \
36
+ _(TK_WHITESPACE_EOF, "whitespace_eof", "") \
37
+ _(TK_NUMBER, "number", "") \
38
+ _(TK_NEWLINE, "newline", "") \
39
+ _(TK_INDENT, "indent", "") \
40
+ _(TK_DEDENT, "dedent", "") \
41
+ _(TK_DEF, "def", "def") \
42
+ _(TK_EQUIVALENT, "equivalent", "<=>") \
43
+ _(TK_IDENT, "ident", "") \
44
+ _(TK_STRING, "string", "") \
45
+ _(TK_STRINGLITERAL, "string_literal", "") \
46
+ _(TK_CONST, "const", "") \
47
+ _(TK_LIST, "list", "") \
48
+ _(TK_DICT, "dict", "") \
49
+ _(TK_OPTION, "option", "") \
50
+ _(TK_APPLY, "apply", "") \
51
+ _(TK_COMPREHENSION, "comprehension", "") \
52
+ _(TK_RANGE_CONSTRAINT, "range_constraint", "") \
53
+ _(TK_PARAM, "param", "") \
54
+ _(TK_INFERRED, "inferred", "") \
55
+ _(TK_ACCESS, "access", "") \
56
+ _(TK_ASSIGN, "assign", "") \
57
+ _(TK_AUG_ASSIGN, "aug_assign", "") \
58
+ _(TK_ATTRIBUTE, "attribute", "") \
59
+ _(TK_IF, "if", "if") \
60
+ _(TK_ELSE, "else", "else") \
61
+ _(TK_ELIF, "elif", "elif") \
62
+ _(TK_WHILE, "while", "while") \
63
+ _(TK_EXPR_STMT, "expression statement", "") \
64
+ _(TK_RETURN, "return", "return") \
65
+ _(TK_IS, "is", "is") \
66
+ _(TK_ISNOT, "is not", "is not") \
67
+ _(TK_NE, "ne", "!=") \
68
+ _(TK_EQ, "eq", "==") \
69
+ _(TK_LE, "le", "<=") \
70
+ _(TK_GE, "ge", ">=") \
71
+ _(TK_FLOOR_DIV, "floordiv", "//") \
72
+ _(TK_IF_EXPR, "if", "") \
73
+ _(TK_TRUE, "True", "True") \
74
+ _(TK_FALSE, "False", "False") \
75
+ _(TK_NONE, "None", "None") \
76
+ _(TK_AND, "and", "and") \
77
+ _(TK_OR, "or", "or") \
78
+ _(TK_NOT, "not", "not") \
79
+ _(TK_LSHIFT, "<<", "<<") \
80
+ _(TK_RSHIFT, ">>", ">>") \
81
+ _(TK_CAST, "cast", "") \
82
+ _(TK_PLUS_EQ, "+=", "+=") \
83
+ _(TK_MINUS_EQ, "-=", "-=") \
84
+ _(TK_TIMES_EQ, "*=", "*=") \
85
+ _(TK_DIV_EQ, "/=", "/=") \
86
+ _(TK_MOD_EQ, "%=", "%=") \
87
+ _(TK_BIT_OR_EQ, "|=", "|=") \
88
+ _(TK_BIT_AND_EQ, "&=", "&=") \
89
+ _(TK_BIT_XOR_EQ, "^=", "^=") \
90
+ _(TK_LSHIFT_EQ, "<<=", "<<=") \
91
+ _(TK_RSHIFT_EQ, ">>=", ">>=") \
92
+ _(TK_POW_EQ, "**=", "**=") \
93
+ _(TK_GLOBAL, "global", "global") \
94
+ _(TK_BUILT_IN, "built-in", "") \
95
+ _(TK_SUBSCRIPT, "subscript", "") \
96
+ _(TK_VAR, "variable", "") \
97
+ _(TK_NOTHING, "nothing", "") \
98
+ _(TK_DICT_LITERAL, "dict-literal", "") \
99
+ _(TK_LIST_LITERAL, "list-literal", "") \
100
+ _(TK_TUPLE_LITERAL, "tuple-literal", "") \
101
+ _(TK_FOR, "for", "for") \
102
+ _(TK_IN, "in", "in") \
103
+ _(TK_NOTIN, "not in", "not in") \
104
+ _(TK_STARRED, "starred", "") \
105
+ _(TK_UNARY_MINUS, "unary minus", "") \
106
+ _(TK_POW, "pow operator", "**") \
107
+ _(TK_ARROW, "arrow", "->") \
108
+ _(TK_DECL, "decl", "") \
109
+ _(TK_SLICE_EXPR, "slice expr", "") \
110
+ _(TK_TYPE_COMMENT, "type comment", "# type:") \
111
+ _(TK_RAISE, "raise", "raise") \
112
+ _(TK_ASSERT, "assert", "assert") \
113
+ _(TK_DOTS, "dots", "...") \
114
+ _(TK_LIST_COMP, "list comprehension", "") \
115
+ _(TK_DICT_COMP, "dict comprehension", "") \
116
+ _(TK_BREAK, "break", "break") \
117
+ _(TK_CONTINUE, "continue", "continue") \
118
+ _(TK_DELETE, "del", "del") \
119
+ _(TK_PASS, "pass", "pass") \
120
+ _(TK_CLASS_DEF, "class", "class") \
121
+ _(TK_IMPORT, "import", "import") \
122
+ _(TK_WITH, "with", "with") \
123
+ _(TK_WITH_ITEM, "withitem", "") \
124
+ _(TK_AS, "as", "as") \
125
+ _(TK_PROP, "property", "") \
126
+ _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \
127
+ _(TK_NONE_TYPE, "NoneType", "NoneType")
128
+
129
+ enum TokenKind {
130
+ // we use characters to represent themselves so skip all valid characters
131
+ // before
132
+ // assigning enum values to multi-char tokens.
133
+ TK_DUMMY_START = 256,
134
+ #define DEFINE_TOKEN(tok, _, _2) tok,
135
+ TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN)
136
+ #undef DEFINE_TOKEN
137
+ };
138
+
139
+ TORCH_API std::string kindToString(int kind);
140
+ TORCH_API int stringToKind(const std::string& str);
141
+
142
+ // nested hash tables that indicate char-by-char what is a valid token.
143
+ struct TokenTrie;
144
+ using TokenTrieRef = std::unique_ptr<TokenTrie>;
145
+ struct TokenTrie {
146
+ TokenTrie() : kind(0) {}
147
+ void insert(const char* str, int tok) {
148
+ if (*str == '\0') {
149
+ AT_ASSERT(kind == 0);
150
+ kind = tok;
151
+ return;
152
+ }
153
+
154
+ for (size_t i = 0, e = child_chars.size(); i < e; ++i) {
155
+ if (child_chars[i] == *str) {
156
+ child_tries[i]->insert(str + 1, tok);
157
+ return;
158
+ }
159
+ }
160
+
161
+ child_chars.emplace_back(*str);
162
+ child_tries.emplace_back(std::make_unique<TokenTrie>());
163
+ child_tries.back()->insert(str + 1, tok);
164
+ }
165
+ int kind; // 0 == invalid token
166
+
167
+ std::vector<char> child_chars;
168
+ std::vector<TokenTrieRef> child_tries;
169
+ };
170
+
171
+ // stuff that is shared across all TC lexers/parsers and is initialized only
172
+ // once.
173
+ struct TORCH_API SharedParserData {
174
+ SharedParserData() : head(new TokenTrie()) {
175
+ std::stringstream ss;
176
+ for (const char* c = valid_single_char_tokens; *c; c++) {
177
+ std::string str(1, *c);
178
+ head->insert(str.c_str(), *c);
179
+ }
180
+
181
+ #define ADD_CASE(tok, _, tokstring) \
182
+ if (*(tokstring) != '\0') { \
183
+ head->insert((tokstring), (tok)); \
184
+ }
185
+ TC_FORALL_TOKEN_KINDS(ADD_CASE)
186
+ #undef ADD_CASE
187
+ }
188
+
189
+ bool match(
190
+ StringCordView::Iterator pos,
191
+ bool continuation, // are we inside a scope where newlines don't count
192
+ // (e.g. inside parens)
193
+ bool whitespace_token, // should we treat whitespace as a token
194
+ int* kind,
195
+ StringCordView::Iterator* start,
196
+ StringCordView::Iterator* end) {
197
+ *start = pos;
198
+ // skip whitespace
199
+ while (pos.has_next() && isblank(*pos)) {
200
+ ++pos;
201
+ }
202
+
203
+ // special handling
204
+ if (pos.has_next()) {
205
+ if (*pos == '#' && !isTypeComment(pos)) {
206
+ // skip comments
207
+ while (pos.has_next() && *pos != '\n')
208
+ ++pos;
209
+ // tail call, handle whitespace and more comments
210
+ return match(pos, continuation, whitespace_token, kind, start, end);
211
+ }
212
+ if (*pos == '\\') {
213
+ auto newiter = pos;
214
+ ++newiter;
215
+ if (newiter.has_next() && *newiter == '\n' && !whitespace_token) {
216
+ ++newiter;
217
+ return match(newiter, continuation, false, kind, start, end);
218
+ }
219
+ }
220
+ if (*pos == '\n') {
221
+ return match(++pos, continuation, !continuation, kind, start, end);
222
+ }
223
+ }
224
+ // we handle whitespace before EOF because we may need to generate dedent
225
+ // tokens in a case like the following:          if foo:
226
+ // ...
227
+ // else:
228
+ // pass
229
+ if (whitespace_token) {
230
+ *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE;
231
+ *end = pos;
232
+ return true;
233
+ }
234
+ if (!pos.has_next()) {
235
+ *kind = TK_EOF;
236
+ *start = pos;
237
+ *end = *start;
238
+ return true;
239
+ }
240
+ // invariant: the next token is not whitespace or newline
241
+ *start = pos;
242
+ // check for a valid number
243
+ size_t len;
244
+ if (isNumber(pos.rest_line(), 0, &len)) {
245
+ *end = *start;
246
+ *end += len;
247
+ *kind = TK_NUMBER;
248
+ return true;
249
+ }
250
+ // check for string
251
+ if (isString(pos.rest_line(), 0, &len)) {
252
+ *kind = TK_STRINGLITERAL;
253
+ *end = *start;
254
+ *end += len;
255
+ return true;
256
+ }
257
+
258
+ // check for either an ident or a token
259
+ // ident tracks whether what we have scanned so far could be an identifier
260
+ // matched indicates if we have found any match.
261
+ bool matched = false;
262
+ bool ident = true;
263
+ TokenTrie* cur = head.get();
264
+ // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr);
265
+ // i++)
266
+ for (size_t i = 0; pos.has_next() && (ident || cur != nullptr);
267
+ ++pos, ++i) {
268
+ ident = ident && validIdent(i, *pos);
269
+ if (ident) {
270
+ matched = true;
271
+ *end = pos.next_iter();
272
+ *kind = TK_IDENT;
273
+ }
274
+ // check for token second, so that e.g. 'max' matches the token TK_MAX
275
+ // rather the
276
+ // identifier 'max'
277
+ if (cur) {
278
+ const auto begin_it = cur->child_chars.begin();
279
+ const auto end_it = cur->child_chars.end();
280
+ const auto ch_it = std::find(begin_it, end_it, *pos);
281
+
282
+ cur = (ch_it == end_it) ? nullptr
283
+ : cur->child_tries[ch_it - begin_it].get();
284
+
285
+ if (cur && cur->kind != 0) {
286
+ matched = true;
287
+ *end = pos.next_iter();
288
+ *kind = cur->kind;
289
+ }
290
+ }
291
+ }
292
+ return matched;
293
+ }
294
+
295
+ bool isUnary(int kind, int* prec);
296
+ bool isBinary(int kind, int* prec);
297
+ bool isRightAssociative(int kind) {
298
+ switch (kind) {
299
+ case '?':
300
+ case TK_POW:
301
+ case TK_IF:
302
+ return true;
303
+ default:
304
+ return false;
305
+ }
306
+ }
307
+
308
+ private:
309
+ bool validIdent(size_t i, char n) {
310
+ return isalpha(n) || n == '_' || (i > 0 && isdigit(n));
311
+ }
312
+
313
+ // 1. skip whitespace
314
+ // 2. handle comment or newline
315
+ //
316
+ bool isNumber(c10::string_view str, size_t start, size_t* len) {
317
+ char first = str[start];
318
+ // strtod allows numbers to start with + or - or nan or inf
319
+ // http://en.cppreference.com/w/cpp/string/byte/strtof
320
+ // but we want only the number part, otherwise 1+3 will turn into two
321
+ // adjacent numbers in the lexer
322
+ if (first == '-' || first == '+' || isalpha(first))
323
+ return false;
324
+ const char* startptr = str.data() + start;
325
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
326
+ char* endptr;
327
+ torch::jit::strtod_c(startptr, &endptr);
328
+ *len = endptr - startptr;
329
+ // check if the number is complex valued
330
+ // access is safe because string is assumed to be null terminated
331
+ if (endptr != nullptr && *endptr == 'j') {
332
+ *len += 1;
333
+ }
334
+ return *len > 0;
335
+ }
336
+
337
+ bool isCharCount(char c, c10::string_view str, size_t start, int len) {
338
+ // count checks from [start, start + len)
339
+ return start + len <= str.size() &&
340
+ std::count(str.begin() + start, str.begin() + start + len, c) == len;
341
+ }
342
+
343
+ // python concatenates all adjacent strings "a" "b" == "ab"
344
+ // strings can be enclosed with 1 or 3 single or double quotes
345
+ // if enclosed with 3 quotes newlines are valid
346
+ // as elsewhere, backslash and new line should be ignored
347
+ bool isString(c10::string_view str, size_t start, size_t* len) {
348
+ char quote = str[start];
349
+ if (quote != '\"' && quote != '\'')
350
+ return false;
351
+ int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1;
352
+
353
+ // end is now set past the opening quotation marks
354
+ size_t end = start + quote_len;
355
+ while (end < str.size() && !isCharCount(quote, str, end, quote_len)) {
356
+ if (str[end] == '\n' && quote_len != 3) {
357
+ return false;
358
+ }
359
+ // handle escaped characters. advances past escaped quotation marks,
360
+ // escaped newlines and escaped backslashes
361
+ // multi-char escapes like \x1A are handled fine here because the
362
+ // remainder of the escape are valid string characters anyway
363
+ if (str[end] == '\\') {
364
+ end++;
365
+ }
366
+ end++;
367
+ }
368
+ // set length equal to the complete string including quotations
369
+ *len = end - start + quote_len;
370
+ // if end finished without going past the last character of the string then
371
+ // there is a match
372
+ return end < str.size();
373
+ }
374
+
375
+ bool isblank(int n) {
376
+ return isspace(n) && n != '\n';
377
+ }
378
+
379
+ bool isTypeComment(StringCordView::Iterator str_iter) {
380
+ c10::string_view rest_line = str_iter.rest_line();
381
+ const std::string type_string = "# type:";
382
+ if (rest_line.size() < type_string.length()) {
383
+ return false;
384
+ }
385
+ auto match_string = rest_line.substr(0, type_string.size());
386
+ return match_string == type_string;
387
+ }
388
+
389
+ // Make an exception ignoring comments for type annotation comments
390
+ bool isTypeComment(StringCordView str, size_t pos) {
391
+ const std::string type_string = "# type:";
392
+ if (str.size() < pos + type_string.length()) {
393
+ return false;
394
+ }
395
+ auto match_string = str.substr(pos, type_string.size());
396
+ return match_string == type_string;
397
+ }
398
+
399
+ TokenTrieRef head;
400
+ };
401
+
402
+ TORCH_API SharedParserData& sharedParserData();
403
+
404
+ struct Token {
405
+ int kind;
406
+ SourceRange range;
407
+ Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {}
408
+ std::string text() {
409
+ return std::string(range.token_text());
410
+ }
411
+ std::string kindString() const {
412
+ return kindToString(kind);
413
+ }
414
+ };
415
+
416
+ struct Lexer {
417
+ explicit Lexer(std::shared_ptr<Source> source)
418
+ : source(std::move(source)),
419
+ pos(0),
420
+ nesting(0),
421
+ indent_stack(),
422
+ next_tokens(),
423
+ shared(sharedParserData()) {
424
+ auto first_indent = lexRaw(true);
425
+ indent_stack.push_back(first_indent.range.size());
426
+ lex();
427
+ }
428
+ // Return the current token, and then move to the next one
429
+ Token next() {
430
+ if (next_tokens.empty())
431
+ reportError("Lexer invariant violated: empty token queue");
432
+ Token r = std::move(next_tokens.front());
433
+ next_tokens.erase(next_tokens.begin());
434
+ if (next_tokens.empty()) {
435
+ lex();
436
+ }
437
+ return r;
438
+ }
439
+ // Skip the current token if it matches the given kind
440
+ bool nextIf(int kind) {
441
+ if (cur().kind != kind)
442
+ return false;
443
+ next();
444
+ return true;
445
+ }
446
+
447
+ [[noreturn]] void reportError(const std::string& what) {
448
+ reportError(what, cur());
449
+ }
450
+ [[noreturn]] void reportError(const std::string& what, const Token& t) {
451
+ std::stringstream ss;
452
+ ss << what << ":\n";
453
+ t.range.highlight(ss);
454
+ throw std::runtime_error(ss.str());
455
+ }
456
+ [[noreturn]] void expected(const std::string& what, const Token& t) {
457
+ std::stringstream ss;
458
+ ss << "expected " << what << " but found '" << t.kindString()
459
+ << "' here:\n";
460
+ t.range.highlight(ss);
461
+ throw std::runtime_error(ss.str());
462
+ }
463
+ [[noreturn]] void expected(const std::string& what) {
464
+ expected(what, cur());
465
+ }
466
+ // Check that the current token has a given kind, return the current token,
467
+ // and advance to the next one.
468
+ Token expect(int kind) {
469
+ if (cur().kind != kind) {
470
+ expected(kindToString(kind));
471
+ }
472
+ return next();
473
+ }
474
+ Token& lookahead() {
475
+ if (next_tokens.size() < 2) {
476
+ lex();
477
+ }
478
+ return next_tokens[1];
479
+ }
480
+ Token& cur() {
481
+ return next_tokens.front();
482
+ }
483
+
484
+ private:
485
+ void lex() {
486
+ auto r = lexRaw();
487
+ switch (r.kind) {
488
+ case '(':
489
+ case '[':
490
+ case '{':
491
+ nesting++;
492
+ break;
493
+ case ')':
494
+ case ']':
495
+ case '}':
496
+ nesting--;
497
+ break;
498
+ case TK_WHITESPACE:
499
+ case TK_WHITESPACE_EOF: {
500
+ const auto depth = static_cast<int64_t>(
501
+ r.kind == TK_WHITESPACE_EOF ? indent_stack.front()
502
+ : r.range.size());
503
+ // note: TK_WHITESPACE_EOF is whitespace right before the EOF token
504
+ // just like we allow the code to be indented to a particular initial
505
+ // indent level, we allow the final indent to be anything and set
506
+ // it back to the initial indent level. This allows the code to be
507
+ // put into string literals inside code without worrying about final
508
+ // whitespace
509
+ if (depth > indent_stack.back()) {
510
+ indent_stack.push_back(depth);
511
+ r.kind = TK_INDENT;
512
+ } else if (depth == indent_stack.back()) {
513
+ r.kind = TK_NEWLINE;
514
+ } else {
515
+ next_tokens.emplace_back(TK_NEWLINE, r.range);
516
+ while (indent_stack.back() != depth) {
517
+ indent_stack.pop_back();
518
+ next_tokens.emplace_back(TK_DEDENT, r.range);
519
+ if (indent_stack.empty()) {
520
+ reportError("invalid indent level " + std::to_string(depth), r);
521
+ }
522
+ }
523
+ return; // We've already queued the tokens
524
+ }
525
+ } break;
526
+ default:
527
+ break;
528
+ }
529
+ next_tokens.push_back(std::move(r));
530
+ }
531
+ Token lexRaw(bool whitespace_token = false) {
532
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
533
+ int kind;
534
+ AT_ASSERT(source);
535
+ if (current == nullptr) {
536
+ AT_ASSERT(pos == 0);
537
+ current = std::make_unique<StringCordView::Iterator>(
538
+ source->text_str().begin());
539
+ }
540
+
541
+ StringCordView::Iterator start_iter = *current;
542
+ StringCordView::Iterator end_iter = *current;
543
+ if (!shared.match(
544
+ *current,
545
+ nesting > 0,
546
+ whitespace_token,
547
+ &kind,
548
+ &start_iter,
549
+ &end_iter)) {
550
+ expected(
551
+ "a valid token",
552
+ Token(
553
+ **current,
554
+ SourceRange(source, start_iter, start_iter.pos() + 1)));
555
+ }
556
+
557
+ auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos()));
558
+ pos = end_iter.pos();
559
+ *current = end_iter;
560
+ return t;
561
+ }
562
+
563
+ std::shared_ptr<Source> source;
564
+ std::unique_ptr<StringCordView::Iterator> current;
565
+ size_t pos;
566
+ size_t nesting; // depth of ( [ { nesting...
567
+ std::vector<int> indent_stack; // stack of indentation level of blocks
568
+ // Invariant: this should always contain at least a single element
569
+ std::vector<Token> next_tokens;
570
+ SharedParserData& shared;
571
+ };
572
+ } // namespace jit
573
+ } // namespace torch
574
+
575
+ C10_CLANG_DIAGNOSTIC_POP()
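To make the token machinery above concrete, here is a small sketch that runs a TorchScript snippet through `Lexer` and prints each token until `TK_EOF`. It only uses declarations visible in this header (plus `Source` from `source_range.h`), but note these `torch/csrc/jit/frontend` headers are internal, so treat the include as an assumption about the build setup rather than a supported entry point.

#include <torch/csrc/jit/frontend/lexer.h>
#include <iostream>
#include <memory>

int main() {
  using namespace torch::jit;
  // Arbitrary snippet; the lexer emits TK_INDENT/TK_DEDENT/TK_NEWLINE as shown above.
  auto source = std::make_shared<Source>("def f(x):\n    return x + 1\n");
  Lexer lexer(std::move(source));

  // next() returns the current token and advances; the stream ends with TK_EOF.
  for (Token tok = lexer.next(); tok.kind != TK_EOF; tok = lexer.next()) {
    std::cout << tok.kindString() << " '" << tok.text() << "'\n";
  }
}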
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h ADDED
@@ -0,0 +1,33 @@
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <torch/csrc/jit/frontend/tree.h>
4
+ #include <torch/csrc/jit/frontend/tree_views.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Decl;
11
+ struct ParserImpl;
12
+ struct Lexer;
13
+
14
+ TORCH_API Decl mergeTypesFromTypeComment(
15
+ const Decl& decl,
16
+ const Decl& type_annotation_decl,
17
+ bool is_method);
18
+
19
+ struct TORCH_API Parser {
20
+ explicit Parser(const std::shared_ptr<Source>& src);
21
+ TreeRef parseFunction(bool is_method);
22
+ TreeRef parseClass();
23
+ Decl parseTypeComment();
24
+ Expr parseExp();
25
+ Lexer& lexer();
26
+ ~Parser();
27
+
28
+ private:
29
+ std::unique_ptr<ParserImpl> pImpl;
30
+ };
31
+
32
+ } // namespace jit
33
+ } // namespace torch
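A brief companion sketch for `Parser`: parse a single `def` and read its name back. The `Def` and `Ident` tree views come from `tree_views.h` (included by this header); their `name()` accessors are an assumption based on the frontend's usual API, not something shown in this diff.

#include <torch/csrc/jit/frontend/parser.h>
#include <torch/csrc/jit/frontend/tree_views.h>
#include <iostream>
#include <memory>

int main() {
  using namespace torch::jit;
  auto source = std::make_shared<Source>("def double_it(x):\n    return x * 2\n");
  Parser parser(source);

  // parseFunction(/*is_method=*/false) consumes one `def` and returns its tree.
  Def def(parser.parseFunction(/*is_method=*/false));
  std::cout << "parsed function: " << def.name().name() << "\n";
}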
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace jit {
5
+ static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
6
+ } // namespace jit
7
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h ADDED
@@ -0,0 +1,68 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <ATen/core/qualified_name.h>
5
+ #include <torch/csrc/jit/frontend/sugared_value.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Resolver;
11
+ using ResolverPtr = std::shared_ptr<Resolver>;
12
+
13
+ /**
14
+ * class Resolver
15
+ *
16
+ * Represents an "outer environment" in which we an look up names and return
17
+ * a corresponding SugaredValue. This is used during compilation to resolve
18
+ * references to names which are not defined internal to the graph.
19
+ *
20
+ * Example: PythonResolver looks at the enclosing Python scope for `name`.
21
+ *
22
+ * NOTE: When adding methods, keep this an abstract class (i.e. all new methods
23
+ * should be purely virtual). Resist the urge to provide a default
24
+ * implementation; you should explicitly think about how each resolver would
25
+ * handle the method.
26
+ */
27
+ struct Resolver {
28
+ virtual ~Resolver() = default;
29
+
30
+ // Resolve a given name to a SugaredValue. This takes the method `m` that the
31
+ // caller is currently constructing, since we may need to insert nodes into
32
+ // the graph to create a value.
33
+ virtual std::shared_ptr<SugaredValue> resolveValue(
34
+ const std::string& name,
35
+ GraphFunction& m,
36
+ const SourceRange& loc) {
37
+ return nullptr;
38
+ }
39
+
40
+ // Resolve `name` to a TypePtr.
41
+ virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
42
+ return nullptr;
43
+ }
44
+ };
45
+
46
+ // A resolver that only understands "torch.foo()" lookups.
47
+ struct NativeResolver : public Resolver {
48
+ std::shared_ptr<SugaredValue> resolveValue(
49
+ const std::string& name,
50
+ GraphFunction& m,
51
+ const SourceRange& loc) override {
52
+ if (name == "torch") {
53
+ return std::make_shared<BuiltinModule>("aten");
54
+ }
55
+ return nullptr;
56
+ }
57
+
58
+ TypePtr resolveType(const std::string& name, const SourceRange& loc)
59
+ override {
60
+ return nullptr;
61
+ }
62
+ };
63
+
64
+ inline std::shared_ptr<NativeResolver> nativeResolver() {
65
+ return std::make_shared<NativeResolver>();
66
+ }
67
+ } // namespace jit
68
+ } // namespace torch
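Because `Resolver` is an abstract interface, callers supply their own environment by subclassing it. The sketch below mirrors `NativeResolver`, mapping one extra, purely illustrative name (`ops`) to the `aten` builtin module and deferring everything else:

#include <torch/csrc/jit/frontend/resolver.h>
#include <memory>

using namespace torch::jit;

// Resolves the illustrative name "ops" the same way NativeResolver resolves
// "torch"; returning nullptr for everything else means "not found here".
struct MyResolver : public Resolver {
  std::shared_ptr<SugaredValue> resolveValue(
      const std::string& name,
      GraphFunction& m,
      const SourceRange& loc) override {
    if (name == "ops") {
      return std::make_shared<BuiltinModule>("aten");
    }
    return nullptr;
  }

  TypePtr resolveType(const std::string& name, const SourceRange& loc)
      override {
    return nullptr;
  }
};

int main() {
  // A ResolverPtr like this could be handed to Object::define(src, resolver)
  // when compiling TorchScript source from C++ (see object.h above).
  ResolverPtr r = std::make_shared<MyResolver>();
  return r == nullptr;
}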
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <ATen/core/jit_type.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/FunctionRef.h>
7
+ #include <torch/csrc/jit/frontend/lexer.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ using TypePtr = c10::TypePtr;
13
+
14
+ struct TORCH_API SchemaTypeParser {
15
+ TypePtr parseBaseType();
16
+ c10::optional<c10::AliasInfo> parseAliasAnnotation();
17
+ std::pair<TypePtr, c10::optional<c10::AliasInfo>> parseType();
18
+ std::tuple</*fake*/ TypePtr, /*real*/ TypePtr, c10::optional<c10::AliasInfo>>
19
+ parseFakeAndRealType();
20
+ c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
21
+ TypePtr parseRefinedTensor();
22
+
23
+ SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
24
+ : complete_tensor_types(parse_complete_tensor_types), L(L) {}
25
+
26
+ private:
27
+ c10::optional<bool> tryToParseRequiresGrad();
28
+ c10::optional<c10::Device> tryToParseDeviceType();
29
+ void parseList(
30
+ int begin,
31
+ int sep,
32
+ int end,
33
+ c10::function_ref<void()> callback);
34
+
35
+ bool complete_tensor_types;
36
+ Lexer& L;
37
+ size_t next_id = 0;
38
+ };
39
+ } // namespace jit
40
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h ADDED
@@ -0,0 +1,55 @@
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/frontend/resolver.h>
5
+ #include <torch/csrc/jit/frontend/tree_views.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ /**
11
+ * class ScriptTypeParser
12
+ *
13
+ * Parses expressions in our typed AST format (TreeView) into types and
14
+ * typenames.
15
+ */
16
+ class TORCH_API ScriptTypeParser {
17
+ public:
18
+ explicit ScriptTypeParser() = default;
19
+ explicit ScriptTypeParser(ResolverPtr resolver)
20
+ : resolver_(std::move(resolver)) {}
21
+
22
+ c10::TypePtr parseTypeFromExpr(const Expr& expr) const;
23
+
24
+ c10::optional<std::pair<c10::TypePtr, int32_t>> parseBroadcastList(
25
+ const Expr& expr) const;
26
+
27
+ c10::TypePtr parseType(const std::string& str);
28
+
29
+ FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self);
30
+
31
+ c10::IValue parseClassConstant(const Assign& assign);
32
+
33
+ private:
34
+ c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const;
35
+
36
+ c10::optional<std::string> parseBaseTypeName(const Expr& expr) const;
37
+ at::TypePtr subscriptToType(
38
+ const std::string& typeName,
39
+ const Subscript& subscript) const;
40
+ std::vector<IValue> evaluateDefaults(
41
+ const SourceRange& r,
42
+ const std::vector<Expr>& default_types,
43
+ const std::vector<Expr>& default_exprs);
44
+ std::vector<Argument> parseArgsFromDecl(const Decl& decl, bool skip_self);
45
+
46
+ std::vector<Argument> parseReturnFromDecl(const Decl& decl);
47
+
48
+ ResolverPtr resolver_ = nullptr;
49
+
50
+ // Need to use `evaluateDefaults` in serialization
51
+ friend struct ConstantTableValue;
52
+ friend struct SourceImporterImpl;
53
+ };
54
+ } // namespace jit
55
+ } // namespace torch
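`ScriptTypeParser` takes Python-style annotation strings instead of schema syntax. A small sketch with an arbitrary annotation; builtin container types should not need a resolver, which is an assumption here since the lookup rules live in the .cpp file:

#include <torch/csrc/jit/frontend/script_type_parser.h>
#include <iostream>

int main() {
  using namespace torch::jit;
  ScriptTypeParser parser; // default-constructed: no custom resolver

  // parseType() goes through the frontend parser, so normal annotation
  // syntax such as Dict[str, List[int]] is accepted.
  c10::TypePtr t = parser.parseType("Dict[str, List[int]]");
  std::cout << t->repr_str() << "\n";
}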
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h ADDED
@@ -0,0 +1,457 @@
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+ #include <c10/util/Optional.h>
4
+
5
+ #include <algorithm>
6
+ #include <iterator>
7
+ #include <memory>
8
+ #include <numeric>
9
+ #include <ostream>
10
+ #include <regex>
11
+ #include <sstream>
12
+ #include <unordered_map>
13
+
14
+ namespace torch::jit {
15
+
16
+ class SourceRangeUnpickler;
17
+ struct SourceRange;
18
+
19
+ // A stringlike class backed by a vector of string_view
20
+ // the string represented is logically the concatenation of the string_views
21
+ // This has the advantage of not needing contiguous memory.
22
+ struct TORCH_API StringCordView {
23
+ StringCordView();
24
+ StringCordView(const StringCordView&) = default;
25
+ StringCordView(StringCordView&&) noexcept = default;
26
+ StringCordView(
27
+ std::vector<c10::string_view> inputs,
28
+ std::vector<std::shared_ptr<std::string>> ownerships);
29
+
30
+ StringCordView& operator=(const StringCordView&) = default;
31
+ StringCordView& operator=(StringCordView&&) noexcept = default;
32
+
33
+ size_t size() const {
34
+ return accumulated_sizes_.back();
35
+ }
36
+
37
+ size_t find(const std::string& tok, size_t start) const;
38
+ size_t find_regex(const std::string& tok, size_t start) const;
39
+ StringCordView substr(size_t start, size_t size) const;
40
+
41
+ char at(size_t index) const {
42
+ return *iter_for_pos(index);
43
+ }
44
+ char operator[](size_t index) const {
45
+ return at(index);
46
+ }
47
+
48
+ std::string str() const {
49
+ std::stringstream ss;
50
+ for (auto s : pieces_) {
51
+ ss << std::string(s);
52
+ }
53
+ return ss.str();
54
+ }
55
+
56
+ bool operator==(const std::string& rhs) const;
57
+
58
+ bool operator==(const StringCordView& rhs) const;
59
+
60
+ c10::string_view piece(size_t index) const {
61
+ return pieces_[index];
62
+ }
63
+
64
+ struct Iterator {
65
+ Iterator(
66
+ const StringCordView* str,
67
+ size_t start_line,
68
+ size_t start_pos,
69
+ size_t size)
70
+ : line_(start_line), pos_(start_pos), str_(str), size_(size) {}
71
+ explicit Iterator(const StringCordView* str)
72
+ : Iterator(str, 0, 0, str->size()) {}
73
+
74
+ Iterator() : Iterator(nullptr, 0, 0, 0) {}
75
+
76
+ Iterator(const Iterator&) = default;
77
+ Iterator(Iterator&&) = default;
78
+ Iterator& operator=(const Iterator&) = default;
79
+ Iterator& operator=(Iterator&&) = default;
80
+
81
+ Iterator operator++() {
82
+ if (size_ == 0) {
83
+ return *this;
84
+ }
85
+ if ((pos_ + 1) < str_->pieces_[line_].size()) {
86
+ pos_++;
87
+ } else {
88
+ line_++;
89
+ pos_ = 0;
90
+ }
91
+ return *this;
92
+ }
93
+
94
+ Iterator operator++(int) {
95
+ Iterator prev(*this);
96
+ ++(*this);
97
+ return prev;
98
+ }
99
+
100
+ Iterator next_iter() const {
101
+ Iterator next(*this);
102
+ ++next;
103
+ return next;
104
+ }
105
+
106
+ Iterator& operator+=(size_t num) {
107
+ if (!has_next()) {
108
+ return *this;
109
+ }
110
+ size_t target_pos = pos_ + num;
111
+ if (target_pos >= str_->accumulated_sizes_[line_] &&
112
+ (line_ + 1) < str_->accumulated_sizes_.size() &&
113
+ target_pos < str_->accumulated_sizes_[line_ + 1]) {
114
+ pos_ = target_pos;
115
+ return *this;
116
+ }
117
+
118
+ size_t target_abs_pos = pos() + num;
119
+ *this = str_->iter_for_pos(target_abs_pos);
120
+ return *this;
121
+ }
122
+
123
+ bool operator==(const Iterator& rhs) const {
124
+ if (!has_next() && !rhs.has_next()) {
125
+ return true;
126
+ }
127
+ return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_);
128
+ }
129
+ bool operator!=(const Iterator& rhs) {
130
+ return !((*this) == rhs);
131
+ }
132
+ bool has_next() const {
133
+ return size_ > 0 && (line_ < str_->pieces_.size());
134
+ }
135
+
136
+ char operator*() const {
137
+ TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size());
138
+ TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size());
139
+ return str_->pieces_[line_].at(pos_);
140
+ }
141
+
142
+ // returns rest of the line of the current iterator
143
+ c10::string_view rest_line() const {
144
+ if (line_ >= str_->pieces_.size()) {
145
+ return "";
146
+ }
147
+
148
+ c10::string_view cur_line = str_->pieces_[line_];
149
+ return cur_line.substr(pos_, std::string::npos);
150
+ }
151
+
152
+ size_t pos() const {
153
+ if (size_ == 0) {
154
+ return 0;
155
+ }
156
+ return str_->accumulated_sizes_[line_] + pos_;
157
+ }
158
+
159
+ private:
160
+ size_t line_;
161
+ size_t pos_;
162
+ const StringCordView* str_;
163
+ size_t size_;
164
+ friend struct StringCordView;
165
+ };
166
+
167
+ Iterator begin() const {
168
+ return Iterator(this, 0, 0, size());
169
+ }
170
+ Iterator end() const {
171
+ return Iterator(this, pieces_.size(), 0, 0);
172
+ }
173
+ Iterator iter_for_pos(size_t pos) const;
174
+
175
+ private:
176
+ std::vector<c10::string_view> pieces_;
177
+ std::vector<size_t> accumulated_sizes_;
178
+ std::vector<std::shared_ptr<std::string>> owned_strings_;
179
+ };
180
+
181
+ // Source represents a code segment. It keeps track of:
182
+ // - text_view : the view into text of the code segment
183
+ // - filename (optional) : if present, represents the name of the file from
184
+ // which the code segment originated.
185
+ // - starting_line_no : represents the line in the original file where the
186
+ // code segment started.
187
+ struct TORCH_API Source {
188
+ // Whether or not Source should copy the string passed in the constructor.
189
+ enum CopiesString { COPIES_STRING, DONT_COPY };
190
+
191
+ explicit Source(
192
+ c10::string_view text_view,
193
+ c10::optional<std::string> filename = c10::nullopt,
194
+ size_t starting_line_no = 0,
195
+ std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr,
196
+ CopiesString copies_str = COPIES_STRING)
197
+ : filename_(std::move(filename)),
198
+ starting_line_no_(starting_line_no),
199
+ gen_ranges_(std::move(gen_ranges)) {
200
+ if (copies_str == COPIES_STRING) {
201
+ std::shared_ptr<std::string> allocated_str =
202
+ std::make_shared<std::string>(text_view.data(), text_view.size());
203
+ text_view_ = StringCordView({*allocated_str}, {allocated_str});
204
+ } else {
205
+ text_view_ = StringCordView({text_view}, {});
206
+ }
207
+
208
+ calc_line_start_offsets();
209
+ }
210
+
211
+ explicit Source(
212
+ StringCordView str,
213
+ c10::optional<std::string> filename = c10::nullopt,
214
+ size_t starting_line_no = 0,
215
+ std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
216
+ : text_view_(std::move(str)),
217
+ filename_(std::move(filename)),
218
+ starting_line_no_(starting_line_no),
219
+ gen_ranges_(std::move(gen_ranges)) {
220
+ calc_line_start_offsets();
221
+ }
222
+ // Given a line number (within source_), return the byte offset of the
223
+ // beginning of that line.
224
+ size_t offset_for_line(size_t line) const {
225
+ return line_starting_offsets_.at(line);
226
+ }
227
+
228
+ // Returns number of lines present.
229
+ size_t num_lines() const {
230
+ return line_starting_offsets_.size();
231
+ }
232
+
233
+ // Calculate the line (within the code segment) on which `offset` resides.
234
+ size_t lineno_for_offset(size_t offset) const {
235
+ auto iter = std::upper_bound(
236
+ line_starting_offsets_.begin(), line_starting_offsets_.end(), offset);
237
+ return iter - line_starting_offsets_.begin() - 1;
238
+ }
239
+
240
+ // Calculate the line (within the original source file, if present) on which
241
+ // `lineno` resides.
242
+ size_t lineno_to_source_lineno(size_t lineno) const {
243
+ if (filename_) {
244
+ return lineno + starting_line_no_;
245
+ } else {
246
+ return lineno;
247
+ }
248
+ }
249
+
250
+ StringCordView get_line(size_t lineno) const {
251
+ auto start = offset_for_line(lineno);
252
+ auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start
253
+ : text_view_.size() - start;
254
+ return text_view_.substr(start, size);
255
+ }
256
+
257
+ const StringCordView& text_str() const {
258
+ return text_view_;
259
+ }
260
+
261
+ char char_at(size_t index) const {
262
+ return text_view_.at(index);
263
+ }
264
+
265
+ size_t size() const {
266
+ return text_view_.size();
267
+ }
268
+
269
+ c10::optional<std::string>& filename() {
270
+ return filename_;
271
+ }
272
+
273
+ size_t starting_line_no() const {
274
+ return starting_line_no_;
275
+ }
276
+
277
+ c10::optional<SourceRange> findSourceRangeThatGenerated(
278
+ const SourceRange& range);
279
+
280
+ ~Source() = default;
281
+
282
+ private:
283
+ void calc_line_start_offsets() {
284
+ line_starting_offsets_.clear();
285
+ line_starting_offsets_.push_back(0);
286
+ size_t pos = 0;
287
+ while ((pos = text_view_.find("\n", pos)) != std::string::npos) {
288
+ line_starting_offsets_.push_back(++pos);
289
+ }
290
+ }
291
+
292
+ StringCordView text_view_;
293
+
294
+ c10::optional<std::string> filename_;
295
+ // If filename_ is not present, starting_line_no_ is ignored
296
+ size_t starting_line_no_;
297
+ // Starting offsets for lines into the source. e.g. line 0 starts at
298
+ // line_starting_offsets_[0], etc.
299
+ std::vector<size_t> line_starting_offsets_;
300
+
301
+ std::shared_ptr<SourceRangeUnpickler> gen_ranges_;
302
+ };
303
+
304
+ // A SourceRange is a reference to subset of a Source, specified by `start` and
305
+ // `end` byte offsets into the source text.
306
+ struct TORCH_API SourceRange {
307
+ SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_)
308
+ : source_view_(std::move(source_view)), start_(start_), end_(end_) {
309
+ if (source_view_) {
310
+ start_iter_ = source_view_->text_str().iter_for_pos(start_);
311
+ }
312
+ }
313
+
314
+ SourceRange() : source_view_(nullptr), start_(0), end_(0) {}
315
+
316
+ SourceRange(
317
+ std::shared_ptr<Source> source_view_,
318
+ StringCordView::Iterator start_iter,
319
+ size_t end_)
320
+ : source_view_(std::move(source_view_)),
321
+ start_(start_iter.pos()),
322
+ end_(end_),
323
+ start_iter_(start_iter) {}
324
+
325
+ const c10::string_view token_text() const {
326
+ size_t size = end() - start();
327
+ return start_iter_.rest_line().substr(0, size);
328
+ }
329
+
330
+ const StringCordView text() const {
331
+ return source_view_->text_str().substr(start(), end() - start());
332
+ }
333
+ size_t size() const {
334
+ return end() - start();
335
+ }
336
+ static const size_t CONTEXT = 3;
337
+ void highlight(std::ostream& out) const;
338
+
339
+ // Customizable version of 'highlight' method.
340
+ void print_with_context(
341
+ std::ostream& out,
342
+ size_t context,
343
+ bool highlight,
344
+ const std::string& funcname) const;
345
+
346
+ const std::shared_ptr<Source>& source() const {
347
+ return source_view_;
348
+ }
349
+ size_t start() const {
350
+ return start_;
351
+ }
352
+ size_t end() const {
353
+ return end_;
354
+ }
355
+ std::string str() const {
356
+ std::stringstream ss;
357
+ highlight(ss);
358
+ return ss.str();
359
+ }
360
+
361
+ c10::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const {
362
+ if (!source_view_ || !source()->filename()) {
363
+ return c10::nullopt;
364
+ }
365
+
366
+ auto lineno = source_view_->lineno_for_offset(start_);
367
+ auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno);
368
+ // TODO: c10::optional<>::value returns an rvalue ref so can't use it here??
369
+ return std::make_tuple<std::string, size_t, size_t>(
370
+ source_view_->filename().value_or(""),
371
+ source_view_->lineno_to_source_lineno(lineno),
372
+ (size_t)col_offset);
373
+ }
374
+
375
+ bool operator==(const SourceRange& rhs) const {
376
+ return start() == rhs.start() && end() == rhs.end() &&
377
+ source() == rhs.source();
378
+ }
379
+
380
+ bool operator!=(const SourceRange& rhs) const {
381
+ return !(*this == rhs);
382
+ }
383
+
384
+ c10::optional<SourceRange> findSourceRangeThatGenerated() const {
385
+ if (!source_view_) {
386
+ return c10::nullopt;
387
+ }
388
+ return source_view_->findSourceRangeThatGenerated(*this);
389
+ }
390
+
391
+ protected:
392
+ std::shared_ptr<Source> source_view_;
393
+
394
+ private:
395
+ size_t start_;
396
+ size_t end_;
397
+ StringCordView::Iterator start_iter_;
398
+ };
399
+
400
+ // OwnedSourceRange is just like a SourceRange except that it owns its
401
+ // `Source` rather than sharing one. Thus OwnedSourceRange owns a copy of the source text.
402
+ struct OwnedSourceRange : public SourceRange {
403
+ explicit OwnedSourceRange(const SourceRange& source_range)
404
+ : SourceRange(source_range) {
405
+ const auto& source = source_range.source();
406
+ if (source) {
407
+ source_view_ = std::make_shared<Source>(
408
+ source->text_str().str(),
409
+ source->filename(),
410
+ source->starting_line_no());
411
+ }
412
+ }
413
+ };
414
+
415
+ struct TORCH_API SourceRangeHasher {
416
+ public:
417
+ size_t operator()(const torch::jit::SourceRange& key) const;
418
+ };
419
+
420
+ struct StackEntry {
421
+ std::string filename;
422
+ SourceRange range;
423
+ };
424
+
425
+ TORCH_API void format_stack_trace(
426
+ std::ostream& out,
427
+ const std::vector<StackEntry>& entries);
428
+
429
+ inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
430
+ range.highlight(out);
431
+ return out;
432
+ }
433
+
434
+ // A pair of (byte offset, SourceRange) describing a specific segment
435
+ // of the output stream
436
+ struct TaggedRange {
437
+ TaggedRange(size_t bytes, SourceRange range)
438
+ : bytes(bytes), range(std::move(range)) {}
439
+ size_t bytes;
440
+ SourceRange range;
441
+ };
442
+ using SourceRangeRecords = std::vector<TaggedRange>;
443
+ using SourceRangeTagMap =
444
+ std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;
445
+
446
+ } // namespace torch::jit
447
+
448
+ namespace std {
449
+ template <>
450
+ struct iterator_traits<torch::jit::StringCordView::Iterator> {
451
+ using value_type = char;
452
+ using difference_type = ptrdiff_t;
453
+ using pointer = char*;
454
+ using reference = char&;
455
+ using iterator_category = std::forward_iterator_tag;
456
+ };
457
+ } // namespace std
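
For orientation, a minimal usage sketch of the API above (not part of the header). It relies only on the Source constructor already exercised by OwnedSourceRange and on the SourceRange accessors declared above; the example text, filename, and byte offsets are made up for illustration.

#include <torch/csrc/jit/frontend/source_range.h>
#include <iostream>

void demo_source_range() {
  using namespace torch::jit;
  // Build a Source from a small snippet, tagged with a (hypothetical) filename.
  auto src = std::make_shared<Source>(
      std::string("def f(x):\n  return x + 1\n"),
      std::string("example.py"),
      /*starting_line_no=*/1);
  // Refer to the bytes spelling "return" (offsets 12..18 in the text above).
  SourceRange range(src, /*start_=*/12, /*end_=*/18);
  if (auto flc = range.file_line_col()) {
    // file_line_col() maps the byte offset back to (filename, line, column).
    std::cout << std::get<0>(*flc) << ":" << std::get<1>(*flc) << ":"
              << std::get<2>(*flc) << "\n";
  }
  range.highlight(std::cout); // prints the offending line with the range marked
}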
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <c10/macros/Export.h>
8
+ #include <torch/csrc/jit/frontend/source_range.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * SourceRef does two things:
15
+ * 1. Owns a Source object.
16
+ * 2. Serves as lookup key to the owned Source in associative containers, for
17
+ * runtime data aggregation.
18
+ * We don't want to use std::shared_ptr<Source> directly because we want to
19
+ * support heterogeneous lookup, and also shared_ptr is an implementation detail
20
+ * which should be encapsulated.
21
+ */
22
+ class TORCH_API SourceRef : public CustomClassHolder {
23
+ public:
24
+ explicit SourceRef(std::shared_ptr<Source> source_view)
25
+ : source_view_(std::move(source_view)) {}
26
+ bool operator==(const SourceRef& other) const {
27
+ return source_view_ == other.source_view_;
28
+ }
29
+ bool operator<(const Source& other) const {
30
+ return source_view_.get() < &other;
31
+ }
32
+ friend bool operator<(const Source& other, const SourceRef& self) {
33
+ return &other < self.source_view_.get();
34
+ }
35
+ bool operator<(const SourceRef& other) const {
36
+ return *this < *other.source_view_.get();
37
+ }
38
+ const Source* operator->() const {
39
+ return source_view_.get();
40
+ }
41
+
42
+ private:
43
+ std::shared_ptr<Source> source_view_;
44
+ };
45
+
46
+ } // namespace jit
47
+ } // namespace torch
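
A hypothetical sketch of SourceRef used as an associative-container key, which is the role the comment above describes; the counter map and helper name below are illustrative only.

#include <torch/csrc/jit/frontend/source_range.h>
#include <torch/csrc/jit/frontend/source_ref.h>
#include <cstdint>
#include <map>

// Count how many SourceRanges were seen per owning Source.
void tally_range(
    std::map<torch::jit::SourceRef, int64_t>& per_source_counts,
    const torch::jit::SourceRange& range) {
  if (const auto& src = range.source()) {
    // SourceRef keeps the Source alive and orders entries by Source identity.
    per_source_counts[torch::jit::SourceRef(src)] += 1;
  }
}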
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API double strtod_c(const char* nptr, char** endptr);
9
+ TORCH_API float strtof_c(const char* nptr, char** endptr);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
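
A small, assumed usage sketch: these helpers behave like strtod/strtof but presumably parse with '.' as the decimal separator regardless of the global locale, which is what a lexer needs for numeric literals; the literal below is illustrative.

#include <torch/csrc/jit/frontend/strtod.h>
#include <cassert>
#include <string>

void parse_numeric_literal() {
  const char* text = "3.5e2)";
  char* end = nullptr;
  double v = torch::jit::strtod_c(text, &end);
  assert(v == 350.0);               // 3.5e2 parsed as a double
  assert(std::string(end) == ")");  // endptr points just past the number
}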
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h ADDED
@@ -0,0 +1,857 @@
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <utility>
7
+
8
+ #include <ATen/core/symbol.h>
9
+ #include <caffe2/serialize/versions.h>
10
+ #include <torch/csrc/jit/api/module.h>
11
+ #include <torch/csrc/jit/frontend/error_report.h>
12
+ #include <torch/csrc/jit/frontend/schema_matching.h>
13
+ #include <torch/csrc/jit/frontend/versioned_symbols.h>
14
+ #include <torch/csrc/jit/ir/ir.h>
15
+
16
+ namespace torch {
17
+ namespace jit {
18
+
19
+ using SugaredValuePtr = std::shared_ptr<SugaredValue>;
20
+
21
+ // The AST can contain nodes like `self`, `self.b` or `python_fn` that
22
+ // are not first-class values in the graph representation, but instead
23
+ // will be desugared based on how they are used in the AST.
24
+
25
+ // SugaredValue is used to temporarily represent these values in a way
26
+ // that separates their behavior from the AST -> IR converter itself.
27
+ // This allows us to keep dependencies on python minimal.
28
+
29
+ struct TORCH_API SugaredValue
30
+ : public std::enable_shared_from_this<SugaredValue> {
31
+ // what is this node? for error reporting (e.g. Module, python function)
32
+ virtual std::string kind() const = 0;
33
+
34
+ // what can we do with this thing?
35
+ // use it as a value e.g. `this + 4`
36
+ virtual Value* asValue(const SourceRange& loc, GraphFunction& m) {
37
+ throw ErrorReport(loc) << kind() << " cannot be used as a value";
38
+ }
39
+
40
+ // select an attribute on it, e.g. `this.field`
41
+ virtual std::shared_ptr<SugaredValue> attr(
42
+ const SourceRange& loc,
43
+ GraphFunction& m,
44
+ const std::string& field) {
45
+ throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
46
+ }
47
+
48
+ virtual bool hasAttr(
49
+ const SourceRange& loc,
50
+ GraphFunction& m,
51
+ const std::string& field) {
52
+ throw ErrorReport(loc) << "attribute lookup is not defined on " << kind();
53
+ }
54
+
55
+ // assign an attribute on it, e.g. `this.field = newValue`
56
+ virtual void setAttr(
57
+ const SourceRange& loc,
58
+ GraphFunction& m,
59
+ const std::string& field,
60
+ Value* newValue) {
61
+ throw ErrorReport(loc) << "attribute assignment is not defined on "
62
+ << kind();
63
+ }
64
+
65
+ // use it as a vector of values, e.g. a tuple of values as return value from
66
+ // a method invocation
67
+ virtual std::vector<std::shared_ptr<SugaredValue>> asTuple(
68
+ const SourceRange& loc,
69
+ GraphFunction& m,
70
+ const c10::optional<size_t>& size_hint = {}) {
71
+ throw ErrorReport(loc) << kind() << " cannot be used as a tuple";
72
+ }
73
+
74
+ // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API
75
+ virtual SugaredValuePtr asTupleValue(
76
+ const SourceRange& loc,
77
+ GraphFunction& m) {
78
+ throw ErrorReport(loc) << kind() << " cannot be used as a tuplevalue";
79
+ }
80
+
81
+ virtual std::vector<std::shared_ptr<SugaredValue>> asType(
82
+ const SourceRange& loc,
83
+ Method& m) {
84
+ throw ErrorReport(loc) << kind() << " cannot be used as a type";
85
+ }
86
+
87
+ // call it like a function, e.g. `outputs = this(inputs)`
88
+ virtual std::shared_ptr<SugaredValue> call(
89
+ const SourceRange& loc,
90
+ GraphFunction& m,
91
+ // note: names for args will be 'argument 0', 'argument 1', etc..
92
+ at::ArrayRef<NamedValue> args,
93
+ at::ArrayRef<NamedValue> kwargs,
94
+ size_t n_binders) {
95
+ // n_binders is always set to the number of variables an expression is
96
+ // syntactically bound to:
97
+ // a = foo() # 1 binder (note in this case the single binder might be a
98
+ // tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0
99
+ // binders
100
+ //
101
+ // In subexpressions, like bar() in foo(bar()), n_binders is always set to
102
+ // 1. n_binders is used as a hint to subexpressions to determine how many
103
+ // values they should return when that number is ambiguous statically. In
104
+ // particular it is currently used to decide how many tensors a call to a
105
+ // python function will return. It is only a hint, functions do not have to
106
+ // check that n_binders match the number of things they are returning, the
107
+ // assignment logic will do that anyway.
108
+
109
+ throw ErrorReport(loc) << "cannot call a " << kind();
110
+ }
111
+
112
+ // This function is called when to convert a SugaredValue to its iterator.
113
+ // For example, when iterating through a Dict we iterate over its keys
114
+ virtual std::shared_ptr<SugaredValue> iter(
115
+ const SourceRange& loc,
116
+ GraphFunction& m) {
117
+ throw ErrorReport(loc) << kind() << " cannot be used as an iterable";
118
+ }
119
+
120
+ // If we are iterating over a Sugared Value and it returns a value from this
121
+ // function, then we emit an unrolled loop over the variable. This allows us
122
+ // to support containers of Heterogenous types, like Module Containers &
123
+ // Tuples
124
+ virtual c10::optional<int64_t> staticLen() {
125
+ return c10::nullopt;
126
+ }
127
+
128
+ // When iterating over this SugaredValue, should we emit the for loop as an
129
+ // unrolled loop.
130
+ bool shouldEmitUnrolled() {
131
+ return staticLen() != c10::nullopt;
132
+ }
133
+
134
+ // return length of this thing, if not then it can't be iterated.
135
+ // If it does not have a statically-determinable length, then it cannot
136
+ // be iterated over with a modulelist. If it does it must return a constant
137
+ // Value *
138
+ virtual Value* len(const SourceRange& loc, GraphFunction& m) {
139
+ throw ErrorReport(loc) << "'" << kind() << "'"
140
+ << " object is not iterable";
141
+ }
142
+
143
+ // expression for ith elemement for iterable value
144
+ virtual std::shared_ptr<SugaredValue> getitem(
145
+ const SourceRange& loc,
146
+ GraphFunction& m,
147
+ Value* idx,
148
+ TypePtr type_hint = nullptr) {
149
+ throw ErrorReport(loc) << "'" << kind() << "'"
150
+ << " object is not subscriptable";
151
+ }
152
+
153
+ virtual ~SugaredValue() = default;
154
+ };
155
+
156
+ // most things in the environment are just simple value types
157
+ // and not special python syntax sugar types
158
+ struct TORCH_API SimpleValue : public SugaredValue {
159
+ SimpleValue(Value* value) : value_(value) {}
160
+ std::string kind() const override {
161
+ std::stringstream ss;
162
+ // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
163
+ ss << "value of type '" << value_->type()->annotation_str() << "'";
164
+ return ss.str();
165
+ }
166
+ Value* asValue(const SourceRange& range, GraphFunction& m) override {
167
+ return value_;
168
+ }
169
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
170
+ const SourceRange& loc,
171
+ GraphFunction& m,
172
+ const c10::optional<size_t>& size_hint = {}) override;
173
+ std::shared_ptr<SugaredValue> attr(
174
+ const SourceRange& loc,
175
+ GraphFunction& m,
176
+ const std::string& field) override;
177
+
178
+ bool hasAttr(
179
+ const SourceRange& loc,
180
+ GraphFunction& m,
181
+ const std::string& field) override;
182
+
183
+ void setAttr(
184
+ const SourceRange& loc,
185
+ GraphFunction& m,
186
+ const std::string& field,
187
+ Value* newValue) override;
188
+
189
+ std::shared_ptr<SugaredValue> call(
190
+ const SourceRange& loc,
191
+ GraphFunction& m,
192
+ // note: names for args will be 'argument 0', 'argument 1', etc..
193
+ at::ArrayRef<NamedValue> args,
194
+ at::ArrayRef<NamedValue> kwargs,
195
+ size_t n_binders) override;
196
+
197
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
198
+ override;
199
+
200
+ Value* getValue() const {
201
+ return value_;
202
+ }
203
+
204
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
205
+ SugaredValuePtr getitem(
206
+ const SourceRange& loc,
207
+ GraphFunction& m,
208
+ Value* idx,
209
+ TypePtr type_hint = nullptr) override;
210
+
211
+ private:
212
+ Value* value_;
213
+ };
214
+
215
+ struct TORCH_API BuiltinFunction : public SugaredValue {
216
+ BuiltinFunction(Symbol symbol, c10::optional<NamedValue> self)
217
+ : symbol(symbol), self(std::move(self)) {}
218
+
219
+ // The symbol of the function (e.g. `aten::relu`).
220
+ Symbol symbol;
221
+
222
+ // if this is method, then this is the self argument.
223
+ c10::optional<NamedValue> self;
224
+ std::string kind() const override {
225
+ return "builtin";
226
+ }
227
+ std::shared_ptr<SugaredValue> call(
228
+ const SourceRange& loc,
229
+ GraphFunction& m,
230
+ at::ArrayRef<NamedValue> args,
231
+ at::ArrayRef<NamedValue> kwargs,
232
+ size_t n_binders) override;
233
+
234
+ // try to create this builtin but if it doesn't exist or the self argument
235
+ // cannot possibly match, then return nullptr. Use in situations where it is
236
+ // not clear if it is a valid builtin
237
+ static std::shared_ptr<BuiltinFunction> tryCreate(
238
+ Symbol symbol,
239
+ c10::optional<NamedValue> self);
240
+ };
241
+
242
+ struct TORCH_API SugaredTupleValue : public SugaredValue {
243
+ explicit SugaredTupleValue(std::vector<std::shared_ptr<SugaredValue>> tup)
244
+ : tup_(std::move(tup)){};
245
+
246
+ std::vector<std::shared_ptr<SugaredValue>> asTuple(
247
+ const SourceRange& loc,
248
+ GraphFunction& m,
249
+ const c10::optional<size_t>& size_hint = {}) override {
250
+ return tup_;
251
+ };
252
+
253
+ Value* asValue(const SourceRange& loc, GraphFunction& m) override {
254
+ std::vector<Value*> vec;
255
+ vec.reserve(tup_.size());
256
+ for (const auto& sv : tup_) {
257
+ vec.push_back(sv->asValue(loc, m));
258
+ }
259
+ Graph& g = *m.graph();
260
+ return g.insertNode(g.createTuple(vec))->output();
261
+ }
262
+
263
+ std::string kind() const override {
264
+ return "Tuple";
265
+ }
266
+
267
+ SugaredValuePtr getitem(
268
+ const SourceRange& loc,
269
+ GraphFunction& m,
270
+ Value* idx,
271
+ TypePtr type_hint = nullptr) override {
272
+ if (!(idx->type()->cast<IntType>() && toIValue(idx))) {
273
+ throw ErrorReport(loc)
274
+ << "Expected integer literal for index but got a variable or non-integer. "
275
+ << "ModuleList/Sequential indexing is only supported with integer literals. "
276
+ << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. "
277
+ << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'";
278
+ }
279
+ auto index = toIValue(idx)->toInt();
280
+ int64_t adj_index =
281
+ (index < 0) ? index + static_cast<int64_t>(tup_.size()) : index;
282
+ if (!(adj_index >= 0 && adj_index < static_cast<int64_t>(tup_.size()))) {
283
+ throw ErrorReport(loc)
284
+ << "Index " << index << " out of range of length " << tup_.size();
285
+ }
286
+ return tup_.at(adj_index);
287
+ }
288
+
289
+ // This function is called when a SugaredValue is used to convert a
290
+ // SugaredValue to its iterator. For example, when iterating through a Dict we
291
+ // iterate over its keys
292
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
293
+ override {
294
+ return shared_from_this();
295
+ };
296
+
297
+ // Because this is used to contain SugaredValues of Heterogenous types,
298
+ // we define staticLen() so that when this is iterated over it is emitted
299
+ // as an unrolled loop.
300
+ c10::optional<int64_t> staticLen() override {
301
+ return static_cast<int64_t>(tup_.size());
302
+ }
303
+
304
+ std::vector<std::shared_ptr<SugaredValue>> tup_;
305
+ };
306
+
307
+ struct TORCH_API BuiltinModule : public SugaredValue {
308
+ BuiltinModule(std::string name, c10::optional<int64_t> version = at::nullopt)
309
+ : name(std::move(name)), version(version) {}
310
+
311
+ std::string kind() const override {
312
+ return "builtin module";
313
+ }
314
+ std::shared_ptr<SugaredValue> attr(
315
+ const SourceRange& loc,
316
+ GraphFunction& m,
317
+ const std::string& field) override {
318
+ if (field == "autograd") {
319
+ // When refering torch.autograd, it is also considered to be a
320
+ // BuiltinModule and we will dispatch to the aten operators for the
321
+ // methods under its module.
322
+ return std::make_shared<BuiltinModule>("aten", version);
323
+ }
324
+
325
+ auto sym = Symbol::fromQualString(name + "::" + field);
326
+ return std::make_shared<BuiltinFunction>(sym, c10::nullopt);
327
+ }
328
+
329
+ private:
330
+ std::string name;
331
+ // when we add operator versioning, emit this op as it exising at 'version'
332
+ // if not set, use the latest version
333
+ c10::optional<int64_t> version;
334
+ };
335
+
336
+ // Represents a class, analagous to `int` or `dict`. Instances of classes,
337
+ // like `1` or `{"foo": 5}`, are represented as SimpleValues
338
+ struct TORCH_API ClassValue : public SugaredValue {
339
+ explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {}
340
+
341
+ // Call the type's constructor, as in:
342
+ // n = Foo(constructor_arg)
343
+ std::shared_ptr<SugaredValue> call(
344
+ const SourceRange& loc,
345
+ GraphFunction& m,
346
+ at::ArrayRef<NamedValue> args,
347
+ at::ArrayRef<NamedValue> kwargs,
348
+ size_t n_binders) override;
349
+
350
+ std::shared_ptr<SugaredValue> attr(
351
+ const SourceRange& loc,
352
+ GraphFunction& m,
353
+ const std::string& field) override;
354
+
355
+ std::string kind() const override {
356
+ return type_->str();
357
+ }
358
+
359
+ ClassTypePtr type_;
360
+ };
361
+
362
+ struct TORCH_API NamedTupleConstructor : public SugaredValue {
363
+ explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {}
364
+
365
+ std::shared_ptr<SugaredValue> call(
366
+ const SourceRange& loc,
367
+ GraphFunction& m,
368
+ at::ArrayRef<NamedValue> args,
369
+ at::ArrayRef<NamedValue> kwargs,
370
+ size_t n_binders) override;
371
+
372
+ std::string kind() const override {
373
+ return type_->str();
374
+ }
375
+
376
+ TupleTypePtr type_;
377
+ };
378
+
379
+ struct FunctionValue : public SugaredValue {
380
+ FunctionValue(Function* callee) : callees_({callee}) {}
381
+ FunctionValue(const StrongFunctionPtr& p)
382
+ : callees_({p.function_}), cu_(p.cu_) {}
383
+ FunctionValue(const std::vector<StrongFunctionPtr>& callees) {
384
+ for (const StrongFunctionPtr& callee : callees) {
385
+ cu_ = cu_ ? cu_ : callee.cu_;
386
+ TORCH_INTERNAL_ASSERT(callee.cu_ == cu_);
387
+ callees_.push_back(callee.function_);
388
+ }
389
+ }
390
+
391
+ std::string kind() const override {
392
+ return "function";
393
+ }
394
+
395
+ std::shared_ptr<SugaredValue> call(
396
+ const SourceRange& loc,
397
+ GraphFunction& f,
398
+ at::ArrayRef<NamedValue> args,
399
+ at::ArrayRef<NamedValue> kwargs,
400
+ size_t n_binders) override {
401
+ std::vector<const FunctionSchema*> schemas;
402
+ for (Function* callee : callees_) {
403
+ try {
404
+ callee->ensure_defined();
405
+ } catch (const RecursiveMethodCallError&) {
406
+ throw ErrorReport(loc)
407
+ << " function '" << callee->name() << "' is called recursively. "
408
+ << "Recursive calls are not supported";
409
+ }
410
+ schemas.push_back(&callee->getSchema());
411
+ }
412
+ auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs);
413
+ Value* output =
414
+ f.graph()->insertFunctionCall(callees_[match.first], match.second);
415
+ output->node()->setSourceRange(loc);
416
+ return std::make_shared<SimpleValue>(output);
417
+ }
418
+
419
+ const std::vector<Function*>& callees() {
420
+ return callees_;
421
+ }
422
+
423
+ private:
424
+ std::vector<Function*> callees_;
425
+ // TODO holding this thing is creepy
426
+ std::shared_ptr<CompilationUnit> cu_;
427
+ };
428
+
429
+ struct TORCH_API ClosureValue : public SugaredValue {
430
+ ClosureValue(Value* value) : value_(value) {
431
+ TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure);
432
+ }
433
+ std::string kind() const override {
434
+ return "closure";
435
+ }
436
+ Value* asValue(const SourceRange& range, GraphFunction& m) override {
437
+ return value_;
438
+ }
439
+ Value* value_;
440
+ };
441
+
442
+ // defines how a method obtained from a module/class/interface behaves in script
443
+ struct MethodValue : public SugaredValue {
444
+ MethodValue(Value* self, std::vector<std::string> method_names)
445
+ : self_(self), method_names_(std::move(method_names)) {}
446
+ MethodValue(Value* self, std::string method_name)
447
+ : MethodValue(self, std::vector<std::string>({std::move(method_name)})) {}
448
+
449
+ std::string kind() const override {
450
+ return "method";
451
+ }
452
+
453
+ std::shared_ptr<SugaredValue> call(
454
+ const SourceRange& loc,
455
+ GraphFunction& f,
456
+ at::ArrayRef<NamedValue> args,
457
+ at::ArrayRef<NamedValue> kwargs,
458
+ size_t n_binders) override {
459
+ std::vector<NamedValue> argsWithSelf = {self_};
460
+ argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end());
461
+ std::vector<const FunctionSchema*> schemas;
462
+ for (const std::string& method_name : method_names_) {
463
+ if (auto class_type = self_->type()->cast<ClassType>()) {
464
+ Function& method = class_type->getMethod(method_name);
465
+ try {
466
+ method.ensure_defined();
467
+ } catch (const RecursiveMethodCallError&) {
468
+ throw ErrorReport(loc)
469
+ << " method '" << method.name() << "' is called recursively. "
470
+ << "Recursive calls are not supported";
471
+ }
472
+ schemas.push_back(&method.getSchema());
473
+ } else if (auto interface_type = self_->type()->cast<InterfaceType>()) {
474
+ schemas.push_back(interface_type->getMethod(method_name));
475
+ } else {
476
+ TORCH_INTERNAL_ASSERT(
477
+ false, "method constructed that is not a class or interface");
478
+ }
479
+ }
480
+ auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs);
481
+ Value* output =
482
+ f.graph()->insertMethodCall(method_names_[match.first], match.second);
483
+ output->node()->setSourceRange(loc);
484
+ return std::make_shared<SimpleValue>(output);
485
+ }
486
+
487
+ private:
488
+ Value* self_;
489
+ std::vector<std::string> method_names_;
490
+ };
491
+
492
+ struct TORCH_API PrintValue : public SugaredValue {
493
+ std::string kind() const override {
494
+ return "print";
495
+ }
496
+ std::shared_ptr<SugaredValue> call(
497
+ const SourceRange& loc,
498
+ GraphFunction& m,
499
+ at::ArrayRef<NamedValue> args,
500
+ at::ArrayRef<NamedValue> kwargs,
501
+ size_t n_binders) override;
502
+ };
503
+
504
+ // expressions like int(x)
505
+ // these are the same as call prim::Int or equivalent except it
506
+ // is a noop when the input is a subtype of 'type'
507
+ struct TORCH_API CastValue : public BuiltinFunction {
508
+ CastValue(TypePtr type, c10::Symbol method)
509
+ : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {}
510
+ std::shared_ptr<SugaredValue> call(
511
+ const SourceRange& loc,
512
+ GraphFunction& m,
513
+ at::ArrayRef<NamedValue> args,
514
+ at::ArrayRef<NamedValue> kwargs,
515
+ size_t n_binders) override {
516
+ if (args.size() == 1 && kwargs.empty()) {
517
+ auto len_op = std::make_shared<BuiltinFunction>(aten::len, at::nullopt);
518
+ auto gt_op = std::make_shared<BuiltinFunction>(aten::gt, at::nullopt);
519
+ auto zero = m.graph()->insertConstant(0);
520
+
521
+ auto v = args[0].value(*m.graph());
522
+ if (v->type()->isSubtypeOf(*type_)) {
523
+ return std::make_shared<SimpleValue>(v);
524
+ } else if (
525
+ *type_ == *BoolType::get() &&
526
+ (v->type()->isSubtypeOf(*AnyListType::get()) ||
527
+ v->type()->isSubtypeOf(*StringType::get()) ||
528
+ v->type()->cast<DictType>())) {
529
+ auto len = len_op->call(loc, m, {v}, {}, 1);
530
+ return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1);
531
+ }
532
+ }
533
+ return BuiltinFunction::call(loc, m, args, kwargs, n_binders);
534
+ }
535
+
536
+ private:
537
+ TypePtr type_;
538
+ };
539
+
540
+ struct TORCH_API TensorCastValue : public SugaredValue {
541
+ TensorCastValue(at::ScalarType type, NamedValue self)
542
+ : dtype_(type), self_(std::move(self)) {}
543
+
544
+ std::string kind() const override {
545
+ return "Cast";
546
+ }
547
+
548
+ std::shared_ptr<SugaredValue> call(
549
+ const SourceRange& loc,
550
+ GraphFunction& m,
551
+ at::ArrayRef<NamedValue> args,
552
+ at::ArrayRef<NamedValue> kwargs,
553
+ size_t n_binders) override {
554
+ TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty());
555
+ Value* dtype_const = m.graph()->insertConstant(dtype_, loc);
556
+ std::vector<NamedValue> kwargs_{
557
+ self_, NamedValue(loc, "dtype", dtype_const)};
558
+ Value* casted_val = m.graph()->insert(
559
+ /*opname=*/Symbol::fromQualString("aten::to"),
560
+ /*args=*/args,
561
+ /*kwargs=*/kwargs_,
562
+ /*range=*/loc);
563
+ return std::make_shared<SimpleValue>(casted_val);
564
+ }
565
+
566
+ at::ScalarType dtype_;
567
+ NamedValue self_;
568
+ };
569
+
570
+ // builtins operators and functions that call a method if it exists
571
+ // on a class type, like 'len(x)' and 'x + y'
572
+ struct TORCH_API MagicMethod : public SugaredValue {
573
+ MagicMethod(std::string desugared_name, SugaredValuePtr base)
574
+ : base_value_(std::move(base)),
575
+ desugared_name_(std::move(desugared_name)) {}
576
+
577
+ std::string kind() const override {
578
+ return desugared_name_;
579
+ }
580
+
581
+ std::shared_ptr<SugaredValue> call(
582
+ const SourceRange& loc,
583
+ GraphFunction& m,
584
+ at::ArrayRef<NamedValue> args,
585
+ at::ArrayRef<NamedValue> kwargs,
586
+ size_t n_binders) override;
587
+
588
+ private:
589
+ SugaredValuePtr base_value_;
590
+ std::string desugared_name_;
591
+ };
592
+
593
+ // things that look like function applications, but
594
+ // perform non-standard evaluation are represented
595
+ // with SpecialFormValues, e.g.
596
+ // isinstance(x, int)
597
+ // fork(fn)
598
+ // annotate(int, 3)
599
+ // The implementation of each value is handled by a case inside emitApplyExpr
600
+ struct TORCH_API SpecialFormValue : public SugaredValue {
601
+ SpecialFormValue(Symbol form) : form_(form) {}
602
+ std::string kind() const override {
603
+ return form_.toUnqualString();
604
+ }
605
+ Symbol form() const {
606
+ return form_;
607
+ }
608
+ static std::shared_ptr<SpecialFormValue> create(Symbol form) {
609
+ return std::make_shared<SpecialFormValue>(form);
610
+ }
611
+
612
+ private:
613
+ Symbol form_;
614
+ };
615
+
616
+ struct TORCH_API LegacyTensorConstructor : public SpecialFormValue {
617
+ LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device)
618
+ : SpecialFormValue(form), device_(device), dtype_(dtype) {}
619
+
620
+ static std::shared_ptr<LegacyTensorConstructor> create(
621
+ Symbol form,
622
+ at::ScalarType dtype,
623
+ at::Device device) {
624
+ return std::make_shared<LegacyTensorConstructor>(form, dtype, device);
625
+ }
626
+ at::ScalarType dtype() const {
627
+ return dtype_;
628
+ }
629
+
630
+ private:
631
+ at::Device device_;
632
+ at::ScalarType dtype_;
633
+ };
634
+
635
+ // matched against for special handling of range expressions
636
+ struct TORCH_API RangeValue : SugaredValue {
637
+ RangeValue(
638
+ const SourceRange& loc,
639
+ GraphFunction& m,
640
+ std::vector<Value*> input,
641
+ c10::optional<int64_t> static_len = c10::nullopt);
642
+
643
+ std::string kind() const override {
644
+ return "range";
645
+ }
646
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
647
+ SugaredValuePtr getitem(
648
+ const SourceRange& loc,
649
+ GraphFunction& m,
650
+ Value* idx,
651
+ TypePtr type_hint = nullptr) override;
652
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
653
+ override;
654
+
655
+ // When Range is instantiated via enumerate(iterable_with_static_len),
656
+ // then it takes the static length of the iterable
657
+ c10::optional<int64_t> staticLen() override {
658
+ return static_len_;
659
+ }
660
+
661
+ private:
662
+ Value* start_{};
663
+ Value* end_{};
664
+ Value* step_{};
665
+ // a flag to determine if it's a simple range() call with only end_ from
666
+ // arguments If true, we will not insert length calculation and index
667
+ // derivation nodes to simplify the graph and enable more possible
668
+ // optimizations
669
+ bool has_only_end_{};
670
+ c10::optional<int64_t> static_len_;
671
+ };
672
+
673
+ // Specialized Tree structure to matched against for special handling
674
+ // of builtin functions iterables expressions like zip(), enumerate(), etc.
675
+ // zip and enumerate can be modeled as a tree of SimpleValue/RangeValue:
676
+ // zip(x, y) -> (x, y) with tuple assignment to each loop target
677
+ // enumerate(x) -> (range(0, math.inf, 1), x)
678
+ // So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be:
679
+ // (a, (range(0, math.inf, 1), b), range(0, 100))
680
+ // We use those base iterables to fill in the loop information like
681
+ // max_trip_count and set the value table for loop targets
682
+ // Iterables can contain lists of SugaredValues like ModuleLists. If it
683
+ // does, then we emit it unrolled and require that all values it contains
684
+ // have a statically-determinable length.
685
+ struct TORCH_API IterableTree : SugaredValue {
686
+ IterableTree() = default;
687
+ IterableTree(
688
+ const SourceRange& range,
689
+ GraphFunction& m,
690
+ at::ArrayRef<SugaredValuePtr> children) {
691
+ for (const auto& child : children) {
692
+ addChild(range, m, child);
693
+ }
694
+ }
695
+ std::string kind() const override {
696
+ return "iterabletree";
697
+ }
698
+
699
+ std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
700
+ override {
701
+ return shared_from_this();
702
+ }
703
+
704
+ void addChild(
705
+ const SourceRange& range,
706
+ GraphFunction& m,
707
+ const SugaredValuePtr& iter_value);
708
+
709
+ std::vector<SugaredValuePtr> get_children() {
710
+ return children_;
711
+ }
712
+
713
+ // If this iterable contains a ModuleList or Tuple, then it will have a
714
+ // static length, and we will emit it as an unrolled for loop.
715
+ c10::optional<int64_t> staticLen() override {
716
+ return unroll_length_;
717
+ }
718
+
719
+ // given a IterableTree node, get all the base iterables/leaves under the
720
+ // IterableTree node. This enables
721
+ // us to get all the basic SugaredValues that contains valid loop information
722
+ // with len() and getitem()
723
+ std::vector<SugaredValuePtr> get_base_iterables();
724
+
725
+ Value* len(const SourceRange& loc, GraphFunction& m) override;
726
+ SugaredValuePtr getitem(
727
+ const SourceRange& loc,
728
+ GraphFunction& m,
729
+ Value* idx,
730
+ TypePtr type_hint = nullptr) override;
731
+
732
+ private:
733
+ c10::optional<int64_t> unroll_length_ = c10::nullopt;
734
+ std::vector<SugaredValuePtr> children_;
735
+ };
736
+
737
+ static inline std::vector<Value*> toValues(
738
+ Graph& g,
739
+ at::ArrayRef<NamedValue> nvs) {
740
+ return fmap(nvs, [&](const NamedValue& v) { return v.value(g); });
741
+ }
742
+
743
+ struct SimpleSelf : public Self {
744
+ explicit SimpleSelf(ClassTypePtr classType)
745
+ : Self(), classType_(std::move(classType)) {}
746
+ std::shared_ptr<SugaredValue> makeSugared(Value* v) const override {
747
+ v->setType(classType_);
748
+ return std::make_shared<SimpleValue>(v);
749
+ }
750
+ ClassTypePtr getClassType() const override {
751
+ return classType_;
752
+ }
753
+
754
+ private:
755
+ ClassTypePtr classType_;
756
+ };
757
+
758
+ // This is not a SimpleValue so it can not pass through the code paths that
759
+ // expect a SimpleValue as a sugared value.
760
+ struct TORCH_API ExceptionMessageValue : public SugaredValue {
761
+ explicit ExceptionMessageValue(
762
+ Value* value,
763
+ Value* qualified_class_name = nullptr)
764
+ : value_(value), qualified_class_name_(qualified_class_name) {}
765
+
766
+ std::string kind() const override {
767
+ return "exception message";
768
+ }
769
+
770
+ Value* getValue() {
771
+ return value_;
772
+ }
773
+
774
+ // qualified python class name
775
+ Value* getQualifiedClassName() {
776
+ return qualified_class_name_;
777
+ }
778
+
779
+ private:
780
+ Value* value_;
781
+ Value* qualified_class_name_;
782
+ };
783
+
784
+ struct TORCH_API ExceptionValue : public SugaredValue {
785
+ explicit ExceptionValue(std::string message) : message_(std::move(message)) {}
786
+
787
+ std::string kind() const override {
788
+ return "exception";
789
+ }
790
+
791
+ std::shared_ptr<SugaredValue> call(
792
+ const SourceRange& loc,
793
+ GraphFunction& m,
794
+ at::ArrayRef<NamedValue> args,
795
+ at::ArrayRef<NamedValue> /*attributes*/,
796
+ size_t /*n_binders*/) override {
797
+ auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc);
798
+ for (auto& input : args) {
799
+ auto input_str = input.value(*m.graph());
800
+ if (!input_str->type()->isSubtypeOf(*StringType::get())) {
801
+ input_str =
802
+ emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {});
803
+ }
804
+ exception_message = emitBuiltinCall(
805
+ loc, *m.graph(), aten::add, {exception_message, input_str}, {});
806
+ }
807
+ return std::make_shared<ExceptionMessageValue>(exception_message);
808
+ }
809
+
810
+ std::string message_;
811
+ };
812
+
813
+ struct TORCH_API SugaredEnumClass : public SugaredValue {
814
+ explicit SugaredEnumClass(EnumTypePtr enum_type)
815
+ : enum_type_(std::move(enum_type)) {}
816
+
817
+ std::string kind() const override {
818
+ return "EnumClass";
819
+ }
820
+
821
+ SugaredValuePtr attr(
822
+ const SourceRange& loc,
823
+ GraphFunction& m,
824
+ const std::string& field) override;
825
+
826
+ SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override;
827
+
828
+ private:
829
+ EnumTypePtr enum_type_;
830
+ };
831
+
832
+ struct TORCH_API SliceValue : public SugaredValue {
833
+ explicit SliceValue(Value* start, Value* stop, Value* step)
834
+ : start_(start), stop_(stop), step_(step) {}
835
+
836
+ std::string kind() const override {
837
+ return "Python slice value";
838
+ }
839
+
840
+ Value* start() {
841
+ return start_;
842
+ };
843
+ Value* stop() {
844
+ return stop_;
845
+ };
846
+ Value* step() {
847
+ return step_;
848
+ };
849
+
850
+ private:
851
+ Value* start_;
852
+ Value* stop_;
853
+ Value* step_;
854
+ };
855
+
856
+ } // namespace jit
857
+ } // namespace torch
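
To make the desugaring contract above concrete, here is a hypothetical minimal SugaredValue subclass (not part of this header). It overrides only kind() and asValue(); every other operation falls back to the base-class error reports. It uses insertConstant (assumed to be available from torch/csrc/jit/ir/constants.h), the same helper ExceptionValue::call uses above.

#include <torch/csrc/jit/frontend/sugared_value.h>
#include <torch/csrc/jit/ir/constants.h>
#include <string>

namespace example {

// A sugared value that desugars to a constant string when used as a value.
struct MessageValue : public torch::jit::SugaredValue {
  explicit MessageValue(std::string msg) : msg_(std::move(msg)) {}

  std::string kind() const override {
    return "message"; // shows up in errors, e.g. "cannot call a message"
  }

  torch::jit::Value* asValue(
      const torch::jit::SourceRange& loc,
      torch::jit::GraphFunction& m) override {
    // Materialize the string as a constant node in the function's graph.
    return torch::jit::insertConstant(*m.graph(), msg_, loc);
  }

 private:
  std::string msg_;
};

} // namespace example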
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h ADDED
@@ -0,0 +1,412 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dimname.h>
4
+ #include <ATen/core/class_type.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <torch/csrc/Export.h>
10
+
11
+ #include <torch/csrc/jit/frontend/source_range.h>
12
+ #include <torch/csrc/utils/variadic.h>
13
+
14
+ #include <cstdint>
15
+ #include <memory>
16
+ #include <mutex>
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace torch::jit {
21
+ struct Node;
22
+ struct Value;
23
+ struct Graph;
24
+ struct Module;
25
+
26
+ namespace tracer {
27
+
28
+ using ::c10::ivalue::Shared;
29
+
30
+ using ::c10::IValue;
31
+ using ::c10::ivalue::Future;
32
+
33
+ using ::c10::ArrayRef;
34
+ using ::c10::TupleType;
35
+ using ::c10::TupleTypePtr;
36
+ using ::c10::ivalue::ConstantString;
37
+
38
+ using torch::autograd::Variable;
39
+ using variable_list = std::vector<Variable>;
40
+
41
+ TORCH_API std::atomic<bool>& getTracerStateWarnMode();
42
+
43
+ struct TORCH_API TracingState
44
+ : public std::enable_shared_from_this<TracingState> {
45
+ TracingState();
46
+ ~TracingState();
47
+
48
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
49
+ std::shared_ptr<Graph> graph;
50
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
51
+ bool warn = getTracerStateWarnMode();
52
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
53
+ bool strict = true;
54
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
55
+ bool force_outplace = false;
56
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
57
+ std::function<std::string(const Variable& var)> lookup_var_name_fn =
58
+ [](const Variable& var) { return ""; };
59
+
60
+ void enterFrame() {
61
+ env_stack.emplace_back();
62
+ }
63
+
64
+ void leaveFrame() {
65
+ env_stack.pop_back();
66
+ }
67
+
68
+ void setValue(const IValue& v, Value* value);
69
+ void delValue(const IValue& var);
70
+ Value* getValue(const IValue& var);
71
+ Value* getOutput(const IValue& var, size_t i);
72
+ bool hasValue(const IValue& var) const;
73
+
74
+ Node* createNode(c10::Symbol op_name, size_t num_outputs);
75
+ void insertNode(Node* node);
76
+
77
+ private:
78
+ using WeakIValue = at::WeakIValue;
79
+
80
+ struct WeakIValueHasher {
81
+ size_t operator()(const WeakIValue& t) const {
82
+ return t.hash();
83
+ }
84
+ };
85
+
86
+ struct WeakIValueEq {
87
+ bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
88
+ return t1.isSameIdentity(t2);
89
+ }
90
+ };
91
+
92
+ using Frame =
93
+ std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
94
+ std::vector<Frame> env_stack;
95
+ };
96
+
97
+ // This is meant to be used as a thread local place, where we can store extra
98
+ // info that gets lost when we call into ATen from Python bindings. One example
99
+ // for when this happens is when we get an IntArrayRef argument with e.g. sizes
100
+ // for view. When tracing, those might be tensors, which let us encode extra
101
+ // data dependencies, but once they get to the ATen call where we actually have
102
+ // the tracing logic, they get converted into a raw IntArrayRef, and we lose
103
+ // all information. To prevent this, we temporarily stash it in here.
104
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
105
+ struct ArgumentStash {
106
+ struct IntArrayRefTrace : std::vector<Value*> {
107
+ IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
108
+ };
109
+
110
+ static bool empty() {
111
+ return stash.intlists.empty();
112
+ }
113
+
114
+ TORCH_API static void stashIntArrayRefElem(
115
+ const std::string& arg_name,
116
+ size_t size,
117
+ size_t idx,
118
+ const Variable& var);
119
+
120
+ static bool hasIntArrayRef(const std::string& arg_name) {
121
+ return stash.intlists.count(arg_name) > 0;
122
+ }
123
+
124
+ static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
125
+ auto info = std::move(stash.intlists.at(arg_name));
126
+ stash.intlists.erase(arg_name);
127
+ return info;
128
+ }
129
+
130
+ // Value stashing: Use these methods to stash arguments which correspond
131
+ // to regular Value*'s in the graph. i.e. they don't require special
132
+ // handling like in the case of IntArrayRefs
133
+ TORCH_API static void stashValue(
134
+ const std::string& arg_name,
135
+ size_t idx,
136
+ const Variable& var,
137
+ const c10::TypePtr& type = nullptr);
138
+
139
+ static bool hasValue(const std::string& arg_name) {
140
+ return stash.values.count(arg_name) > 0;
141
+ }
142
+
143
+ static Value* popValue(const std::string& arg_name) {
144
+ auto info = stash.values.at(arg_name);
145
+ stash.values.erase(arg_name);
146
+ return info;
147
+ }
148
+
149
+ private:
150
+ static thread_local ArgumentStash stash;
151
+ std::unordered_map<std::string, IntArrayRefTrace> intlists;
152
+ std::unordered_map<std::string, Value*> values;
153
+ };
154
+
155
+ // Retrieve or set the current tracing state. Returns a nullptr if tracing is
156
+ // disabled.
157
+ TORCH_API const std::shared_ptr<TracingState>& getTracingState();
158
+ TORCH_API void setTracingState(std::shared_ptr<TracingState> state);
159
+
160
+ inline bool isTracing() {
161
+ return static_cast<bool>(getTracingState());
162
+ }
163
+
164
+ using warn_fn_type = void (*)(const std::string& msg);
165
+ TORCH_API extern const char* WARN_PYTHON_DATAFLOW;
166
+ TORCH_API extern const char* WARN_CONSTRUCTOR;
167
+ TORCH_API extern const char* WARN_RESIZE;
168
+ TORCH_API extern const char* STRICT_TRACER_MSG;
169
+ TORCH_API void _do_warn(const char* _reason, const char* _kind);
170
+ inline void warn(const char* _reason, const char* _kind = nullptr) {
171
+ if (const auto& state = getTracingState()) {
172
+ if (!state->warn)
173
+ return;
174
+ _do_warn(_reason, _kind);
175
+ }
176
+ }
177
+ TORCH_API void setWarn(warn_fn_type fn);
178
+
179
+ struct TORCH_API NoWarn {
180
+ NoWarn() : state(getTracingState()) {
181
+ if (state) {
182
+ prev = state->warn;
183
+ state->warn = false;
184
+ }
185
+ }
186
+ ~NoWarn() {
187
+ if (state) {
188
+ state->warn = prev;
189
+ }
190
+ }
191
+ std::shared_ptr<TracingState> state;
192
+ bool prev{false};
193
+ };
194
+
195
+ struct WithNestedTracingFrame {
196
+ WithNestedTracingFrame() {
197
+ getTracingState()->enterFrame();
198
+ }
199
+
200
+ ~WithNestedTracingFrame() {
201
+ getTracingState()->leaveFrame();
202
+ }
203
+ };
204
+ TORCH_API void recordSourceLocation(Node* n);
205
+ TORCH_API void setRecordSourceLocation(void (*v)(Node*));
206
+
207
+ TORCH_API std::vector<StackEntry> pythonCallstack();
208
+ TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)());
209
+
210
+ // Having finished adding a new 'node' to the graph IR, 'setValueTrace'
211
+ // associates this node with an output variable, so that further operations
212
+ // involving this variable know which node in the IR to reference.
213
+ TORCH_API void setValueTrace(const IValue& v, Value* value);
214
+
215
+ TORCH_API void delValueTrace(const IValue& var);
216
+
217
+ TORCH_API std::function<void()> pauseTracing();
218
+
219
+ TORCH_API Value* getValueTrace(const IValue& var);
220
+
221
+ TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace(
222
+ Stack inputs,
223
+ const std::function<Stack(Stack)>& traced_fn,
224
+ std::function<std::string(const Variable&)> var_name_lookup_fn,
225
+ bool strict = true,
226
+ bool force_outplace = false,
227
+ Module* self = nullptr,
228
+ const std::vector<std::string>& argument_names = {});
229
+
230
+ TORCH_API void abandon();
231
+
232
+ // NB: these serve both as intermediate steps in addInputs below,
233
+ // as well as the overloads that terminate template recursion
234
+ TORCH_API void addInputs(Node* n, const char* name, int64_t value);
235
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value);
236
+ TORCH_API void addInputs(
237
+ Node* n,
238
+ const char* name,
239
+ c10::optional<int64_t> value);
240
+ TORCH_API void addInputs(Node* n, const char* name, bool value);
241
+ TORCH_API void addInputs(
242
+ Node* n,
243
+ const char* name,
244
+ const c10::optional<bool>& value);
245
+ TORCH_API void addInputs(Node* n, const char* name, double value);
246
+ TORCH_API void addInputs(
247
+ Node* n,
248
+ const char* name,
249
+ const c10::optional<double>& value);
250
+ TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value);
251
+ TORCH_API void addInputs(
252
+ Node* n,
253
+ const char* name,
254
+ const c10::optional<at::Scalar>& value);
255
+ TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value);
256
+ TORCH_API void addInputs(
257
+ Node* n,
258
+ const char* name,
259
+ const c10::optional<at::Tensor>& value);
260
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value);
261
+ TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value);
262
+ TORCH_API void addInputs(
263
+ Node* n,
264
+ const char* name,
265
+ c10::optional<c10::SymInt> value);
266
+ TORCH_API void addInputs(
267
+ Node* n,
268
+ const char* name,
269
+ const c10::optional<ArrayRef<int64_t>>& value);
270
+ TORCH_API void addInputs(
271
+ Node* n,
272
+ const char* name,
273
+ const at::OptionalIntArrayRef& opt_value);
274
+ TORCH_API void addInputs(
275
+ Node* n,
276
+ const char* name,
277
+ const at::OptionalSymIntArrayRef& opt_value);
278
+ TORCH_API void addInputs(
279
+ Node* n,
280
+ const char* name,
281
+ ArrayRef<at::Tensor> value,
282
+ bool allow_undefined = false);
283
+ TORCH_API void addInputs(
284
+ Node* n,
285
+ const char* name,
286
+ std::vector<at::Tensor> value,
287
+ bool allow_undefined = false);
288
+ TORCH_API void addInputs(
289
+ Node* n,
290
+ const char* name,
291
+ at::ITensorListRef value,
292
+ bool allow_undefined = false);
293
+ TORCH_API void addInputs(
294
+ Node* n,
295
+ const char* name,
296
+ const List<c10::optional<at::Tensor>>& value);
297
+ TORCH_API void addInputs(
298
+ Node* n,
299
+ const char* name,
300
+ ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value,
301
+ const c10::ClassTypePtr& class_type);
302
+ TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value);
303
+ TORCH_API void addInputs(
304
+ Node* n,
305
+ const char* name,
306
+ const c10::optional<ArrayRef<double>>& value);
307
+ TORCH_API void addInputs(
308
+ Node* n,
309
+ const char* name,
310
+ const c10::string_view value);
311
+ TORCH_API void addInputs(
312
+ Node* n,
313
+ const char* name,
314
+ const c10::optional<c10::string_view>& value);
315
+ TORCH_API void addInputs(Node* n, const char* name, at::Device value);
316
+ TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream);
317
+ TORCH_API void addInputs(Node* n, const char* name, at::Layout value);
318
+ TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value);
319
+ TORCH_API void addInputs(
320
+ Node* n,
321
+ const char* name,
322
+ const c10::optional<at::ScalarType>& value);
323
+ TORCH_API void addInputs(
324
+ Node* n,
325
+ const char* name,
326
+ const c10::optional<at::Device>& value);
327
+ TORCH_API void addInputs(
328
+ Node* n,
329
+ const char* name,
330
+ const c10::optional<at::Layout>& value);
331
+ TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value);
332
+ TORCH_API void addInputs(
333
+ Node* n,
334
+ const char* name,
335
+ c10::optional<at::DimnameList> value);
336
+ TORCH_API void addInputs(
337
+ Node* n,
338
+ const char* name,
339
+ const c10::optional<at::MemoryFormat>& value);
340
+ TORCH_API void addInputs(
341
+ Node* n,
342
+ const char* name,
343
+ const c10::optional<at::Generator>& value);
344
+
345
+ inline void addInputs(
346
+ Node* n,
347
+ const char* name,
348
+ const std::vector<bool>& value) {
349
+ AT_ERROR("Tracing a list of bool type is currently not supported!");
350
+ }
351
+
352
+ template <typename T>
353
+ void addInputs(Node* n, const char* name, ArrayRef<T> value) {
354
+ AT_ERROR("Tracing a list of arbitrary type is currently not supported!");
355
+ }
356
+ template <typename K, typename V>
357
+ void addInputs(
358
+ Node* n,
359
+ const char* name,
360
+ const std::unordered_map<K, V>& value) {
361
+ AT_ERROR("Tracing a dict of arbitrary types is currently not supported!");
362
+ }
363
+
364
+ template <size_t N>
365
+ void addInputs(Node* n, const char* name, std::array<bool, N> value) {
366
+ throw std::runtime_error(
367
+ "Found an unsupported argument type in the JIT tracer. File a bug report.");
368
+ }
369
+
370
+ TORCH_API void addInputs(
371
+ Node* n,
372
+ const char* name,
373
+ const c10::intrusive_ptr<c10::ivalue::Object>& obj);
374
+
375
+ TORCH_API void ensureUniqueIfOutOfPlaced(
376
+ const char* name,
377
+ const at::Tensor& tensor);
378
+ TORCH_API void ensureUniqueIfOutOfPlaced(
379
+ const char* name,
380
+ const c10::optional<at::Tensor>& tensor);
381
+
382
+ template <
383
+ typename T,
384
+ typename = torch::enable_if_t<
385
+ (!std::is_convertible_v<torch::decay_t<T>, at::TensorList> &&
386
+ !std::is_convertible_v<torch::decay_t<T>, c10::List<at::Tensor>> &&
387
+ !std::is_convertible_v<torch::decay_t<T>, at::Tensor> &&
388
+ !std::is_convertible_v<
389
+ torch::decay_t<T>,
390
+ c10::intrusive_ptr<c10::ivalue::Object>>)>>
391
+ void addOutput(Node* node, T&&) {
392
+ AT_ERROR(
393
+ "Found an unsupported argument type ",
394
+ c10::demangle_type<T>(),
395
+ " in the JIT tracer. File a bug report.");
396
+ }
397
+ TORCH_API void addOutput(Node* node, const at::Tensor& tensor);
398
+ TORCH_API void setOutput(Value* value, const at::Tensor& output);
399
+ TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list);
400
+ TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list);
401
+ TORCH_API void addOutput(
402
+ Node* node,
403
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
404
+
405
+ TORCH_API autograd::Variable getSizeOf(
406
+ const autograd::Variable& var,
407
+ int64_t dim);
408
+
409
+ TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
410
+
411
+ } // namespace tracer
412
+ } // namespace torch::jit
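
A hedged sketch of how call sites typically interact with the tracing state declared above: check isTracing(), emit a tracer warning for an operation that does not trace faithfully, and use NoWarn to silence warnings over a region. The function name and the warned operation are illustrative.

#include <torch/csrc/jit/frontend/tracer.h>

void warn_if_tracing_resize() {
  using namespace torch::jit::tracer;
  if (!isTracing()) {
    return; // no TracingState installed; nothing to record or warn about
  }
  // Route a warning about an untraceable in-place resize through the
  // installed warn handler (see setWarn above).
  warn("resize_", WARN_RESIZE);

  // Within this scope, tracer warnings are suppressed and restored on exit.
  NoWarn guard;
  // ... perform work that would otherwise produce repeated warnings ...
}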
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h ADDED
@@ -0,0 +1,220 @@
1
+ #pragma once
2
+
3
+ #include <functional>
4
+ #include <memory>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ #include <c10/util/SmallVector.h>
9
+ #include <c10/util/intrusive_ptr.h>
10
+ #include <torch/csrc/jit/frontend/lexer.h>
11
+
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ // Trees are used to represent all forms of TC IR, pre- and post-typechecking.
16
+ // Rather than have a full class hierarchy for all TC statements, trees are a
17
+ // slight variation of Lisp s-expressions. For instance, the expression a*b+1
18
+ // is represented as:
19
+ // (+ (* (ident a) (ident b)) (const 1))
20
+ // Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which
21
+ // define stringValue(). Everything else is a Compound object, which has a
22
+ // 'kind' that is a token from lexer.h's TokenKind enum. Single-character
23
+ // operators like '+' are represented using the character itself (so, add.kind()
24
+ // would be '+'). Each Compound object also contains a list of subtrees and is
25
+ // associated with a SourceRange for error reporting.
26
+ // Memory management of trees is done using intrusive_ptr.
27
+
28
+ struct Tree;
29
+ using TreeRef = c10::intrusive_ptr<Tree>;
30
+ using TreeList = at::SmallVector<TreeRef, 4>;
31
+
32
+ struct Tree : c10::intrusive_ptr_target {
33
+ Tree(int kind_) : kind_(kind_) {}
34
+ int kind() const {
35
+ return kind_;
36
+ }
37
+ virtual bool isAtom() const {
38
+ return true;
39
+ }
40
+ virtual const SourceRange& range() const {
41
+ throw std::runtime_error("is an Atom");
42
+ }
43
+ virtual const std::string& stringValue() const {
44
+ throw std::runtime_error("stringValue can only be called on TK_STRING");
45
+ }
46
+ virtual const TreeList& trees() const {
47
+ static const TreeList empty_trees = {};
48
+ return empty_trees;
49
+ }
50
+ const TreeRef& tree(size_t i) const {
51
+ return trees().at(i);
52
+ }
53
+ virtual TreeRef map(const std::function<TreeRef(TreeRef)>& fn) {
54
+ (void)fn;
55
+ c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
56
+ // from a raw `this` pointer
57
+ // so we need to bump the refcount
58
+ // to account for this ownership
59
+ return TreeRef::reclaim(this);
60
+ }
61
+ template <typename... Args>
62
+ void match(int k, Args&... args) const {
63
+ matchD(k, "unknown", 0, args...);
64
+ }
65
+ template <typename... Args>
66
+ void matchD(int k, const char* filename, int lineno, Args&... args) const {
67
+ std::initializer_list<TreeRef*> vars = {args...};
68
+ matchNumSubtreesD(k, filename, lineno, vars.size(), true);
69
+ size_t i = 0;
70
+ for (TreeRef* v : vars) {
71
+ *v = trees()[i++];
72
+ }
73
+ }
74
+ void matchNumSubtrees(int k, size_t expected_subtrees) {
75
+ return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false);
76
+ }
77
+ void matchNumSubtreesD(
78
+ int k,
79
+ const char* filename,
80
+ int lineno,
81
+ size_t expected_subtrees,
82
+ bool allow_more) const {
83
+ if (kind() != k) {
84
+ std::stringstream ss;
85
+ ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k)
86
+ << "' but found '" << kindToString(kind()) << "'\n";
87
+ range().highlight(ss);
88
+ throw std::runtime_error(ss.str());
89
+ }
90
+ if (trees().size() < expected_subtrees ||
91
+ (!allow_more && trees().size() != expected_subtrees)) {
92
+ std::stringstream ss;
93
+ ss << filename << ":" << lineno << ": expected at least "
94
+ << expected_subtrees << " subtrees, but found only " << trees().size()
95
+ << "\n";
96
+ range().highlight(ss);
97
+ throw std::runtime_error(ss.str());
98
+ }
99
+ }
100
+ ~Tree() override = default;
101
+
102
+ private:
103
+ int kind_;
104
+ };
105
+
106
+ struct String : public Tree {
107
+ String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {}
108
+ const std::string& stringValue() const override {
109
+ return value_;
110
+ }
111
+ template <typename... Args>
112
+ static TreeRef create(Args&&... args) {
113
+ return c10::make_intrusive<String>(std::forward<Args>(args)...);
114
+ }
115
+
116
+ private:
117
+ std::string value_;
118
+ };
119
+
120
+ static SourceRange mergeRanges(SourceRange c, const TreeList& others) {
121
+ for (const auto& t : others) {
122
+ if (t->isAtom())
123
+ continue;
124
+ size_t s = std::min(c.start(), t->range().start());
125
+ size_t e = std::max(c.end(), t->range().end());
126
+ c = SourceRange(c.source(), s, e);
127
+ }
128
+ return c;
129
+ }
130
+
131
+ struct Compound : public Tree {
132
+ Compound(int kind, SourceRange range)
133
+ : Tree(kind), range_(std::move(range)) {}
134
+ Compound(int kind, const SourceRange& range_, TreeList&& trees_)
135
+ : Tree(kind),
136
+ range_(mergeRanges(range_, trees_)),
137
+ trees_(std::move(trees_)) {}
138
+ const TreeList& trees() const override {
139
+ return trees_;
140
+ }
141
+ static TreeRef create(
142
+ int kind,
143
+ const SourceRange& range_,
144
+ TreeList&& trees_) {
145
+ return c10::make_intrusive<Compound>(kind, range_, std::move(trees_));
146
+ }
147
+ bool isAtom() const override {
148
+ return false;
149
+ }
150
+ TreeRef map(const std::function<TreeRef(TreeRef)>& fn) override {
151
+ TreeList ret;
152
+ for (auto& t : trees()) {
153
+ ret.push_back(fn(t));
154
+ }
155
+ return Compound::create(kind(), range(), std::move(ret));
156
+ }
157
+
158
+ const SourceRange& range() const override {
159
+ return range_;
160
+ }
161
+
162
+ private:
163
+ SourceRange range_;
164
+ TreeList trees_;
165
+ };
166
+
167
+ // tree pretty printer
168
+ struct pretty_tree {
169
+ pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {}
170
+ const TreeRef& tree;
171
+ size_t col;
172
+ std::unordered_map<TreeRef, std::string> flat_strings;
173
+ const std::string& get_flat(const TreeRef& t) {
174
+ auto it = flat_strings.find(t);
175
+ if (it != flat_strings.end())
176
+ return it->second;
177
+
178
+ std::stringstream out;
179
+ switch (t->kind()) {
180
+ case TK_STRING:
181
+ out << t->stringValue();
182
+ break;
183
+ default:
184
+ out << "(" << kindToString(t->kind());
185
+ for (const auto& e : t->trees()) {
186
+ out << " " << get_flat(e);
187
+ }
188
+ out << ")";
189
+ break;
190
+ }
191
+ auto it_ = flat_strings.emplace(t, out.str());
192
+ return it_.first->second;
193
+ }
194
+ void print(std::ostream& out, const TreeRef& t, int indent) {
195
+ const std::string& s = get_flat(t);
196
+ if (indent + s.size() < col || t->isAtom()) {
197
+ out << s;
198
+ return;
199
+ }
200
+ std::string k = kindToString(t->kind());
201
+ out << "(" << k;
202
+ for (const auto& e : t->trees()) {
203
+ out << "\n" << std::string(indent + 2, ' ');
204
+ print(out, e, indent + 2);
205
+ }
206
+ out << ")";
207
+ }
208
+ };
209
+
210
+ static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) {
211
+ t_.print(out, t_.tree, 0);
212
+ return out << std::endl;
213
+ }
214
+
215
+ static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) {
216
+ return out << pretty_tree(t);
217
+ }
218
+
219
+ } // namespace jit
220
+ } // namespace torch
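
A minimal usage sketch of the Tree/String/Compound API above (illustrative only, not part of the uploaded header): it builds a TK_LIST compound around two string atoms, checks its shape, and pretty-prints it. The TK_LIST/TK_STRING token kinds are assumed to come from lexer.h, and SourceRange is assumed to be default-constructible for a synthetic, source-less range.

#include <torch/csrc/jit/frontend/lexer.h>  // token kinds such as TK_LIST, TK_STRING (assumed)
#include <torch/csrc/jit/frontend/tree.h>

#include <iostream>

using namespace torch::jit;

void demo_tree() {
  SourceRange r;  // synthetic, source-less range (assumed default-constructible)
  TreeRef hello = String::create("hello");  // atom of kind TK_STRING
  TreeRef world = String::create("world");
  TreeRef list = Compound::create(TK_LIST, r, {hello, world});
  list->matchNumSubtrees(TK_LIST, 2);  // throws std::runtime_error on kind/arity mismatch
  std::cout << list;  // routed through the pretty_tree printer defined above
}
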
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h ADDED
@@ -0,0 +1,1275 @@
1
+ #pragma once
2
+ #include <c10/util/string_utils.h>
3
+ #include <torch/csrc/jit/frontend/error_report.h>
4
+ #include <torch/csrc/jit/frontend/strtod.h>
5
+ #include <torch/csrc/jit/frontend/tree.h>
6
+
7
+ #include <c10/util/complex.h>
8
+ #include <functional>
9
+ #include <iostream>
10
+ #include <string>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // clang-format off
17
+ // TreeView provides a statically-typed way to traverse the tree, which should
18
+ // be formed according to the grammar below.
19
+ //
20
+ // A few notes on types and their aliases:
21
+ // - List<T> is really a Tree with kind TK_LIST and elements as subtrees
22
+ // - Maybe<T> is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T
23
+ // - Builtin types are: Ident (TK_IDENT), String (TK_STRING)
24
+ //
25
+ // Param = Param(Maybe<Expr> type, Ident name) TK_PARAM
26
+ //
27
+ // Decl = Decl(List<Param> params, Maybe<Expr> return_type) TK_DECL
28
+ // Def = Def(Ident name, Decl decl, List<Stmt> body) TK_DEF
29
+ // ClassDef = ClassDef(Ident name, TK_CLASS_DEF
30
+ // Maybe<Expr> superclass,
31
+ // List<Stmt> body)
32
+ //
33
+ // Stmt = If(Expr cond, List<Stmt> true_body, List<Stmt> false_body) TK_IF
34
+ // | For(List<Expr> targets, List<Expr> iters, List<Stmt> body) TK_FOR
35
+ // | While(Expr cond, List<Stmt> body) TK_WHILE
36
+ // | Global(List<Ident> idents) TK_GLOBAL
37
+ // -- NB: the only type of Expr's allowed on lhs are Var
38
+ // Or a tuple containing Var with an optional terminating Starred
39
+ // | Assign(Expr lhs, Maybe<Expr> rhs, Maybe<Expr> type) TK_ASSIGN
40
+ // | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN
41
+ // | Return(List<Expr> values) TK_RETURN
42
+ // | ExprStmt(List<Expr> expr) TK_EXPR_STMT
43
+ // | Raise(Expr expr) TK_RAISE
44
+ // | Def TK_DEF
45
+ // | With(List<WithItem> targets, List<Stmt> body) TK_WITH
46
+ //
47
+ // Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR
48
+ // | BinOp(Expr lhs, Expr rhs)
49
+ // | And TK_AND
50
+ // | Or TK_OR
51
+ // | Lt '<'
52
+ // | Gt '>'
53
+ // | Eq TK_EQ
54
+ // | Le TK_LE
55
+ // | Ge TK_GE
56
+ // | Ne TK_NE
57
+ // | Is TK_IS
58
+ // | IsNot TK_ISNOT
59
+ // | Add '+'
60
+ // | Sub '-'
61
+ // | Mul '*'
62
+ // | Div '/'
63
+ // | Mod '%'
64
+ // | MatMult '@'
65
+ // | Pow TK_POW
66
+ // | UnaryOp(Expr expr)
67
+ // | Not TK_NOT
68
+ // | USub '-'
69
+ // | Const(String value) TK_CONST
70
+ // -- NB: x.name(y) is desugared into name(x, y)
71
+ // | Apply(Ident name, List<Expr> args, List<Attribute> kwargs) TK_APPLY
72
+ // | Select(Expr value, Ident selector) '.'
73
+ // | Subscript(Expr value, List<Expr> subscript_exprs) TK_SUBSCRIPT
74
+ // | SliceExpr(Maybe<Expr> start, Maybe<Expr> end) TK_SLICE_EXPR
75
+ // | Var(Ident name) TK_VAR
76
+ // | ListLiteral(List<Expr> inputs) TK_LIST_LITERAL
77
+ // | TupleLiteral(List<Expr> inputs) TK_TUPLE_LITERAL
78
+ // | Starred(Expr expr) TK_STARRED
79
+ // | WithItem(Expr target, Maybe<Var> var) TK_WITH_ITEM
80
+ // -- NB: only allowed expressions are Const or List(Const)
81
+ // (List as a value, not type constructor)
82
+ // Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE
83
+ //
84
+ // AugAssignKind =
85
+ // | Add() TK_PLUS_EQ
86
+ // | Sub() TK_MINUS_EQ
87
+ // | Mul() TK_TIMES_EQ
88
+ // | Div() TK_DIV_EQ
89
+ // | Mod() TK_MOD_EQ
90
+ //
91
+
92
+ // Each subclass of TreeView should provide:
93
+ // 1. Constructor that takes a TreeRef, and checks that it's of the right type.
94
+ // 2. Accessors that get underlying information out of the object. If they
95
+ // return subtrees, they should wrap them in appropriate views too.
96
+ // 3. Static method 'create' that creates the underlying TreeRef object.
97
+ // For every TreeRef kind that has a TreeView, the parser always uses
98
+ // (e.g.) Ident::create rather than Compound::create; this means that
99
+ // changes to the structure of Ident are always made right here rather
100
+ // than both in the parser and in this code.
101
+ // XXX: these structs should have no fields to prevent slicing when passing by value
102
+ // clang-format on
103
+ struct TreeView {
104
+ explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {}
105
+ TreeRef tree() const {
106
+ return tree_;
107
+ }
108
+ const SourceRange& range() const {
109
+ return tree_->range();
110
+ }
111
+ operator TreeRef() const {
112
+ return tree_;
113
+ }
114
+ const TreeRef& get() const {
115
+ return tree_;
116
+ }
117
+ int kind() const {
118
+ return tree_->kind();
119
+ }
120
+ void dump() const {
121
+ std::cout << tree_;
122
+ }
123
+
124
+ protected:
125
+ const TreeRef& subtree(size_t i) const {
126
+ return tree_->trees().at(i);
127
+ }
128
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
129
+ TreeRef tree_;
130
+ };
131
+
132
+ template <typename T>
133
+ struct ListIterator {
134
+ ListIterator(TreeList::const_iterator it) : it(it) {}
135
+ bool operator!=(const ListIterator& rhs) const {
136
+ return it != rhs.it;
137
+ }
138
+ bool operator==(const ListIterator& rhs) const {
139
+ return it == rhs.it;
140
+ }
141
+ T operator*() const {
142
+ return T(*it);
143
+ }
144
+ ListIterator& operator+=(std::ptrdiff_t n) {
145
+ it += n;
146
+ return *this;
147
+ }
148
+ ListIterator& operator++() {
149
+ ++it;
150
+ return *this;
151
+ }
152
+ ListIterator& operator--() {
153
+ --it;
154
+ return *this;
155
+ }
156
+
157
+ private:
158
+ TreeList::const_iterator it;
159
+ };
160
+
161
+ template <typename T>
162
+ struct List : public TreeView {
163
+ using iterator = ListIterator<T>;
164
+ using const_iterator = ListIterator<T>;
165
+
166
+ List(const TreeRef& tree) : TreeView(tree) {
167
+ tree->match(TK_LIST);
168
+ // Iterate over list to temporarily instantiate Ts that will check the type
169
+ for (const T& elem : *this) {
170
+ (void)elem; // silence unused warning
171
+ }
172
+ }
173
+ iterator begin() const {
174
+ return iterator(tree_->trees().begin());
175
+ }
176
+ iterator end() const {
177
+ return iterator(tree_->trees().end());
178
+ }
179
+ bool empty() const {
180
+ return tree_->trees().begin() == tree_->trees().end();
181
+ }
182
+ T operator[](size_t i) const {
183
+ return T(subtree(i));
184
+ }
185
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
186
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
187
+ }
188
+ static List create(const SourceRange& range, const std::vector<T>& subtrees) {
189
+ TreeList type_erased_sub{subtrees.begin(), subtrees.end()};
190
+ return List(Compound::create(TK_LIST, range, std::move(type_erased_sub)));
191
+ }
192
+ static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) {
193
+ return List(Compound::create(TK_LIST, range, std::move(subtrees)));
194
+ }
195
+ size_t size() const {
196
+ return tree_->trees().size();
197
+ }
198
+ };
199
+
200
+ template <typename T>
201
+ struct Maybe : public TreeView {
202
+ explicit Maybe(const TreeRef& tree) : TreeView(tree) {
203
+ tree_->match(TK_OPTION);
204
+ if (tree_->trees().size() > 1)
205
+ throw ErrorReport(tree) << "Maybe trees can have at most one subtree";
206
+ }
207
+ /* implicit */ Maybe(const T& tree) : TreeView(tree) {}
208
+ bool present() const {
209
+ return tree_->trees().size() > 0;
210
+ }
211
+ T get() const {
212
+ return T(tree_->trees().at(0));
213
+ }
214
+ TreeRef map(const std::function<TreeRef(const T&)>& fn) {
215
+ return tree_->map([&](TreeRef v) { return fn(T(v)); });
216
+ }
217
+ static Maybe<T> create(const SourceRange& range) {
218
+ return Maybe<T>(Compound::create(TK_OPTION, range, {}));
219
+ }
220
+ static Maybe<T> create(const SourceRange& range, const T& value) {
221
+ return Maybe<T>(Compound::create(TK_OPTION, range, {value}));
222
+ }
223
+ };
224
+
225
+ struct Ident : public TreeView {
226
+ explicit Ident(const TreeRef& tree) : TreeView(tree) {
227
+ tree_->match(TK_IDENT);
228
+ }
229
+ const std::string& name() const {
230
+ return subtree(0)->stringValue();
231
+ }
232
+ static Ident create(const SourceRange& range, std::string name) {
233
+ return Ident(
234
+ Compound::create(TK_IDENT, range, {String::create(std::move(name))}));
235
+ }
236
+ };
237
+
238
+ ////////////////////////////////////////////////////////////////////////////////
239
+ // Base types (production LHS)
240
+ ////////////////////////////////////////////////////////////////////////////////
241
+
242
+ struct Stmt : public TreeView {
243
+ explicit Stmt(const TreeRef& tree) : TreeView(tree) {
244
+ switch (tree->kind()) {
245
+ case TK_IF:
246
+ case TK_FOR:
247
+ case TK_WHILE:
248
+ case TK_GLOBAL:
249
+ case TK_ASSIGN:
250
+ case TK_AUG_ASSIGN:
251
+ case TK_RETURN:
252
+ case TK_EXPR_STMT:
253
+ case TK_RAISE:
254
+ case TK_ASSERT:
255
+ case TK_PASS:
256
+ case TK_BREAK:
257
+ case TK_DELETE:
258
+ case TK_CONTINUE:
259
+ case TK_DEF:
260
+ case TK_WITH:
261
+ return;
262
+ default:
263
+ throw ErrorReport(tree)
264
+ << kindToString(tree->kind()) << " is not a valid Stmt";
265
+ }
266
+ }
267
+ };
268
+
269
+ struct Expr : public TreeView {
270
+ explicit Expr(const TreeRef& tree) : TreeView(tree) {
271
+ switch (tree->kind()) {
272
+ case TK_IF_EXPR:
273
+ case TK_AND:
274
+ case TK_OR:
275
+ case '<':
276
+ case '>':
277
+ case TK_IS:
278
+ case TK_ISNOT:
279
+ case TK_EQ:
280
+ case TK_LE:
281
+ case TK_GE:
282
+ case TK_NE:
283
+ case '+':
284
+ case '-':
285
+ case TK_UNARY_MINUS:
286
+ case '~':
287
+ case '*':
288
+ case TK_STARRED:
289
+ case '/':
290
+ case '%':
291
+ case TK_NOT:
292
+ case TK_CONST:
293
+ case TK_STRINGLITERAL:
294
+ case TK_TRUE:
295
+ case TK_FALSE:
296
+ case TK_NONE:
297
+ case TK_NONE_TYPE:
298
+ case TK_CAST:
299
+ case TK_APPLY:
300
+ case '.':
301
+ case TK_SUBSCRIPT:
302
+ case TK_SLICE_EXPR:
303
+ case TK_VAR:
304
+ case TK_LIST_LITERAL:
305
+ case TK_TUPLE_LITERAL:
306
+ case TK_DICT_LITERAL:
307
+ case '@':
308
+ case TK_POW:
309
+ case TK_LSHIFT:
310
+ case TK_RSHIFT:
311
+ case TK_FLOOR_DIV:
312
+ case '&':
313
+ case '^':
314
+ case '|':
315
+ case TK_LIST_COMP:
316
+ case TK_DICT_COMP:
317
+ case TK_DOTS:
318
+ case TK_IN:
319
+ case TK_WITH_ITEM:
320
+ return;
321
+ default:
322
+ throw ErrorReport(tree)
323
+ << kindToString(tree->kind()) << " is not a valid Expr";
324
+ }
325
+ }
326
+ };
327
+
328
+ ////////////////////////////////////////////////////////////////////////////////
329
+ // Helper nodes (mostly for function arguments)
330
+ ////////////////////////////////////////////////////////////////////////////////
331
+
332
+ struct Attribute : public TreeView {
333
+ explicit Attribute(const TreeRef& tree) : TreeView(tree) {
334
+ tree_->match(TK_ATTRIBUTE);
335
+ }
336
+ Ident name() const {
337
+ return Ident(subtree(0));
338
+ }
339
+ Expr value() const {
340
+ return Expr(subtree(1));
341
+ }
342
+ static Attribute create(
343
+ const SourceRange& range,
344
+ const Ident& name,
345
+ const TreeRef& value) {
346
+ return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value}));
347
+ }
348
+ };
349
+
350
+ struct Param : public TreeView {
351
+ explicit Param(const TreeRef& tree) : TreeView(tree) {
352
+ tree_->match(TK_PARAM);
353
+ }
354
+ static Param create(
355
+ const SourceRange& range,
356
+ const Ident& ident,
357
+ const Maybe<Expr>& type,
358
+ const Maybe<Expr>& def,
359
+ bool kwarg_only) {
360
+ TreeRef kwarg_only_tree =
361
+ Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {});
362
+ return Param(Compound::create(
363
+ TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)}));
364
+ }
365
+ Ident ident() const {
366
+ return Ident(subtree(0));
367
+ }
368
+ Maybe<Expr> type() const {
369
+ return Maybe<Expr>(subtree(1));
370
+ }
371
+ Maybe<Expr> defaultValue() const {
372
+ return Maybe<Expr>(subtree(2));
373
+ }
374
+ bool kwarg_only() const {
375
+ return TK_TRUE == subtree(3)->kind();
376
+ }
377
+ Param withType(const Maybe<Expr>& typ) const {
378
+ return Param::create(range(), ident(), typ, defaultValue(), kwarg_only());
379
+ }
380
+ };
381
+
382
+ ////////////////////////////////////////////////////////////////////////////////
383
+ // Top level definitions
384
+ ////////////////////////////////////////////////////////////////////////////////
385
+
386
+ struct Decl : public TreeView {
387
+ explicit Decl(const TreeRef& tree) : TreeView(tree) {
388
+ tree->match(TK_DECL);
389
+ }
390
+ List<Param> params() const {
391
+ return List<Param>(subtree(0));
392
+ }
393
+ Maybe<Expr> return_type() const {
394
+ return Maybe<Expr>(subtree(1));
395
+ }
396
+ static Decl create(
397
+ const SourceRange& range,
398
+ const List<Param>& params,
399
+ const Maybe<Expr>& return_type) {
400
+ return Decl(Compound::create(TK_DECL, range, {params, return_type}));
401
+ }
402
+ };
403
+
404
+ struct Def : public TreeView {
405
+ explicit Def(const TreeRef& tree) : TreeView(tree) {
406
+ tree->match(TK_DEF);
407
+ }
408
+ Def withName(std::string new_name) const {
409
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
410
+ return create(range(), new_ident, decl(), statements());
411
+ }
412
+ Def withDecl(const Decl& decl) const {
413
+ return create(range(), name(), decl, statements());
414
+ }
415
+ Ident name() const {
416
+ return Ident(subtree(0));
417
+ }
418
+ Decl decl() const {
419
+ return Decl(subtree(1));
420
+ }
421
+ List<Stmt> statements() const {
422
+ return List<Stmt>(subtree(2));
423
+ }
424
+ static Def create(
425
+ const SourceRange& range,
426
+ const Ident& name,
427
+ const Decl& decl,
428
+ const List<Stmt>& stmts) {
429
+ return Def(Compound::create(TK_DEF, range, {name, decl, stmts}));
430
+ }
431
+ };
432
+
433
+ // Property represents a named attribute combined with a getter and setter
434
+ // method to access and mutate that attribute.
435
+ struct Property : public TreeView {
436
+ explicit Property(const TreeRef& tree) : TreeView(tree) {
437
+ tree->match(TK_PROP);
438
+ }
439
+ Ident name() const {
440
+ return Ident(subtree(0));
441
+ }
442
+ Def getter() const {
443
+ return Def(subtree(1));
444
+ }
445
+ Maybe<Def> setter() const {
446
+ return Maybe<Def>(subtree(2));
447
+ }
448
+ static Property create(
449
+ const SourceRange& range,
450
+ const Ident& name,
451
+ const Def& getter,
452
+ const Maybe<Def>& setter) {
453
+ return Property(Compound::create(TK_PROP, range, {name, getter, setter}));
454
+ }
455
+ };
456
+
457
+ struct Assign;
458
+
459
+ struct ClassDef : public TreeView {
460
+ explicit ClassDef(const TreeRef& tree) : TreeView(tree) {
461
+ tree->match(TK_CLASS_DEF);
462
+ }
463
+ explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) {
464
+ tree_->match(TK_CLASS_DEF);
465
+ }
466
+ ClassDef withName(std::string new_name) const {
467
+ auto new_ident = Ident::create(name().range(), std::move(new_name));
468
+ return create(range(), new_ident, superclass(), body());
469
+ }
470
+ Ident name() const {
471
+ return Ident(subtree(0));
472
+ }
473
+ Maybe<Expr> superclass() const {
474
+ return Maybe<Expr>(subtree(1));
475
+ }
476
+ List<Stmt> body() const {
477
+ return List<Stmt>(subtree(2));
478
+ }
479
+ Maybe<List<Property>> properties() const {
480
+ return Maybe<List<Property>>(subtree(3));
481
+ }
482
+ Maybe<List<Assign>> assigns() const {
483
+ return Maybe<List<Assign>>(subtree(4));
484
+ }
485
+ static ClassDef create(
486
+ const SourceRange& range,
487
+ const Ident& name,
488
+ const Maybe<Expr>& superclass,
489
+ const List<Stmt>& body) {
490
+ return ClassDef(Compound::create(
491
+ TK_CLASS_DEF,
492
+ range,
493
+ {name,
494
+ superclass,
495
+ body,
496
+ Maybe<List<Property>>::create(range),
497
+ Maybe<List<Assign>>::create(range)}));
498
+ }
499
+ static ClassDef create(
500
+ const SourceRange& range,
501
+ const Ident& name,
502
+ const Maybe<Expr>& superclass,
503
+ const List<Stmt>& body,
504
+ const List<Property>& properties,
505
+ const List<Assign>& assigns);
506
+ };
507
+
508
+ TORCH_API std::vector<std::string> getUnresolvedClassAttributes(
509
+ const ClassDef& def);
510
+
511
+ ////////////////////////////////////////////////////////////////////////////////
512
+ // Statements
513
+ ////////////////////////////////////////////////////////////////////////////////
514
+
515
+ struct If : public Stmt {
516
+ explicit If(const TreeRef& tree) : Stmt(tree) {
517
+ tree_->match(TK_IF);
518
+ }
519
+ Expr cond() const {
520
+ return Expr(subtree(0));
521
+ }
522
+ List<Stmt> trueBranch() const {
523
+ return List<Stmt>(subtree(1));
524
+ }
525
+ List<Stmt> falseBranch() const {
526
+ return List<Stmt>(subtree(2));
527
+ }
528
+ If withNewBranches(
529
+ const List<Stmt>& true_branch,
530
+ const List<Stmt>& false_branch) const {
531
+ return create(range(), cond(), true_branch, false_branch);
532
+ }
533
+ static If create(
534
+ const SourceRange& range,
535
+ const Expr& cond,
536
+ const List<Stmt>& true_branch,
537
+ const List<Stmt>& false_branch) {
538
+ return If(
539
+ Compound::create(TK_IF, range, {cond, true_branch, false_branch}));
540
+ }
541
+ };
542
+
543
+ struct While : public Stmt {
544
+ explicit While(const TreeRef& tree) : Stmt(tree) {
545
+ tree_->match(TK_WHILE);
546
+ }
547
+ Expr cond() const {
548
+ return Expr(subtree(0));
549
+ }
550
+ List<Stmt> body() const {
551
+ return List<Stmt>(subtree(1));
552
+ }
553
+ static While create(
554
+ const SourceRange& range,
555
+ const Expr& cond,
556
+ const List<Stmt>& body) {
557
+ return While(Compound::create(TK_WHILE, range, {cond, body}));
558
+ }
559
+ };
560
+
561
+ struct For : public Stmt {
562
+ explicit For(const TreeRef& tree) : Stmt(tree) {
563
+ tree->match(TK_FOR);
564
+ }
565
+ List<Expr> targets() const {
566
+ return List<Expr>(subtree(0));
567
+ }
568
+ List<Expr> itrs() const {
569
+ return List<Expr>(subtree(1));
570
+ }
571
+ List<Stmt> body() const {
572
+ return List<Stmt>(subtree(2));
573
+ }
574
+ static For create(
575
+ const SourceRange& range,
576
+ const List<Expr>& targets,
577
+ const List<Expr>& itrs,
578
+ const List<Stmt>& body) {
579
+ return For(Compound::create(TK_FOR, range, {targets, itrs, body}));
580
+ }
581
+ };
582
+
583
+ // TODO: supports only single comprehension for now
584
+ struct ListComp : public Expr {
585
+ explicit ListComp(const TreeRef& tree) : Expr(tree) {
586
+ tree->match(TK_LIST_COMP);
587
+ }
588
+ Expr elt() const {
589
+ return Expr(subtree(0));
590
+ }
591
+ Expr target() const {
592
+ return Expr(subtree(1));
593
+ }
594
+ Expr iter() const {
595
+ return Expr(subtree(2));
596
+ }
597
+ // TODO: no ifs for now
598
+ static ListComp create(
599
+ const SourceRange& range,
600
+ const Expr& elt,
601
+ const Expr& target,
602
+ const Expr& iter) {
603
+ return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter}));
604
+ }
605
+ };
606
+
607
+ // TODO: supports only single comprehension for now
608
+ struct DictComp : public Expr {
609
+ explicit DictComp(const TreeRef& tree) : Expr(tree) {
610
+ tree->match(TK_DICT_COMP);
611
+ }
612
+ Expr key() const {
613
+ return Expr(subtree(0));
614
+ }
615
+ Expr value() const {
616
+ return Expr(subtree(1));
617
+ }
618
+ Expr target() const {
619
+ return Expr(subtree(2));
620
+ }
621
+ Expr iter() const {
622
+ return Expr(subtree(3));
623
+ }
624
+ // TODO: no ifs for now
625
+ static DictComp create(
626
+ const SourceRange& range,
627
+ const Expr& key,
628
+ const Expr& value,
629
+ const Expr& target,
630
+ const Expr& iter) {
631
+ return DictComp(
632
+ Compound::create(TK_DICT_COMP, range, {key, value, target, iter}));
633
+ }
634
+ };
635
+
636
+ struct Global : public Stmt {
637
+ explicit Global(const TreeRef& tree) : Stmt(tree) {
638
+ tree_->match(TK_GLOBAL);
639
+ }
640
+ List<Ident> names() {
641
+ return List<Ident>(subtree(0));
642
+ }
643
+ static Global create(const SourceRange& range, const List<Ident>& names) {
644
+ return Global(Compound::create(TK_GLOBAL, range, {names}));
645
+ }
646
+ };
647
+
648
+ struct AugAssignKind : public TreeView {
649
+ explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) {
650
+ switch (tree->kind()) {
651
+ case '+':
652
+ case '-':
653
+ case '*':
654
+ case '/':
655
+ case '%':
656
+ case '|':
657
+ case '&':
658
+ case '^':
659
+ case TK_POW:
660
+ case TK_LSHIFT:
661
+ case TK_RSHIFT:
662
+ return;
663
+ default:
664
+ throw ErrorReport(tree) << "is not a valid AugAssignKind";
665
+ }
666
+ }
667
+ };
668
+
669
+ // Augmented assignment, like "foo += bar"
670
+ struct AugAssign : public Stmt {
671
+ explicit AugAssign(const TreeRef& tree) : Stmt(tree) {
672
+ tree_->match(TK_AUG_ASSIGN);
673
+ }
674
+ static AugAssign create(
675
+ const SourceRange& range,
676
+ const Expr& lhs,
677
+ const AugAssignKind& aug_op,
678
+ const Expr& rhs) {
679
+ return AugAssign(
680
+ Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs}));
681
+ }
682
+ Expr lhs() const {
683
+ return Expr(subtree(0));
684
+ }
685
+ int aug_op() const {
686
+ return subtree(1)->kind();
687
+ }
688
+ Expr rhs() const {
689
+ return Expr(subtree(2));
690
+ }
691
+ };
692
+
693
+ struct Assign : public Stmt {
694
+ explicit Assign(const TreeRef& tree) : Stmt(tree) {
695
+ tree_->match(TK_ASSIGN);
696
+ }
697
+ static Assign create(
698
+ const SourceRange& range,
699
+ const List<Expr>& lhs,
700
+ const Maybe<Expr>& rhs,
701
+ const Maybe<Expr>& type) {
702
+ return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type}));
703
+ }
704
+
705
+ List<Expr> lhs_list() const {
706
+ return List<Expr>(subtree(0));
707
+ }
708
+
709
+ Expr lhs() const {
710
+ const auto& li = lhs_list();
711
+ TORCH_INTERNAL_ASSERT(li.size() == 1);
712
+ return *li.begin();
713
+ }
714
+
715
+ Maybe<Expr> rhs() const {
716
+ return Maybe<Expr>(subtree(1));
717
+ }
718
+
719
+ Maybe<Expr> type() const {
720
+ return Maybe<Expr>(subtree(2));
721
+ }
722
+ };
723
+
724
+ struct Return : public Stmt {
725
+ explicit Return(const TreeRef& tree) : Stmt(tree) {
726
+ tree_->match(TK_RETURN);
727
+ }
728
+ Expr expr() const {
729
+ return Expr(subtree(0));
730
+ }
731
+ static Return create(const SourceRange& range, const Expr& value) {
732
+ return Return(Compound::create(TK_RETURN, range, {value}));
733
+ }
734
+ };
735
+
736
+ struct Raise : public Stmt {
737
+ explicit Raise(const TreeRef& tree) : Stmt(tree) {
738
+ tree_->match(TK_RAISE);
739
+ }
740
+ Expr expr() const {
741
+ return Expr(subtree(0));
742
+ }
743
+ static Raise create(const SourceRange& range, const Expr& expr) {
744
+ return Raise(Compound::create(TK_RAISE, range, {expr}));
745
+ }
746
+ };
747
+
748
+ struct Assert : public Stmt {
749
+ explicit Assert(const TreeRef& tree) : Stmt(tree) {
750
+ tree_->match(TK_ASSERT);
751
+ }
752
+ Expr test() const {
753
+ return Expr(subtree(0));
754
+ }
755
+ Maybe<Expr> msg() const {
756
+ return Maybe<Expr>(subtree(1));
757
+ }
758
+ static Assert create(
759
+ const SourceRange& range,
760
+ const Expr& test,
761
+ const Maybe<Expr>& msg) {
762
+ return Assert(Compound::create(TK_ASSERT, range, {test, msg}));
763
+ }
764
+ };
765
+
766
+ struct Pass : public Stmt {
767
+ explicit Pass(const TreeRef& tree) : Stmt(tree) {
768
+ tree_->match(TK_PASS);
769
+ }
770
+ static Pass create(const SourceRange& range) {
771
+ return Pass(Compound::create(TK_PASS, range, {}));
772
+ }
773
+ };
774
+
775
+ struct Dots : public Expr {
776
+ explicit Dots(const TreeRef& tree) : Expr(tree) {
777
+ tree_->match(TK_DOTS);
778
+ }
779
+ static Dots create(const SourceRange& range) {
780
+ return Dots(Compound::create(TK_DOTS, range, {}));
781
+ }
782
+ };
783
+
784
+ struct Break : public Stmt {
785
+ explicit Break(const TreeRef& tree) : Stmt(tree) {
786
+ tree_->match(TK_BREAK);
787
+ }
788
+ static Break create(const SourceRange& range) {
789
+ return Break(Compound::create(TK_BREAK, range, {}));
790
+ }
791
+ };
792
+
793
+ struct Continue : public Stmt {
794
+ explicit Continue(const TreeRef& tree) : Stmt(tree) {
795
+ tree_->match(TK_CONTINUE);
796
+ }
797
+ static Continue create(const SourceRange& range) {
798
+ return Continue(Compound::create(TK_CONTINUE, range, {}));
799
+ }
800
+ };
801
+
802
+ struct ExprStmt : public Stmt {
803
+ explicit ExprStmt(const TreeRef& tree) : Stmt(tree) {
804
+ tree_->match(TK_EXPR_STMT);
805
+ }
806
+ Expr expr() {
807
+ return Expr(subtree(0));
808
+ }
809
+ static ExprStmt create(const SourceRange& range, const Expr& list) {
810
+ return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list}));
811
+ }
812
+ };
813
+
814
+ ////////////////////////////////////////////////////////////////////////////////
815
+ // Expressions
816
+ ////////////////////////////////////////////////////////////////////////////////
817
+
818
+ struct BinOp : public Expr {
819
+ explicit BinOp(const TreeRef& tree) : Expr(tree) {
820
+ switch (tree->kind()) {
821
+ case TK_AND:
822
+ case TK_OR:
823
+ case '<':
824
+ case '>':
825
+ case TK_IS:
826
+ case TK_ISNOT:
827
+ case TK_EQ:
828
+ case TK_LE:
829
+ case TK_GE:
830
+ case TK_NE:
831
+ case '+':
832
+ case '*':
833
+ case '/':
834
+ case '-':
835
+ case '@':
836
+ case TK_POW:
837
+ case TK_LSHIFT:
838
+ case TK_RSHIFT:
839
+ case '%':
840
+ case '&':
841
+ case '^':
842
+ case '|':
843
+ case TK_FLOOR_DIV:
844
+ case TK_IN:
845
+ if (tree->trees().size() != 2)
846
+ throw ErrorReport(tree)
847
+ << "BinOp expected 2 subtrees, found " << tree->trees().size();
848
+ return;
849
+ default:
850
+ throw ErrorReport(tree)
851
+ << kindToString(tree->kind()) << " is not a valid BinOp";
852
+ }
853
+ }
854
+ Expr lhs() const {
855
+ return Expr(subtree(0));
856
+ }
857
+ Expr rhs() const {
858
+ return Expr(subtree(1));
859
+ }
860
+ static BinOp create(
861
+ const SourceRange& range,
862
+ int kind,
863
+ const Expr& lhs,
864
+ const Expr& rhs) {
865
+ return BinOp(Compound::create(kind, range, {lhs, rhs}));
866
+ }
867
+ };
868
+
869
+ struct UnaryOp : public Expr {
870
+ explicit UnaryOp(const TreeRef& tree) : Expr(tree) {
871
+ switch (tree->kind()) {
872
+ case TK_UNARY_MINUS:
873
+ case '~':
874
+ case TK_NOT:
875
+ if (tree->trees().size() != 1)
876
+ throw ErrorReport(tree)
877
+ << "UnaryOp expected 1 subtree, found " << tree->trees().size();
878
+ return;
879
+ default:
880
+ throw ErrorReport(tree)
881
+ << kindToString(tree->kind()) << " is not a valid UnaryOp";
882
+ }
883
+ }
884
+ static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) {
885
+ return UnaryOp(Compound::create(kind, range, {expr}));
886
+ }
887
+ };
888
+
889
+ struct Const : public Expr {
890
+ explicit Const(const TreeRef& tree) : Expr(tree) {
891
+ tree_->matchNumSubtrees(TK_CONST, 1);
892
+ }
893
+ bool isFloatingPoint() const {
894
+ if (isComplex())
895
+ return false;
896
+
897
+ bool is_inf = subtree(0)->stringValue() == "inf";
898
+ return is_inf ||
899
+ subtree(0)->stringValue().find_first_of(".eE") != std::string::npos;
900
+ }
901
+ bool isIntegral() const {
902
+ return !isFloatingPoint() && !isComplex();
903
+ }
904
+ bool isComplex() const {
905
+ return subtree(0)->stringValue().find_first_of('j') != std::string::npos;
906
+ }
907
+ int64_t asIntegral() const {
908
+ try {
909
+ // NOLINTNEXTLINE(modernize-use-nullptr)
910
+ return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0);
911
+ } catch (const std::out_of_range&) {
912
+ throw ErrorReport(range()) << "Integral constant out of range "
913
+ "(must fit in a signed 64 bit integer)";
914
+ }
915
+ }
916
+ double asFloatingPoint() const {
917
+ // We can't pass in nullptr as the dummy pointer gets dereferenced for
918
+ // Android version of strtod_c().
919
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
920
+ char* dummy;
921
+ return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy);
922
+ }
923
+ c10::complex<double> asComplex() const {
924
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
925
+ char* dummy;
926
+ auto str = subtree(0)->stringValue();
927
+ // Complex numbers (a+bj, where a is non-zero) are parsed as an addition
928
+ // between float/int a and a complex number "bj". When a is 0, a complex
929
+ // number bj is created as above. So, while parsing the string, we don't
930
+ // have to worry about the real component of the complex number.
931
+ auto imag =
932
+ torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy);
933
+ return c10::complex<double>(0, imag);
934
+ }
935
+ const std::string& text() const {
936
+ return subtree(0)->stringValue();
937
+ }
938
+ static Const create(const SourceRange& range, const std::string& value) {
939
+ return Const(Compound::create(TK_CONST, range, {String::create(value)}));
940
+ }
941
+ };
942
+
943
+ struct StringLiteral : public Expr {
944
+ explicit StringLiteral(const TreeRef& tree) : Expr(tree) {
945
+ tree_->matchNumSubtrees(TK_STRINGLITERAL, 1);
946
+ }
947
+ const std::string& text() const {
948
+ return subtree(0)->stringValue();
949
+ }
950
+ static StringLiteral create(
951
+ const SourceRange& range,
952
+ const std::string& value) {
953
+ return StringLiteral(
954
+ Compound::create(TK_STRINGLITERAL, range, {String::create(value)}));
955
+ }
956
+ };
957
+
958
+ struct Apply : public Expr {
959
+ explicit Apply(const TreeRef& tree) : Expr(tree) {
960
+ tree_->match(TK_APPLY);
961
+ }
962
+ Expr callee() const {
963
+ return Expr(subtree(0));
964
+ }
965
+ List<Expr> inputs() const {
966
+ return List<Expr>(subtree(1));
967
+ }
968
+ List<Attribute> attributes() const {
969
+ return List<Attribute>(subtree(2));
970
+ }
971
+ static Apply create(
972
+ const SourceRange& range,
973
+ const Expr& callee,
974
+ const List<Expr>& inputs,
975
+ const List<Attribute>& attributes) {
976
+ return Apply(
977
+ Compound::create(TK_APPLY, range, {callee, inputs, attributes}));
978
+ }
979
+ };
980
+
981
+ struct Select : public Expr {
982
+ explicit Select(const TreeRef& tree) : Expr(tree) {
983
+ tree_->match('.');
984
+ }
985
+ Expr value() const {
986
+ return Expr(subtree(0));
987
+ }
988
+ Ident selector() const {
989
+ return Ident(subtree(1));
990
+ }
991
+ static Select create(
992
+ const SourceRange& range,
993
+ const Expr& value,
994
+ const Ident& selector) {
995
+ return Select(Compound::create('.', range, {value, selector}));
996
+ }
997
+ };
998
+
999
+ struct SliceExpr : public Expr {
1000
+ explicit SliceExpr(const TreeRef& tree) : Expr(tree) {
1001
+ tree_->match(TK_SLICE_EXPR);
1002
+ }
1003
+ Maybe<Expr> start() const {
1004
+ return Maybe<Expr>(subtree(0));
1005
+ }
1006
+ Maybe<Expr> end() const {
1007
+ return Maybe<Expr>(subtree(1));
1008
+ }
1009
+ Maybe<Expr> step() const {
1010
+ return Maybe<Expr>(subtree(2));
1011
+ }
1012
+ Expr startOr(int64_t alternative) const {
1013
+ const auto startOption = start();
1014
+ return startOption.present() ? startOption.get() : createInt(alternative);
1015
+ }
1016
+ Expr endOr(int64_t alternative) const {
1017
+ const auto endOption = end();
1018
+ return endOption.present() ? endOption.get() : createInt(alternative);
1019
+ }
1020
+ Expr stepOr(int64_t alternative) const {
1021
+ const auto stepOption = step();
1022
+ return stepOption.present() ? stepOption.get() : createInt(alternative);
1023
+ }
1024
+ static SliceExpr create(
1025
+ const SourceRange& range,
1026
+ const Maybe<Expr>& start,
1027
+ const Maybe<Expr>& end,
1028
+ const Maybe<Expr>& step) {
1029
+ return SliceExpr(
1030
+ Compound::create(TK_SLICE_EXPR, range, {start, end, step}));
1031
+ }
1032
+
1033
+ private:
1034
+ Expr createInt(int64_t value) const {
1035
+ return Expr(Const::create(range(), c10::to_string(value)));
1036
+ }
1037
+ };
1038
+
1039
+ struct Subscript : public Expr {
1040
+ explicit Subscript(const TreeRef& tree) : Expr(tree) {
1041
+ tree_->match(TK_SUBSCRIPT);
1042
+ }
1043
+ Expr value() const {
1044
+ return Expr(subtree(0));
1045
+ }
1046
+ List<Expr> subscript_exprs() const {
1047
+ return List<Expr>(subtree(1));
1048
+ }
1049
+ static Subscript create(
1050
+ const SourceRange& range,
1051
+ const Expr& value,
1052
+ const List<Expr>& subscript_exprs) {
1053
+ auto whole_range = SourceRange(
1054
+ range.source(), range.start(), subscript_exprs.range().end() + 1);
1055
+ return Subscript(
1056
+ Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs}));
1057
+ }
1058
+ };
1059
+
1060
+ struct Var : public Expr {
1061
+ explicit Var(const TreeRef& tree) : Expr(tree) {
1062
+ tree_->match(TK_VAR);
1063
+ };
1064
+ Ident name() const {
1065
+ return Ident(subtree(0));
1066
+ }
1067
+ static Var create(const SourceRange& range, const Ident& name) {
1068
+ return Var(Compound::create(TK_VAR, range, {name}));
1069
+ }
1070
+ };
1071
+
1072
+ // WithItem represents an item used with a With statement.
1073
+ struct WithItem : public Expr {
1074
+ explicit WithItem(const TreeRef& tree) : Expr(tree) {
1075
+ tree_->match(TK_WITH_ITEM);
1076
+ }
1077
+
1078
+ Expr target() const {
1079
+ return Expr(subtree(0));
1080
+ }
1081
+
1082
+ Maybe<Var> var() const {
1083
+ return Maybe<Var>(subtree(1));
1084
+ }
1085
+
1086
+ static WithItem create(
1087
+ const SourceRange& range,
1088
+ const Expr& target,
1089
+ const Maybe<Var>& var) {
1090
+ return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var}));
1091
+ }
1092
+ };
1093
+
1094
+ // With represents a with statement consisting of a list of with items and a
1095
+ // body of statements.
1096
+ struct With : public Stmt {
1097
+ explicit With(const TreeRef& tree) : Stmt(tree) {
1098
+ tree_->match(TK_WITH);
1099
+ }
1100
+
1101
+ List<WithItem> targets() const {
1102
+ return List<WithItem>(subtree(0));
1103
+ }
1104
+
1105
+ List<Stmt> body() const {
1106
+ return List<Stmt>(subtree(1));
1107
+ }
1108
+
1109
+ static With create(
1110
+ const SourceRange& range,
1111
+ const List<WithItem>& targets,
1112
+ const List<Stmt>& body) {
1113
+ return With(Compound::create(TK_WITH, range, {targets, body}));
1114
+ }
1115
+ };
1116
+
1117
+ struct TernaryIf : public Expr {
1118
+ explicit TernaryIf(const TreeRef& tree) : Expr(tree) {
1119
+ tree_->matchNumSubtrees(TK_IF_EXPR, 3);
1120
+ };
1121
+ Expr cond() const {
1122
+ return Expr(subtree(0));
1123
+ }
1124
+ Expr true_expr() const {
1125
+ return Expr(subtree(1));
1126
+ }
1127
+ Expr false_expr() const {
1128
+ return Expr(subtree(2));
1129
+ }
1130
+ static TernaryIf create(
1131
+ const SourceRange& range,
1132
+ const Expr& cond,
1133
+ const Expr& true_expr,
1134
+ const Expr& false_expr) {
1135
+ return TernaryIf(
1136
+ Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr}));
1137
+ };
1138
+ };
1139
+
1140
+ struct ListLiteral : public Expr {
1141
+ explicit ListLiteral(const TreeRef& tree) : Expr(tree) {
1142
+ tree_->match(TK_LIST_LITERAL);
1143
+ }
1144
+ List<Expr> inputs() const {
1145
+ return subtree(0);
1146
+ }
1147
+ static ListLiteral create(
1148
+ const SourceRange& range,
1149
+ const List<Expr>& inputs) {
1150
+ return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs}));
1151
+ }
1152
+ };
1153
+
1154
+ struct TupleLiteral : public Expr {
1155
+ explicit TupleLiteral(const TreeRef& tree) : Expr(tree) {
1156
+ tree_->match(TK_TUPLE_LITERAL);
1157
+ }
1158
+ List<Expr> inputs() const {
1159
+ return subtree(0);
1160
+ }
1161
+ static TupleLiteral create(
1162
+ const SourceRange& range,
1163
+ const List<Expr>& inputs) {
1164
+ return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs}));
1165
+ }
1166
+ };
1167
+
1168
+ struct DictLiteral : public Expr {
1169
+ explicit DictLiteral(const TreeRef& tree) : Expr(tree) {
1170
+ tree_->match(TK_DICT_LITERAL);
1171
+ }
1172
+ List<Expr> key_inputs() const {
1173
+ return subtree(0);
1174
+ }
1175
+ List<Expr> value_inputs() const {
1176
+ return subtree(1);
1177
+ }
1178
+ static DictLiteral create(
1179
+ const SourceRange& range,
1180
+ const List<Expr>& keys,
1181
+ const List<Expr>& values) {
1182
+ return DictLiteral(
1183
+ Compound::create(TK_DICT_LITERAL, range, {keys, values}));
1184
+ }
1185
+ };
1186
+
1187
+ struct Starred : public Expr {
1188
+ explicit Starred(const TreeRef& tree) : Expr(tree) {
1189
+ tree_->match(TK_STARRED);
1190
+ }
1191
+ Expr expr() const {
1192
+ return Expr(subtree(0));
1193
+ }
1194
+ static Starred create(const SourceRange& range, const Expr& expr) {
1195
+ return Starred(Compound::create(TK_STARRED, range, {expr}));
1196
+ }
1197
+ };
1198
+
1199
+ struct Delete : public Stmt {
1200
+ explicit Delete(const TreeRef& tree) : Stmt(tree) {
1201
+ tree_->match(TK_DELETE);
1202
+ }
1203
+ List<Expr> targets() const {
1204
+ return subtree(0);
1205
+ }
1206
+ static Delete create(const SourceRange& range, const List<Expr>& targets) {
1207
+ return Delete(Compound::create(TK_DELETE, range, {targets}));
1208
+ }
1209
+ };
1210
+
1211
+ /*
1212
+ * NOTE: transforming PEP 604 union into equivalent union type
1213
+ *
1214
+ * NOTE: Union[int, float] parses into:
1215
+ * <EXPR> expr:(subscript
1216
+ * (variable (ident Union))
1217
+ * (list
1218
+ * (variable (ident int))
1219
+ * (variable (ident float))))
1220
+ * <KIND> subscript
1221
+ *
1222
+ * NOTE: (int | float) parses into:
1223
+ * <EXPR> expr:(|
1224
+ * (variable (ident int))
1225
+ * (variable (ident float)))
1226
+ * <KIND> |
1227
+ */
1228
+
1229
+ inline void _flatten_pep604_union(
1230
+ const torch::jit::Expr& node,
1231
+ std::vector<torch::jit::Expr>* result) {
1232
+ // flatten possibly nested union expressions like (int | (float | str))
1233
+ // into a flat list of expressions like [int, float, str]
1234
+ if (node.kind() == '|') {
1235
+ auto as_binop = torch::jit::BinOp(node);
1236
+ _flatten_pep604_union(as_binop.lhs(), result);
1237
+ _flatten_pep604_union(as_binop.rhs(), result);
1238
+ } else {
1239
+ result->push_back(node);
1240
+ }
1241
+ }
1242
+
1243
+ inline std::vector<Expr> get_pep604_union_members(const Expr& node) {
1244
+ std::vector<Expr> result;
1245
+ _flatten_pep604_union(node, &result);
1246
+ return result;
1247
+ }
1248
+
1249
+ // Flattens a PEP 604 union into a classical union.
1250
+ // For example, ((x | y) | z) is transformed into Union[x, y, z].
1251
+ inline Expr pep604union_to_union(const Expr& expr) {
1252
+ // noop if not a pep604 union
1253
+ if (expr.kind() != '|')
1254
+ return expr;
1255
+
1256
+ // In order to support unions with more than 2 operands ((x|y)|z), we need to
1257
+ // recursively flatten the tree of | expressions.
1258
+ auto members = get_pep604_union_members(expr);
1259
+ auto synthesised_union = Subscript::create(
1260
+ expr.range(),
1261
+ Var::create(expr.range(), Ident::create(expr.range(), "Union")),
1262
+ List<Expr>::create(expr.range(), members));
1263
+ return std::move(synthesised_union);
1264
+ }
1265
+
1266
+ } // namespace jit
1267
+ } // namespace torch
1268
+
1269
+ namespace std {
1270
+
1271
+ template <typename T>
1272
+ struct iterator_traits<torch::jit::ListIterator<T>>
1273
+ : std::iterator_traits<torch::jit::TreeList::const_iterator> {};
1274
+
1275
+ } // namespace std
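
To see how these typed views nest, here is a short illustrative sketch (not part of the uploaded header) that builds the expression int | float as a '|' BinOp over two Var nodes and flattens it with pep604union_to_union into the Union[int, float] subscript form described in the comment above. As before, a default-constructible SourceRange is assumed for synthetic ranges.

#include <torch/csrc/jit/frontend/tree_views.h>

using namespace torch::jit;

Expr demo_pep604_union() {
  SourceRange r;  // synthetic range (assumed default-constructible)
  Expr int_ty = Var::create(r, Ident::create(r, "int"));
  Expr float_ty = Var::create(r, Ident::create(r, "float"));
  // (int | float) is represented as a '|' binary expression over the two Vars.
  Expr pep604 = BinOp::create(r, '|', int_ty, float_ty);
  // Rewritten into Subscript(Var(Union), List[int, float]).
  return pep604union_to_union(pep604);
}
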
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h ADDED
@@ -0,0 +1,322 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <c10/util/flat_hash_map.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/ir/type_hashing.h>
7
+ #include <torch/csrc/jit/passes/create_functional_graphs.h>
8
+ #include <torch/csrc/jit/passes/utils/memory_dag.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * Alias analysis pass.
15
+ *
16
+ * This pass produces an AliasDb that contains aliasing and mutation
17
+ * information about the graph. Users can use this information to determine
18
+ * whether mutations to the graph are safe, i.e. they don't reorder/change
19
+ * nodes in a way that affects output.
20
+ *
21
+ * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be
22
+ * associated with one or more "alias sets". If two values share an alias set,
23
+ * that means they may alias, implying that a mutation to one value cannot be
24
+ * reordered past a use of the other. Only reordering two reads of an alias set
25
+ * is considered safe.
26
+ *
27
+ * There is a special alias set called the "wildcard set", which indicates that
28
+ * we're not sure what this value may alias. To be conservative, we consider the
29
+ * wildcard alias set as potentially aliasing any other wildcard value within
30
+ * the same type class. Whenever a value becomes contained by another value,
31
+ * such as when a Tensor is appended to a List[Tensor], the contained element
32
+ * becomes part of the wildcard set.
33
+ *
34
+ * Values that contain other mutable types, such as List[Tensor], are
35
+ * initialized as containing the Wildcard set for all contained mutable types.
36
+ *
37
+ * The AliasDb API references the idea of "mutable" vs "immutable"
38
+ * types. "Mutable" means that the object's value can change, while
39
+ * "immutable" means that the value is fixed. (For example, `List` is
40
+ * mutable, so you can add and delete elements from it. On the other
41
+ * hand, you can't modify a Tuple once you create it, making `Tuple` an
42
+ * immutable container.)
43
+ *
44
+ * `isFrozen` - if the Module is frozen then consider attributes as freshly
45
+ * created objects. Freezing API invokes alias analysis to check if they are
46
+ * mutated internally.
47
+ *
48
+ * `descendFunctionCalls` - recursively analyze function and method calls
49
+ * instead of conservative analysis. Generally analysis should be done after
50
+ // inlining so the implementation for recursive analysis is unoptimized.
51
+ */
52
+ class AliasDb {
53
+ public:
54
+ TORCH_API explicit AliasDb(
55
+ std::shared_ptr<Graph> graphi,
56
+ bool isFrozen = false,
57
+ bool descendFunctionCalls = false);
58
+ TORCH_API ~AliasDb();
59
+
60
+ // There are limitations to what effects the alias analysis can track. Two
61
+ // kinds of nodes may have untracked effects:
62
+ // 1. Nodes that write to a value that may alias the graph inputs (since
63
+ // the inputs can be used outside the graph).
64
+ // 2. Nodes that write to something in the wildcard set.
65
+ //
66
+ // These nodes are considered not safe to eliminate or mutate under any
67
+ // circumstances.
68
+ bool writesToWildcard(Node* n) const;
69
+
70
+ // Does `n` write to an alias of one of the values in `vs`?
71
+ // if `recurseBlocks` is true, consider writes on the nodes in `n`s sub-blocks
72
+ TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const;
73
+
74
+ // Do `a` and `b` potentially share a memory location, or does either
75
+ // hold in memory any element that exists in the other
76
+ TORCH_API bool mayContainAlias(Value* a, Value* b) const;
77
+
78
+ TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef<Value*> b) const;
79
+
80
+ // Do any values in group `a` share a memory location or hold in memory
81
+ // any element that exists in group `b`
82
+ TORCH_API bool mayContainAlias(
83
+ const at::ArrayRef<Value*> a,
84
+ const at::ArrayRef<Value*> b) const;
85
+
86
+ // Do `a` and `b` potentially share a memory location?
87
+ TORCH_API bool mayAlias(const Value* a, const Value* b) const;
88
+ // Do any values in group `a` potentially share a memory location with any
89
+ // value in group `b`? i.e. may they overlap?
90
+ TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const;
91
+
92
+ // Do any nodes write to an alias set input to `n`?
93
+ TORCH_API bool hasInputWriters(const Node* n) const;
94
+
95
+ // Do any nodes write to an alias set output by `n`?
96
+ TORCH_API bool hasOutputWriters(const Node* n) const;
97
+
98
+ // Do any nodes write to an alias set used as an input or output of `n`?
99
+ TORCH_API bool hasWriters(const Node* n) const;
100
+
101
+ // Do any nodes write to `v`s memory location?
102
+ TORCH_API bool hasWriters(const Value* v) const;
103
+
104
+ // Is the operation in-place? i.e. doesn't write anywhere but locations it
105
+ // reads from.
106
+ TORCH_API bool isMutable(Node* n) const;
107
+
108
+ TORCH_API bool escapesScope(const at::ArrayRef<Value*>& vs) const;
109
+
110
+ // Is it safe to change whether `a` and `b` alias each other ?
111
+ TORCH_API bool safeToChangeAliasingRelationship(
112
+ const at::ArrayRef<Value*>& a,
113
+ const at::ArrayRef<Value*>& b) const;
114
+
115
+ // Move `n` (already in the graph) after `movePoint` in the topological order.
116
+ //
117
+ // Tries to preserve value dependencies, so other nodes might be moved. We
118
+ // make two guarantees about the postcondition of the node list:
119
+ // - `n` is directly after `movePoint`.
120
+ // - only nodes between `n` and `movePoint` have been moved.
121
+ //
122
+ // Returns `false` if it's impossible to move `n` after `movePoint` without
123
+ // violating dependencies, otherwise executes the move and returns `true`
124
+ TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint);
125
+ TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint);
126
+
127
+ bool couldMoveAfterTopologically(Node* n, Node* movePoint);
128
+ bool couldMoveBeforeTopologically(Node* n, Node* movePoint);
129
+
130
+ // For debugging: print alias db state to stdout
131
+ TORCH_API void dump() const;
132
+ TORCH_API std::string toString() const;
133
+
134
+ // Generates a DOT (www.graphviz.org) graph representation
135
+ //
136
+ // Returns `true` if the output file was successfully generated
137
+ //
138
+ // WARNING: The output dot file path can't include shell specific notations,
139
+ // for example you can't use "~/temp/aliasdb.dot"
140
+ // (instead, use "/home/user/temp/aliasdb.dot")
141
+ //
142
+ TORCH_API bool dumpToGraphvizFile(const char* filename) const;
143
+ TORCH_API std::string toGraphviz() const;
144
+
145
+ // Returns `true` if the given element is mutable or if it is a
146
+ // container type with an internal mutable element (e.g.
147
+ // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so
148
+ // it would be considered a "mutable type" in AliasDb)
149
+ static bool isMutableType(const Value* v);
150
+ static bool isMutableType(const TypePtr& type);
151
+
152
+ /**
153
+ * Mutation API
154
+ *
155
+ * These methods allow you to update AliasDb in-place if you are performing
156
+ * graph mutation.
157
+ *
158
+ * WARNING: These methods should be considered INTERNAL. They do not perform
159
+ * very many correctness checks, the user is responsible for making sure they
160
+ * are updating AliasDb correctly. `Lint()`ing the AliasDb can help with
161
+ * this.
162
+ */
163
+ // Copy `existing`s aliasing info to `new_value`, and remove `existing`.
164
+ TORCH_API void replaceWithNewValue(Value* existing, Value* new_value);
165
+ // Copy `from`s aliasing info to `to`.
166
+ TORCH_API void copyValue(Value* from, Value* to);
167
+ // Create a new `value` that does not alias anything else.
168
+ TORCH_API void createValue(const Value* value);
169
+
170
+ // Enable more precise treatment of prim::TupleConstruct.
171
+ void enablePreciseTupleContainerAnalysis();
172
+
173
+ friend struct MutationRemover;
174
+
175
+ private:
176
+ // Helper for topologically-safe node moves.
177
+ class WorkingSet;
178
+ enum class MoveSide { BEFORE, AFTER };
179
+ bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun);
180
+ void move(Node* toMove, Node* movePoint, MoveSide moveSide);
181
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
182
+
183
+ bool isMutableTypeInternal(const Value* v) const;
184
+ bool isMutableTypeInternal(const TypePtr& type) const;
185
+
186
+ /**
187
+ * Write and read internal API
188
+ */
189
+ // Get all the values that `n` writes to.
190
+ // NOTE: this only returns values directly written to, not aliases thereof
191
+ //
192
+ // if `recurseBlocks` is true, gather writes on the nodes in `n`s sub-blocks
193
+ MemoryLocations getWrites(Node* n) const;
194
+ void getWritesImpl(Node* n, MemoryLocations& ret) const;
195
+ // Register the fact that `n` writes to `v`.
196
+ void registerWrite(const Value* v, Node* n, bool writeToContained = false);
197
+ // Get all the values that `n` reads from.
198
+ // if `recurseBlocks` is true, gather reads on the nodes in `n`s sub-blocks
199
+ MemoryLocations getReads(Node* n) const;
200
+ void getReadsImpl(Node* n, MemoryLocations& ret) const;
201
+
202
+ /**
203
+ * Wildcard methods
204
+ */
205
+ // Register `v` as a wildcard value.
206
+ c10::optional<Element*> setWildcard(const Value* v);
207
+
208
+ // Is this a value which will not alias?
209
+ bool nonAliasingValue(const Value* elem) const;
210
+
211
+ /**
212
+ * Special analysis methods
213
+ */
214
+ void analyze(const std::shared_ptr<Graph>& graph);
215
+ void analyze(Block* block);
216
+ void analyze(Node* node);
217
+ void analyzeImpl(Node* node);
218
+ void analyzeIf(Node* node);
219
+ void analyzeLoop(Node* node);
220
+ void analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph);
221
+ void analyzeSubgraph(Node* node);
222
+ void analyzeCreator(Node* node);
223
+ void analyzeExtractor(Node* node);
224
+ void analyzeChunk(Node* node);
225
+ void analyzeBroadcastingChunk(Node* node);
226
+ void analyzeFork(Node* node);
227
+ void analyzeWait(Node* node);
228
+ void analyzeAwaitable(Node* node);
229
+ void analyzeAwaitableWait(Node* node);
230
+ void analyzeRpcAsync(Node* node);
231
+ void analyzeBatchNorm(Node* node);
232
+ void analyzeInstanceNorm(Node* node);
233
+ void analyzeGradOf(Node* node);
234
+ void analyzeSetAttr(Node* node);
235
+ void analyzeConservative(Node* node);
236
+ void analyzeContainerConstruct(Node* node);
237
+ bool tryRegisteredAnalysis(Node* node);
238
+
239
+ /**
240
+ * Alias manipulation methods
241
+ */
242
+ void makeAllAlias(const std::vector<Value*>& values);
243
+ void makePointerTo(const Value* value, const Value* to);
244
+ TORCH_API void addToContainedElements(
245
+ const Value* element,
246
+ const Value* container);
247
+ void mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from);
248
+ void giveFreshAlias(
249
+ const Value* value,
250
+ bool add_wildcard_to_contained_elems = true);
251
+ Element* getOrCreateElement(const Value* value);
252
+
253
+ const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const;
254
+ bool functionalNonEscapingListUse(const Use& use) const;
255
+ bool functionalNonEscapingTupleUse(const Use& use) const;
256
+
257
+ std::shared_ptr<Graph> graph_;
258
+
259
+ // If the Module is frozen then consider attributes as freshly created
260
+ // objects. Freezing API invokes alias analysis to check if they are mutated
261
+ // internally.
262
+ bool isFrozen_;
263
+
264
+ bool descend_function_calls_;
265
+ std::unordered_map<Graph*, std::vector<std::shared_ptr<Graph>>>
266
+ function_call_copies_;
267
+
268
+ // The points-to graph that stores aliasing relationships
269
+ std::unique_ptr<MemoryDAGBuilder> memoryDAGBuilder_;
270
+ std::unique_ptr<MemoryDAG> memoryDAG_;
271
+
272
+ // Mapping of values to MemoryDAG elements
273
+ ska::flat_hash_map<const Value*, Element*> elementMap_;
274
+ // All wildcard Elements (one for each unique mutable type)
275
+ ska::flat_hash_map<TypePtr, Element*, HashType, EqualType> wildcardIndex_;
276
+ Element* getWildcard(const TypePtr& type) const;
277
+ c10::optional<Element*> tryGetOrCreateWildcard(const TypePtr& type);
278
+ void addContainedTypesToFreshElement(
279
+ Element* container_elem,
280
+ const AliasTypeSet& mut_types);
281
+ void pointUnionTypeElementToAllContainedTypes(
282
+ Element* container_elem,
283
+ const AliasTypeSet& mut_types);
284
+
285
+ std::vector<Element*> getElements(at::ArrayRef<Value*> vs) const;
286
+ bool mayAliasWildcard(const Value* v) const;
287
+ bool mayAliasWildcard(const at::ArrayRef<Value*> vs) const;
288
+ bool hasWriters(const at::ArrayRef<Value*>& values) const;
289
+
290
+ // Cached mapping of type ptrs to their mutable types
291
+ mutable ska::flat_hash_map<TypePtr, AliasTypeSet> mapped_mutable_types_;
292
+
293
+ /**
294
+ * State for tracking write info.
295
+ */
296
+ // Write registry where the analysis can record the writes as it sees them.
297
+ // This information is later denormalized into various caches to improve query
298
+ // efficiency.
299
+ struct WriteRegistry;
300
+ std::unique_ptr<WriteRegistry> writeRegistry_;
301
+
302
+ // Map of nodes to the memory locations that they write to
303
+ using TWriteIndex = ska::flat_hash_map<Node*, MemoryLocations>;
304
+ c10::optional<TWriteIndex> writeIndex_;
305
+ // Collection of all memory locations that are written to.
306
+ c10::optional<MemoryLocations> writtenToLocationsIndex_;
307
+ void buildWrittenToLocationsIndex();
308
+
309
+ std::unordered_set<const Value*> wildcards_;
310
+
311
+ std::string getElementName(const Element* e) const;
312
+
313
+ friend void Lint(const AliasDb* db);
314
+ };
315
+
316
+ // A helper that checks that the invariants of AliasDb are maintained.
317
+ // Useful if you are using the AliasDb mutation API and want to verify that
318
+ // you did the right thing.
319
+ TORCH_API void Lint(const AliasDb* db);
320
+
321
+ } // namespace jit
322
+ } // namespace torch
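A minimal usage sketch (an editorial illustration, not part of the header above): after mutating the graph through AliasDb's mutation API, the Lint helper declared above can verify that the invariants still hold. The moveIfSafe wrapper and its arguments are hypothetical.

#include <torch/csrc/jit/ir/alias_analysis.h>

// Try to move `n` after `dst` only when AliasDb says it is safe, then check
// that the AliasDb invariants still hold.
bool moveIfSafe(torch::jit::AliasDb& db, torch::jit::Node* n, torch::jit::Node* dst) {
  if (!db.moveAfterTopologicallyValid(n, dst)) {
    return false; // the move would reorder a read/write, so it is rejected
  }
  torch::jit::Lint(&db); // asserts if the AliasDb invariants are violated
  return true;
}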
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h ADDED
@@ -0,0 +1,201 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Intrusive doubly linked lists with sane reverse iterators.
9
+ // The list type is named generic_graph_node_list because it is ONLY
10
+ // used for Graph's Node lists, and if you want to use it for other
11
+ // things, you will have to do some refactoring.
12
+ //
13
+ // At the moment, the templated type T must support a few operations:
14
+ //
15
+ // - It must have a field: T* next_in_graph[2] = { nullptr, nullptr };
16
+ // which are used for the intrusive linked list pointers.
17
+ //
18
+ // - It must have a method 'destroy()', which removes T from the
19
+ // list and frees a T.
20
+ //
21
+ // In practice, we are only using it with Node and const Node. 'destroy()'
22
+ // needs to be renegotiated if you want to use this somewhere else.
23
+ //
24
+ // Regardless of the iteration direction, iterators always physically point
25
+ // to the element they logically point to, rather than exhibiting
26
+ // the off-by-one behavior of standard library reverse iterators like
27
+ // std::list's.
28
+
29
+ // The list includes two sentinel nodes, one at the beginning and one at the
30
+ // end with a circular link between them. It is an error to insert nodes after
31
+ // the end sentinel node but before the beginning node:
32
+
33
+ // Visualization showing only the next() links:
34
+ // HEAD -> first -> second -> ... -> last -> TAIL
35
+ // ^------------------------------------------
36
+
37
+ // Visualization showing only the prev() links:
38
+ // HEAD <- first <- second <- ... <- last <- TAIL
39
+ // ------------------------------------------^
40
+
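A small sketch of how this list is typically consumed (editorial illustration; `dropDeadConstants` and its predicate are hypothetical): iterate a Block's nodes and erase some of them in place with destroyCurrent().

#include <torch/csrc/jit/ir/ir.h>

// Erase unused prim::Constant nodes from one block (non-recursive).
void dropDeadConstants(torch::jit::Block* block) {
  for (auto it = block->nodes().begin(); it != block->nodes().end(); ++it) {
    torch::jit::Node* n = *it;
    if (n->kind() == torch::jit::prim::Constant && !n->hasUses()) {
      // destroyCurrent() frees `n` and rewinds the iterator to the previous
      // entry, so the loop's ++it then lands on the node after the erased one.
      it.destroyCurrent();
    }
  }
}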
41
+ static constexpr int kNextDirection = 0;
42
+ static constexpr int kPrevDirection = 1;
43
+
44
+ template <typename T>
45
+ struct generic_graph_node_list;
46
+
47
+ template <typename T>
48
+ struct generic_graph_node_list_iterator;
49
+
50
+ struct Node;
51
+ using graph_node_list = generic_graph_node_list<Node>;
52
+ using const_graph_node_list = generic_graph_node_list<const Node>;
53
+ using graph_node_list_iterator = generic_graph_node_list_iterator<Node>;
54
+ using const_graph_node_list_iterator =
55
+ generic_graph_node_list_iterator<const Node>;
56
+
57
+ template <typename T>
58
+ struct generic_graph_node_list_iterator {
59
+ generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {}
60
+ generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {}
61
+ generic_graph_node_list_iterator(
62
+ const generic_graph_node_list_iterator& rhs) = default;
63
+ generic_graph_node_list_iterator(
64
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
65
+ generic_graph_node_list_iterator& operator=(
66
+ const generic_graph_node_list_iterator& rhs) = default;
67
+ generic_graph_node_list_iterator& operator=(
68
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
69
+ T* operator*() const {
70
+ return cur;
71
+ }
72
+ T* operator->() const {
73
+ return cur;
74
+ }
75
+ generic_graph_node_list_iterator& operator++() {
76
+ AT_ASSERT(cur);
77
+ cur = cur->next_in_graph[d];
78
+ return *this;
79
+ }
80
+ generic_graph_node_list_iterator operator++(int) {
81
+ generic_graph_node_list_iterator old = *this;
82
+ ++(*this);
83
+ return old;
84
+ }
85
+ generic_graph_node_list_iterator& operator--() {
86
+ AT_ASSERT(cur);
87
+ cur = cur->next_in_graph[reverseDir()];
88
+ return *this;
89
+ }
90
+ generic_graph_node_list_iterator operator--(int) {
91
+ generic_graph_node_list_iterator old = *this;
92
+ --(*this);
93
+ return old;
94
+ }
95
+
96
+ // erase cur without invalidating this iterator
97
+ // named differently from destroy so that ->/. bugs do not
98
+ // silently cause the wrong one to be called.
99
+ // iterator will point to the previous entry after call
100
+ void destroyCurrent() {
101
+ T* n = cur;
102
+ cur = cur->next_in_graph[reverseDir()];
103
+ n->destroy();
104
+ }
105
+ generic_graph_node_list_iterator reverse() {
106
+ return generic_graph_node_list_iterator(cur, reverseDir());
107
+ }
108
+
109
+ private:
110
+ int reverseDir() {
111
+ return d == kNextDirection ? kPrevDirection : kNextDirection;
112
+ }
113
+ T* cur;
114
+ int d; // direction 0 is forward 1 is reverse, see next_in_graph
115
+ };
116
+
117
+ template <typename T>
118
+ struct generic_graph_node_list {
119
+ using iterator = generic_graph_node_list_iterator<T>;
120
+ using const_iterator = generic_graph_node_list_iterator<const T>;
121
+ generic_graph_node_list_iterator<T> begin() {
122
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d);
123
+ }
124
+ generic_graph_node_list_iterator<const T> begin() const {
125
+ return generic_graph_node_list_iterator<const T>(head->next_in_graph[d], d);
126
+ }
127
+ generic_graph_node_list_iterator<T> end() {
128
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[!d], d);
129
+ }
130
+ generic_graph_node_list_iterator<const T> end() const {
131
+ return generic_graph_node_list_iterator<const T>(
132
+ head->next_in_graph[!d], d);
133
+ }
134
+ generic_graph_node_list_iterator<T> rbegin() {
135
+ return reverse().begin();
136
+ }
137
+ generic_graph_node_list_iterator<const T> rbegin() const {
138
+ return reverse().begin();
139
+ }
140
+ generic_graph_node_list_iterator<T> rend() {
141
+ return reverse().end();
142
+ }
143
+ generic_graph_node_list_iterator<const T> rend() const {
144
+ return reverse().end();
145
+ }
146
+ generic_graph_node_list reverse() {
147
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
148
+ }
149
+ const generic_graph_node_list reverse() const {
150
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
151
+ }
152
+ T* front() {
153
+ return head->next_in_graph[d];
154
+ }
155
+ const T* front() const {
156
+ return head->next_in_graph[d];
157
+ }
158
+ T* back() {
159
+ return head->next_in_graph[!d];
160
+ }
161
+ const T* back() const {
162
+ return head->next_in_graph[!d];
163
+ }
164
+ generic_graph_node_list(T* head, int d) : head(head), d(d) {}
165
+
166
+ private:
167
+ T* head; // both head and tail are sentinel nodes
168
+ // the first real node is head->next_in_graph[d]
169
+ // the tail sentinel is head->next_in_graph[!d]
170
+ int d;
171
+ };
172
+
173
+ template <typename T>
174
+ static inline bool operator==(
175
+ generic_graph_node_list_iterator<T> a,
176
+ generic_graph_node_list_iterator<T> b) {
177
+ return *a == *b;
178
+ }
179
+
180
+ template <typename T>
181
+ static inline bool operator!=(
182
+ generic_graph_node_list_iterator<T> a,
183
+ generic_graph_node_list_iterator<T> b) {
184
+ return *a != *b;
185
+ }
186
+
187
+ } // namespace jit
188
+ } // namespace torch
189
+
190
+ namespace std {
191
+
192
+ template <typename T>
193
+ struct iterator_traits<torch::jit::generic_graph_node_list_iterator<T>> {
194
+ using difference_type = int64_t;
195
+ using value_type = T*;
196
+ using pointer = T**;
197
+ using reference = T*&;
198
+ using iterator_category = bidirectional_iterator_tag;
199
+ };
200
+
201
+ } // namespace std
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h ADDED
@@ -0,0 +1,1841 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/attributes.h>
4
+ #include <torch/csrc/jit/ir/graph_node_list.h>
5
+ #include <torch/csrc/jit/ir/named_value.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+ #include <torch/csrc/jit/runtime/operator.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+ #include <torch/csrc/utils/python_stub.h>
11
+ #include <torch/csrc/utils/schema_info.h>
12
+
13
+ #include <ATen/Utils.h>
14
+ #include <ATen/core/Tensor.h>
15
+ #include <ATen/core/dynamic_type.h>
16
+ #include <ATen/core/enum_type.h>
17
+ #include <ATen/core/functional.h>
18
+ #include <ATen/core/interned_strings.h>
19
+ #include <ATen/core/ivalue.h>
20
+ #include <ATen/core/jit_type.h>
21
+ #include <c10/util/ArrayRef.h>
22
+ #include <c10/util/Exception.h>
23
+ #include <c10/util/Optional.h>
24
+
25
+ #include <functional>
26
+ #include <iosfwd>
27
+ #include <unordered_set>
28
+ #include <vector>
29
+
30
+ // Forward declare, the real meat is in python_ir.cpp
31
+ template <class T>
32
+ class THPPointer;
33
+ using THPObjectPtr = THPPointer<PyObject>;
34
+ using pyobj_list = std::vector<THPObjectPtr>;
35
+
36
+ namespace torch {
37
+ namespace jit {
38
+ namespace utils {
39
+ TORCH_API std::string getNodesModuleHierarchy(const Node& n);
40
+ } // namespace utils
41
+ class AliasDb;
42
+
43
+ using ::c10::Argument;
44
+ using ::c10::FunctionSchema;
45
+ using ::c10::Symbol;
46
+
47
+ using ::c10::ivalue::Shared;
48
+
49
+ using ::c10::IValue;
50
+ using ::c10::ivalue::Future;
51
+
52
+ using ::c10::ivalue::ConstantString;
53
+
54
+ #define C10_USING(T) using ::c10::T;
55
+ C10_FORALL_TYPES(C10_USING)
56
+ #undef C10_USING
57
+
58
+ #define C10_USING(T) using ::c10::T##Ptr;
59
+ C10_FORALL_TYPES(C10_USING)
60
+ #undef C10_USING
61
+
62
+ using ::c10::Type;
63
+ using ::c10::TypeEnv;
64
+ using ::c10::TypePtr;
65
+
66
+ using ::c10::getTypePtr;
67
+ using ::c10::MatchTypeReturn;
68
+ using ::c10::TypeKind;
69
+
70
+ using ::c10::fmap;
71
+
72
+ namespace prim {
73
+ using namespace ::c10::prim;
74
+ }
75
+ namespace attr {
76
+ using namespace ::c10::attr;
77
+ }
78
+ namespace aten {
79
+ using namespace ::c10::aten;
80
+ }
81
+ namespace cuda {
82
+ #if !defined(USE_ROCM)
83
+ using namespace ::c10::cuda;
84
+ #endif
85
+ } // namespace cuda
86
+
87
+ struct Function;
88
+ struct GraphFunction;
89
+ struct MatchedSchema;
90
+
91
+ // A Graph represents one "function" of computation.
92
+ // It uses a simple ownership model where the graph owns all the nodes inside
93
+ // it. All references inside the graph are raw pointers. Destroying the Graph
94
+ // will invalidate any pointers to nodes in the graph.
95
+ struct Graph;
96
+
97
+ // Node is the base class of the IR graph. It represents one computation
98
+ // and dependencies on a list of Values. The "prim-ops", so to speak.
99
+ struct Node;
100
+
101
+ // A Value represents an input or output to a node that is either a
102
+ // Tensor or an opaque Handle object, as determined by type().
103
+ struct Value;
104
+
105
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
106
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n);
107
+
108
+ // A list of nodes, with inputs and outputs
109
+ struct Block;
110
+
111
+ // Each use is represented by this type; see 'Value::uses()'.
112
+ // 'user' is the consumer of the value; 'offset' is the index into
113
+ // 'user's inputs at which the used value will be found.
114
+ struct Use {
115
+ Use(Node* user, size_t offset) : user(user), offset(offset) {}
116
+ Node* user;
117
+ size_t offset;
118
+
119
+ bool operator==(const Use& b) {
120
+ return user == b.user && offset == b.offset;
121
+ }
122
+ };
123
+
124
+ // Note [User node does not uniquely identify use]
125
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
126
+ // A while back, we wrote some code manipulating uses that looked like this:
127
+ //
128
+ // for (auto& use : used_val->uses_) {
129
+ // if (use.user == this_node) {
130
+ // use.offset += 1;
131
+ // break;
132
+ // }
133
+ // }
134
+ //
135
+ // This code is trying to find a particular use (our node's use) to update it.
136
+ // However, it's wrong: there may be *multiple* uses of a value %x in a node,
137
+ // as might be the case in this IR:
138
+ //
139
+ // %y = Add %x %x
140
+ //
141
+ // In this case, there are two uses of %x whose user is the node 'Add %x %x'.
142
+ // So, "use induced by this node" is not a well-formed concept.
143
+ //
144
+ // If you are looking for "use induced by an input", it's best to use
145
+ // findUseForInput() to get it.
146
+
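Given the note above, a correct way to inspect uses is to compare both the user and the offset; a short sketch (the helper name is hypothetical):

#include <torch/csrc/jit/ir/ir.h>

// Count how many input slots of `consumer` read the value `v`. A node may use
// the same value several times, so there can be more than one matching Use.
size_t countUsesBy(const torch::jit::Value* v, const torch::jit::Node* consumer) {
  size_t count = 0;
  for (const torch::jit::Use& use : v->uses()) {
    if (use.user == consumer) {
      ++count; // use.offset tells which input slot this particular use occupies
    }
  }
  return count;
}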
147
+ // the list types are intentionally simple, but we type-def
148
+ // them here so if we need to change them, refactoring will be easier
149
+ using node_list = std::vector<Node*>;
150
+ using value_list = std::vector<Value*>;
151
+ using use_list = std::vector<Use>;
152
+ template <typename T>
153
+ using ArrayRef = at::ArrayRef<T>;
154
+ using NodeKind = Symbol;
155
+ using topo_position_t = int64_t;
156
+ using ValueSet = std::unordered_set<const Value*>;
157
+
158
+ struct OperatorSet;
159
+ template <typename T>
160
+ struct OperatorMap;
161
+
162
+ // This is a wrapper to allow invalidating the Python object
163
+ // safely when the C++ object for a Node/Value/Block is deleted
164
+ // like much of graph, it isn't safe for different threads to
165
+ // access the same graph
166
+ template <typename T>
167
+ struct Wrap {
168
+ explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {}
169
+ void clear() {
170
+ if (clear_cb) {
171
+ clear_cb(elem);
172
+ }
173
+ elem = nullptr;
174
+ }
175
+ T* elem;
176
+ void (*clear_cb)(void*);
177
+ };
178
+
179
+ struct Value {
180
+ AT_DISALLOW_COPY_AND_ASSIGN(Value);
181
+ Value(Node* node_, size_t offset_);
182
+
183
+ private:
184
+ friend struct Node;
185
+ friend struct Graph;
186
+ Node* node_;
187
+ size_t offset_;
188
+ size_t unique_ = 0; // unique id
189
+ use_list uses_;
190
+ std::string unique_name_;
191
+ TypePtr type_;
192
+ // a managing wrapper for Python to allow invalidation
193
+ std::shared_ptr<Wrap<Value>> wrap_;
194
+
195
+ public:
196
+ Value* setType(TypePtr type);
197
+ TORCH_API void inferTypeFrom(const at::Tensor& output);
198
+ TORCH_API void inferTypeFrom(
199
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
200
+ const TypePtr& type() const {
201
+ AT_ASSERT(type_ != nullptr);
202
+ return type_;
203
+ }
204
+ bool requires_grad() const {
205
+ return type()->requires_grad();
206
+ }
207
+ bool isCompleteTensor() const {
208
+ if (auto pt = type()->cast<TensorType>()) {
209
+ return pt->isComplete();
210
+ }
211
+ return false;
212
+ }
213
+ TORCH_API bool mustBeNone() const;
214
+ TORCH_API bool mustNotBeNone() const;
215
+ size_t unique() const {
216
+ return unique_;
217
+ }
218
+ bool hasDebugName() const {
219
+ return !unique_name_.empty();
220
+ }
221
+ static bool isValidName(const std::string& name);
222
+ TORCH_API Value* setDebugName(const std::string& name);
223
+ std::string debugName() const {
224
+ if (hasDebugName()) {
225
+ return unique_name_;
226
+ }
227
+ return c10::to_string(unique());
228
+ }
229
+ TORCH_API std::string debugNameBase() const;
230
+ Node* node() {
231
+ return node_;
232
+ }
233
+ size_t offset() const {
234
+ return offset_;
235
+ }
236
+ void setOffset(size_t offset) {
237
+ offset_ = offset;
238
+ }
239
+ const Node* node() const {
240
+ return node_;
241
+ }
242
+
243
+ /**
244
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
245
+ * Check #87343 for details.
246
+ */
247
+ Graph* owningGraph();
248
+ const Graph* owningGraph() const;
249
+ // TODO: make this more const correct
250
+ const use_list& uses() const {
251
+ return uses_;
252
+ }
253
+
254
+ bool hasUses() const {
255
+ return !uses().empty();
256
+ }
257
+
258
+ TORCH_API void replaceFirstUseWith(Value* newValue);
259
+
260
+ // Replaces all uses of this value with 'newValue'.
261
+ //
262
+ // Given: %3 = f(%1, %2)
263
+ // %4 = g(%3)
264
+ // %5 = h(%3, %3)
265
+ // Execute: %3.replaceAllUsesWith(%6)
266
+ // Result: %3 = f(%1, %2)
267
+ // %4 = g(%6)
268
+ // %5 = h(%6, %6)
269
+ TORCH_API void replaceAllUsesWith(Value* newValue);
270
+
271
+ // Replaces all uses of this value with 'newValue' after 'node'.
272
+ // Given: %3 = f(%1, %2)
273
+ // %4 = g(%3)
274
+ // %5 = inplace_(%3)
275
+ // %6 = h(%3, %3)
276
+ // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5)
277
+ // Result: %3 = f(%1, %2)
278
+ // %4 = g(%3)
279
+ // %5 = inplace_(%3)
280
+ // %6 = h(%5, %5)
281
+ // XXX: does not check scoping legality, consider using
282
+ // replaceAllUsesDominatedByNodeWith
283
+ TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue);
284
+
285
+ // Replaces all uses of this value with 'newValue' that are dominated by
286
+ // 'node'. Given:
287
+ // x = op(...).
288
+ // if cond:
289
+ // z = foo(..)
290
+ // bar(x)
291
+ // else:
292
+ // print(x)
293
+ // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x)
294
+ // but not print(x) because print is not dominated by foo.
295
+ // replaceAllUsesAfterNode does not check domination, so in this example
296
+ // it would produce invalid IR.
297
+ TORCH_API void replaceAllUsesDominatedByNodeWith(
298
+ const Node* node,
299
+ Value* newValue);
300
+
301
+ TORCH_API Value* copyMetadata(Value* from);
302
+
303
+ TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
304
+ if (!wrap_) {
305
+ wrap_ = std::make_shared<Wrap<Value>>(this);
306
+ }
307
+ return wrap_;
308
+ }
309
+
310
+ virtual ~Value() {
311
+ if (wrap_) {
312
+ wrap_->clear();
313
+ }
314
+ }
315
+ };
316
+
317
+ struct TORCH_API Node {
318
+ AT_DISALLOW_COPY_AND_ASSIGN(Node);
319
+ friend struct Graph;
320
+ friend struct Block;
321
+ friend struct Value;
322
+ friend graph_node_list;
323
+ friend const_graph_node_list;
324
+ friend graph_node_list_iterator;
325
+ friend const_graph_node_list_iterator;
326
+
327
+ private:
328
+ const NodeKind kind_;
329
+ std::vector<Value*> inputs_;
330
+ std::vector<Value*> outputs_;
331
+ // subblocks
332
+ std::vector<Block*> blocks_;
333
+ Graph* graph_;
334
+ Block* owning_block_;
335
+ c10::optional<SourceRange> source_range_;
336
+ ScopePtr scope_;
337
+ c10::optional<InlinedCallStackPtr> callstack_;
338
+ // Assumes FunctionSchemas are persistent, so we don't manage their lifetime.
339
+ // This field is effectively a cache that's populated on attribute lookups and
340
+ // invalidated every time we perform an operation that could potentially
341
+ // change the schema. Note: mutable because op_ is effectively a cache.
342
+ mutable const Operator* op_;
343
+ topo_position_t topo_position_ = 0;
344
+ // a managing wrapper for Python to allow invalidation
345
+ std::shared_ptr<Wrap<Node>> wrap_;
346
+ // Stores the full schema name, if the operator is historic
347
+ // When the operator is deprecated or the name of the operator
348
+ // is changed, we need to rely on this name
349
+ // to retrieve old schemas to successfully apply upgraders
350
+ // for this operator.
351
+ c10::optional<std::string> historic_schema_name_ = c10::nullopt;
352
+
353
+ protected:
354
+ Node(Graph* graph_, NodeKind kind_); // defined after graph
355
+ public:
356
+ // Each Node but Return/Param Nodes are associated with exactly one
357
+ // place in the Node list of the Graph. The Graph itself is a circular
358
+ // doubly-linked list. The Return Node is used as the sentinel for the
359
+ // "beginning"/"end" of the list. This means that you can tell when
360
+ // you've traversed the entire list without worrying about null
361
+ // pointers. `next_in_graph[0]` is the pointer to the next Node, while
362
+ // `next_in_graph[1]` is the pointer to the previous Node. The
363
+ // linked list is implemented as an array to allow the same iterator
364
+ // class for forward and reversed Node lists. Taken together, this
365
+ // list also represents a topological sort of the Nodes in the Graph.
366
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays)
367
+ Node* next_in_graph[2] = {nullptr, nullptr};
368
+
369
+ std::shared_ptr<Wrap<Node>> wrap() {
370
+ if (!wrap_) {
371
+ wrap_ = std::make_shared<Wrap<Node>>(this);
372
+ }
373
+ return wrap_;
374
+ }
375
+
376
+ const c10::optional<std::string> getHistoricSchemaName() {
377
+ return historic_schema_name_;
378
+ }
379
+
380
+ void setHistoricSchemaName(const std::string& name) {
381
+ historic_schema_name_ = name;
382
+ }
383
+
384
+ Node*& next() {
385
+ return next_in_graph[kNextDirection];
386
+ }
387
+ Node*& prev() {
388
+ return next_in_graph[kPrevDirection];
389
+ }
390
+ Node* const& next() const {
391
+ return next_in_graph[kNextDirection];
392
+ }
393
+ Node* const& prev() const {
394
+ return next_in_graph[kPrevDirection];
395
+ }
396
+
397
+ NodeKind kind() const {
398
+ return kind_;
399
+ }
400
+ Node* setSourceRange(SourceRange r) {
401
+ source_range_ = std::move(r);
402
+ return this;
403
+ }
404
+ SourceRange sourceRange() const;
405
+
406
+ /**
407
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
408
+ * Check #87343 for details.
409
+ */
410
+ Graph* owningGraph() {
411
+ return graph_;
412
+ }
413
+ const Graph* owningGraph() const {
414
+ return graph_;
415
+ }
416
+ Block* owningBlock() {
417
+ return owning_block_;
418
+ }
419
+ const Block* owningBlock() const {
420
+ return owning_block_;
421
+ }
422
+ ScopePtr scope() {
423
+ return scope_;
424
+ }
425
+ void setScope(ScopePtr scope) {
426
+ scope_ = std::move(scope);
427
+ }
428
+ std::string scopeName() const {
429
+ if (!scope_) {
430
+ return "";
431
+ }
432
+ return scope_->namesFromRoot();
433
+ }
434
+
435
+ // Copies the source range, scope and callstack from another node.
436
+ Node* copyMetadata(Node* from) {
437
+ this->setSourceRange(from->sourceRange());
438
+ this->setScope(from->scope());
439
+ if (auto cs = from->callstack()) {
440
+ this->setCallStack(*cs);
441
+ }
442
+ return this;
443
+ }
444
+
445
+ c10::optional<InlinedCallStackPtr> callstack() const {
446
+ return callstack_;
447
+ }
448
+ void setCallStack(InlinedCallStackPtr cs) {
449
+ callstack_ = std::move(cs);
450
+ }
451
+
452
+ // NB: This returns an ArrayRef; that means that it will
453
+ // get invalidated if you resize inputs (e.g., using addInput)
454
+ // We can't return a std::vector<Node*>& because there's no
455
+ // way to soundly cast to std::vector<const Node*> (an insane
456
+ // implementation of std::vector could make this representationally
457
+ // different.)
458
+ at::ArrayRef<Value*> inputs() {
459
+ return inputs_;
460
+ }
461
+ at::ArrayRef<const Value*> inputs() const {
462
+ // Vectors are not convertible in const-ness of elements, but
463
+ // raw pointers are.
464
+ return {inputs_.data(), inputs_.size()};
465
+ }
466
+ // NB: This returns an ArrayRef; that means that it will
467
+ // get invalidated if you resize inputs (e.g., using addInput)
468
+ // We can't return a std::vector<Node*>& because there's no
469
+ // way to soundly cast to std::vector<const Node*> (an insane
470
+ // implementation of std::vector could make this representationally
471
+ // different.)
472
+ at::ArrayRef<Value*> outputs() {
473
+ return outputs_;
474
+ }
475
+ at::ArrayRef<const Value*> outputs() const {
476
+ // Vectors are not convertible in const-ness of elements, but
477
+ // raw pointers are.
478
+ return {outputs_.data(), outputs_.size()};
479
+ }
480
+ Value* output(size_t i) const {
481
+ return outputs_.at(i);
482
+ }
483
+ bool hasUses() const {
484
+ for (auto o : outputs()) {
485
+ if (!o->uses().empty()) {
486
+ return true;
487
+ }
488
+ }
489
+ return false;
490
+ }
491
+
492
+ void replaceAllUsesWith(Node* n);
493
+
494
+ // replaces `this` with a new node with the same inputs and outputs
495
+ // but a new node symbol. does not destroy `this`
496
+ Node* replaceWithNewSymbol(Symbol new_symbol);
497
+
498
+ // Checks if this node is dominated by `dominator` which means that
499
+ // `dominator` will always be executed before `this` and `dominator`
500
+ // is in scope of `this`.
501
+ bool isDominatedBy(const Node* dominator) const;
502
+
503
+ // lots of things like chunk have a single input or single output, so we have
504
+ // a helper to make accessing it easier
505
+ Value* input() {
506
+ AT_ASSERT(inputs_.size() == 1);
507
+ return inputs_.at(0);
508
+ }
509
+ Value* output() {
510
+ AT_ASSERT(outputs_.size() == 1);
511
+ return outputs_.at(0);
512
+ }
513
+ const Value* output() const {
514
+ AT_ASSERT(outputs_.size() == 1);
515
+ return outputs_.at(0);
516
+ }
517
+ const Value* input() const {
518
+ AT_ASSERT(inputs_.size() == 1);
519
+ return inputs_.at(0);
520
+ }
521
+ // Access a particular input. This is a checked index.
522
+ Value* input(size_t i) const {
523
+ return inputs_.at(i);
524
+ }
525
+
526
+ bool hasNamedInput(const std::string& unqualName) const;
527
+ Value* namedInput(const std::string& unqualName) const;
528
+ Value* namedInput(Symbol name) const;
529
+
530
+ c10::optional<IValue> get(Symbol name) const;
531
+
532
+ template <typename T>
533
+ c10::optional<T> get(Symbol name) const {
534
+ if (auto v = get(name)) {
535
+ return v->template to<T>();
536
+ }
537
+ return c10::nullopt;
538
+ }
539
+
540
+ // Returns true if the value of input name is statically known
541
+ bool is_constant(Symbol name) const {
542
+ return static_cast<bool>(get(name));
543
+ }
544
+ bool mustBeNone() const;
545
+
546
+ bool isNondeterministic() const;
547
+ bool hasSideEffects() const;
548
+
549
+ // instructions lowered by the interpreter and not run in the optimized graph
550
+ bool notExecutedOp() const {
551
+ return kind_ == prim::Constant || kind_ == prim::profile ||
552
+ kind_ == prim::profile_ivalue;
553
+ }
554
+
555
+ // Graphs
556
+
557
+ // Note [Topological invariant]
558
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
559
+ // We always maintain an up-to-date topological ordering of all nodes via
560
+ // the next()/prev() links. All transformations to graphs must preserve
561
+ // this topological ordering: for example, it is only valid to 'addInput'
562
+ // with an input which is topologically before the current node.
563
+ //
564
+ // Usually, it is obvious whether or not topological order is maintained;
565
+ // for example, if you are adding nodes to the end of the topsort, it's
566
+ // impossible for them to refer to inputs that are not in the topsort.
567
+ // If it is not obvious, please comment accordingly.
568
+
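A sketch of a graph edit that respects this invariant (editorial illustration; `graph`, `x`, and `user` are hypothetical, and aten::relu is just an example op): the new node is inserted immediately before its consumer, so its input is already defined earlier in the order.

#include <torch/csrc/jit/ir/ir.h>

// Route `user`'s use of `x` through a relu inserted just before `user`.
void insertReluBefore(torch::jit::Graph& graph,
                      torch::jit::Value* x,
                      torch::jit::Node* user) {
  graph.setInsertPoint(user); // insertNode() now inserts before `user`
  torch::jit::Node* relu =
      graph.insertNode(graph.create(torch::jit::aten::relu, {x}));
  // `x` is defined before `user` and the relu sits between them, so every
  // node still only consumes values that appear earlier in the topsort.
  user->replaceInputWith(x, relu->output());
}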
569
+ // Add 'value' as an input to 'this' at the end of the existing
570
+ // arguments. Returns the added value for ease of chaining.
571
+ //
572
+ // Given: %3 = f(%1, %2)
573
+ // Execute: %3.addInput(%4)
574
+ // Result: %3 = f(%1, %2, %4)
575
+ Value* addInput(Value* value);
576
+
577
+ // Add 'value' as an input to 'this' at the specified position in the
578
+ // arguments. Returns the added value for ease of chaining.
579
+ Value* insertInput(size_t i, Value* value);
580
+
581
+ // Replace the input of 'this' at position 'i' with
582
+ // 'newValue', returning the old node.
583
+ //
584
+ // Given: %3 = f(%1, %2)
585
+ // Execute: %3.replaceInput(1, %4)
586
+ // Result: %3 = f(%1, %4)
587
+ Value* replaceInput(size_t i, Value* newValue);
588
+
589
+ // Replace all occurrences of 'from' in the inputs of this
590
+ // node with 'to'. Corresponds to llvm's replaceUsesOfWith.
591
+ //
592
+ // Given: %3 = f(%1, %2, %1)
593
+ // Execute: %3.replaceInputWith(%1, %4)
594
+ // Result: %3 = f(%4, %2, %4)
595
+ void replaceInputWith(Value* from, Value* to);
596
+
597
+ Value* addOutput();
598
+
599
+ Value* insertOutput(size_t i);
600
+
601
+ void eraseOutput(size_t i);
602
+
603
+ Block* addBlock();
604
+ void eraseBlock(size_t i);
605
+
606
+ // Each Node can have a list of subblocks. These are used to define structured
607
+ // nested control flow operators such as If and Loop.
608
+ // The meaning of a block is specific to the kind of node it is in, but
609
+ // all blocks share these semantics:
610
+ // * Nested lexical scoping: If a node 'Parent' has a subblock which contains
611
+ // a node 'Child', Child can use any value that was in scope for the Parent
612
+ // node in addition to any values defined before 'Child' in the subblock.
613
+ // * The list of inputs to the block are in scope for the duration of the
614
+ // block
615
+ // * the outputs of the Parent node are not in scope for the subblocks
616
+ // Typically the inputs to a block that represents control flow act as
617
+ // the equivalent of phi-nodes in standard SSA form,
618
+ // defining a new Value to represent any term that has multiple
619
+ // definitions depending on how control flowed. Outputs of the node containing
620
+ // control flow serve a similar purpose, defining new values for variables
621
+ // that would have different definitions depending on which way control
622
+ // flowed.
623
+
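A sketch of the structured control flow described above (editorial illustration; `cond` is assumed to be a Bool-typed Value and the type handling is simplified): a prim::If node with a then-block and an else-block, whose block outputs play the phi-node role.

#include <torch/csrc/jit/ir/ir.h>

// Emit: out = true_val if cond else false_val.
torch::jit::Value* emitSelect(torch::jit::Graph& graph,
                              torch::jit::Value* cond,
                              torch::jit::Value* true_val,
                              torch::jit::Value* false_val) {
  torch::jit::Node* if_node = graph.insertNode(
      graph.create(torch::jit::prim::If, {cond}, /*num_outputs=*/1));
  torch::jit::Block* then_block = if_node->addBlock();
  torch::jit::Block* else_block = if_node->addBlock();
  // Values from the enclosing scope are visible inside the blocks; each block
  // registers one output, which becomes the single output of the If node.
  then_block->registerOutput(true_val);
  else_block->registerOutput(false_val);
  // Simplification: a real emitter would unify the two branch types.
  if_node->output()->setType(true_val->type());
  return if_node->output();
}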
624
+ at::ArrayRef<Block*> blocks() {
625
+ return blocks_;
626
+ }
627
+ at::ArrayRef<const Block*> blocks() const {
628
+ // Vectors are not convertible in const-ness of elements, but
629
+ // raw pointers are.
630
+ return {blocks_.data(), blocks_.size()};
631
+ }
632
+
633
+ // Is 'this' before 'n' in the topological order?
634
+ bool isBefore(const Node* n) const;
635
+
636
+ // Is 'this' after 'n' in the topological order?
637
+ bool isAfter(const Node* n) const;
638
+
639
+ // Insert unattached 'this' node before 'n' in the topological order.
640
+ // Returns this (for chaining).
641
+ //
642
+ // Given: %3 = f(%1, %2)
643
+ // %4 = g(%3)
644
+ // and unattached: %5 = h(%1)
645
+ // Execute: %5.insertBefore(%4)
646
+ // Result: %3 = f(%1, %2)
647
+ // %5 = h(%1)
648
+ // %4 = g(%3)
649
+ Node* insertBefore(Node* n);
650
+
651
+ // Insert unattached 'this' node after 'n' in the topological order.
652
+ // Returns this (for chaining).
653
+ //
654
+ // Given: %3 = f(%1, %2)
655
+ // %4 = g(%3)
656
+ // and unattached: %5 = h(%1)
657
+ // Execute: %5.insertAfter(%4)
658
+ // Result: %3 = f(%1, %2)
659
+ // %4 = g(%3)
660
+ // %5 = h(%1)
661
+ Node* insertAfter(Node* n);
662
+
663
+ // Move 'this' (already in the graph) after 'n' in the topological order.
664
+ //
665
+ // NOTE: Does not check that value dependencies are preserved, see
666
+ // AliasDb::moveAfterTopologicallyValid
667
+ //
668
+ // Given: %2 = f(%1)
669
+ // %3 = g(%1)
670
+ // Execute: %2.moveAfter(%3)
671
+ // Result: %3 = g(%1)
672
+ // %2 = f(%1)
673
+ //
674
+ void moveAfter(Node* n);
675
+
676
+ // Move a node 'n' (already in the graph) before 'this' in the topological
677
+ // order.
678
+ //
679
+ // NOTE: Does not check that value dependencies are preserved, see
680
+ // AliasDb::moveBeforeTopologicallyValid
681
+ //
682
+ // Given: %2 = f(%1)
683
+ // %3 = g(%1)
684
+ // Execute: %3.moveBefore(%2)
685
+ // Result: %3 = g(%1)
686
+ // %2 = f(%1)
687
+ void moveBefore(Node* n);
688
+
689
+ // Remove the input at 'i' from this node.
690
+ //
691
+ // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling
692
+ // removeInput.
693
+ //
694
+ // Given: %3 = f(%1, %2)
695
+ // Execute: %3.removeInput(1)
696
+ // Result: %3 = f(%1)
697
+ void removeInput(size_t i);
698
+
699
+ // Remove all inputs from a node.
700
+ //
701
+ // Given: %3 = f(%1, %2)
702
+ // Execute: %3.removeAllInputs()
703
+ // Result: %3 = f()
704
+ void removeAllInputs();
705
+
706
+ // Remove all outputs from a node.
707
+ //
708
+ // Given: %1, %2 = f()
709
+ // Execute: removeAllOutputs()
710
+ // Result: = f()
711
+ void removeAllOutputs();
712
+
713
+ // Rearrange the ordering of inputs or outputs of a node
714
+ // Given: %3 = f(%1, %2)
715
+ // Execute: %3.permuteInputs({1, 0})
716
+ // Result: %3 = f(%2, %1)
717
+ // Each index must appear exactly once
718
+ void permuteInputs(const std::vector<size_t>& new_inputs);
719
+ void permuteOutputs(const std::vector<size_t>& new_inputs);
720
+
721
+ // iterators of the node list starting at this node
722
+ // useful for resuming a search starting at this node
723
+ inline graph_node_list_iterator iterator() {
724
+ return {this, 0};
725
+ }
726
+ inline graph_node_list_iterator reverseIterator() {
727
+ return iterator().reverse();
728
+ }
729
+ inline const_graph_node_list_iterator iterator() const {
730
+ return {this, 0};
731
+ }
732
+ inline const_graph_node_list_iterator reverseIterator() const {
733
+ return iterator().reverse();
734
+ }
735
+
736
+ // Remove 'this' from the instruction list and deallocate it.
737
+ //
738
+ // Invariant: no outputs of 'this' may have any uses.
739
+ //
740
+ // Given: %2 = f(%1)
741
+ // %3 = g(%1)
742
+ // Execute: %2.destroy()
743
+ // Result: %3 = g(%1)
744
+ void destroy();
745
+
746
+ // Dynamically cast this node to the subclass indicated by the
747
+ // template variable, returning nullptr if the cast is invalid.
748
+ //
749
+ // Example usage: if(auto s = n.cast<Select>()) { ... }
750
+ template <typename T>
751
+ T* cast() {
752
+ if (T::Kind == kind()) {
753
+ return static_cast<T*>(this);
754
+ }
755
+ return nullptr;
756
+ }
757
+ template <typename T>
758
+ const T* cast() const {
759
+ if (T::Kind == kind()) {
760
+ return static_cast<const T*>(this);
761
+ }
762
+ return nullptr;
763
+ }
764
+
765
+ template <typename T>
766
+ T* expect() {
767
+ TORCH_CHECK(
768
+ T::Kind == kind(),
769
+ "expected a ",
770
+ T::Kind.toDisplayString(),
771
+ " but found a ",
772
+ kind().toDisplayString());
773
+ return static_cast<T*>(this);
774
+ }
775
+
776
+ bool matches(const FunctionSchema& schema) const;
777
+
778
+ // XXX: this function is meant to be used with string literals only!
779
+ bool matches(
780
+ const char* signature_literal,
781
+ at::ArrayRef<Symbol> const_inputs = {}) const;
782
+
783
+ bool isMemberOf(const OperatorSet& os) const;
784
+ template <typename T>
785
+ bool isMemberOf(const OperatorMap<T>& om) const {
786
+ auto it = om.map.find(kind());
787
+ if (it == om.map.end()) {
788
+ return false;
789
+ }
790
+ for (auto& op : it->second) {
791
+ if (matches(op.first->schema())) {
792
+ return true;
793
+ }
794
+ }
795
+ return false;
796
+ }
797
+
798
+ const FunctionSchema& schema() const;
799
+ const FunctionSchema* maybeSchema() const;
800
+ const Operator& getOperator() const;
801
+ Operation getOperation() const;
802
+
803
+ const Operator* maybeOperator() const;
804
+
805
+ void dump() const;
806
+
807
+ std::ostream& print(
808
+ std::ostream& out,
809
+ size_t level,
810
+ std::vector<const Node*>* groups,
811
+ bool print_source_locations = true,
812
+ bool print_attributes = true,
813
+ bool print_scopes = true,
814
+ bool print_body = true) const;
815
+
816
+ virtual ~Node() {
817
+ if (wrap_) {
818
+ wrap_->clear();
819
+ }
820
+ }
821
+
822
+ // Methods for accessing attributes
823
+ Node* copyAttributes(const Node& rhs) {
824
+ values_.clear();
825
+ for (const AVPtr& i : rhs.values_) {
826
+ values_.push_back(i->clone());
827
+ }
828
+ return this;
829
+ }
830
+ bool hasAttribute(Symbol name) const {
831
+ AT_ASSERT(name.is_attr());
832
+ return findAttr(name, false) != values_.end();
833
+ }
834
+ bool hasAttributeS(const std::string& name) const {
835
+ return hasAttribute(Symbol::attr(name));
836
+ }
837
+ AttributeKind kindOf(Symbol name) const {
838
+ AT_ASSERT(name.is_attr());
839
+ return (*findAttr(name, true))->kind();
840
+ }
841
+ AttributeKind kindOfS(const std::string& name) const {
842
+ return kindOf(Symbol::attr(name));
843
+ }
844
+ Node* removeAttribute(Symbol name) {
845
+ AT_ASSERT(name.is_attr());
846
+ values_.erase(findAttr(name, true));
847
+ return this;
848
+ }
849
+ Node* removeAttributeS(const std::string& name) {
850
+ return removeAttribute(Symbol::attr(name));
851
+ }
852
+ bool hasAttributes() const {
853
+ return !values_.empty();
854
+ }
855
+ size_t numAttributes() const {
856
+ return values_.size();
857
+ }
858
+ // The names are returned in order, since name actually is the index.
859
+ std::vector<Symbol> attributeNames() const {
860
+ std::vector<Symbol> names;
861
+ names.reserve(values_.size());
862
+ for (const AVPtr& a : values_) {
863
+ names.push_back(a->name);
864
+ }
865
+ return names;
866
+ }
867
+ std::vector<const char*> attributeNamesS() const {
868
+ std::vector<const char*> names;
869
+ names.reserve(values_.size());
870
+ for (const AVPtr& a : values_) {
871
+ names.push_back(a->name.toUnqualString());
872
+ }
873
+ return names;
874
+ }
875
+
876
+ #define CREATE_ACCESSOR(Kind, method) \
877
+ Node* method##_(Symbol name, Kind##Attr::ConstructorType v) { \
878
+ return setAttr<Kind##Attr>( \
879
+ name, std::forward<Kind##Attr::ConstructorType>(v)); \
880
+ } \
881
+ const Kind##Attr::ValueType& method(Symbol name) const { \
882
+ return getAttr<Kind##Attr>(name); \
883
+ }
884
+
885
+ CREATE_ACCESSOR(Float, f)
886
+ CREATE_ACCESSOR(Complex, c)
887
+ CREATE_ACCESSOR(Floats, fs)
888
+ CREATE_ACCESSOR(ComplexVals, cs)
889
+ CREATE_ACCESSOR(String, s)
890
+ CREATE_ACCESSOR(Strings, ss)
891
+ CREATE_ACCESSOR(Int, i)
892
+ CREATE_ACCESSOR(Ints, is)
893
+ CREATE_ACCESSOR(Graph, g)
894
+ CREATE_ACCESSOR(Graphs, gs)
895
+ CREATE_ACCESSOR(Type, ty)
896
+ CREATE_ACCESSOR(Types, tys)
897
+ CREATE_ACCESSOR(IValue, ival)
898
+
899
+ #undef CREATE_ACCESSOR
900
+
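A short sketch of the generated accessors (editorial illustration; the node kind and attribute names here are arbitrary): the suffix letter encodes the attribute kind (i = Int, f = Float, s = String, ...) and the trailing underscore marks the setter.

#include <torch/csrc/jit/ir/ir.h>

void attributeRoundTrip(torch::jit::Graph& graph) {
  // Arbitrary node kind, used only to carry attributes for this sketch.
  torch::jit::Node* n = graph.insertNode(
      graph.create(torch::jit::Symbol::fromQualString("example::attrs")));
  n->i_(torch::jit::attr::value, 42);               // Int setter, returns Node* for chaining
  n->s_(torch::jit::Symbol::attr("note"), "hello"); // String attribute under a custom name
  int64_t v = n->i(torch::jit::attr::value);        // Int getter
  AT_ASSERT(v == 42 && n->hasAttributeS("note"));
}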
901
+ // Our Graphs are not very const-correct, so we need to allow returning
902
+ // non-const references too
903
+ GraphAttr::ValueType& g(Symbol name) {
904
+ return getAttr<GraphAttr>(name);
905
+ }
906
+
907
+ // does not use CREATE_ACCESSOR because we need additional asserts
908
+ Node* t_(Symbol name, TensorAttr::ConstructorType v) {
909
+ return setAttr<TensorAttr>(
910
+ name, std::forward<TensorAttr::ConstructorType>(v));
911
+ }
912
+ const TensorAttr::ValueType& t(Symbol name) const {
913
+ return getAttr<TensorAttr>(name);
914
+ }
915
+
916
+ Node* ts_(Symbol name, TensorsAttr::ConstructorType v) {
917
+ return setAttr<TensorsAttr>(
918
+ name, std::forward<TensorsAttr::ConstructorType>(v));
919
+ }
920
+ const TensorsAttr::ValueType& ts(Symbol name) const {
921
+ return getAttr<TensorsAttr>(name);
922
+ }
923
+
924
+ Block* findCommonAncestorBlockWith(Node* n);
925
+
926
+ size_t blocksFromGraphBlock();
927
+
928
+ private:
929
+ void printAttrValue(std::ostream& out, const Symbol& name) const;
930
+ void printAttributes(std::ostream& out, bool ignore_subgraph) const;
931
+
932
+ template <typename T>
933
+ Node* setAttr(Symbol name, typename T::ConstructorType v) {
934
+ AT_ASSERT(name.is_attr());
935
+ auto it = findAttr(name, false);
936
+ auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v)));
937
+ // NOLINTNEXTLINE(bugprone-branch-clone)
938
+ if (it == values_.end()) {
939
+ values_.push_back(std::move(nv));
940
+ } else {
941
+ *it = std::move(nv);
942
+ }
943
+ return this;
944
+ }
945
+ template <typename T>
946
+ typename T::ValueType& getAttr(Symbol name) const {
947
+ AT_ASSERT(name.is_attr());
948
+ auto it = findAttr(name, true);
949
+ auto* child = dynamic_cast<T*>(it->get());
950
+ if (child == nullptr) {
951
+ throw IRAttributeError(name, true);
952
+ }
953
+ return child->value();
954
+ }
955
+ using AVPtr = AttributeValue::Ptr;
956
+ // NB: For determinism, we use a vector rather than a hash map. This does
957
+ // mean that lookups are O(n), so you shouldn't use Attributes to store
958
+ // a big pile of messages.
959
+ std::vector<AVPtr> values_;
960
+ std::vector<AVPtr>::iterator findAttr(Symbol name, bool required) {
961
+ AT_ASSERT(name.is_attr());
962
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
963
+ return v->name == name;
964
+ });
965
+ if (required && it == values_.end()) {
966
+ throw IRAttributeError(name, false);
967
+ }
968
+ AT_ASSERT(!required || it != values_.end());
969
+ return it;
970
+ }
971
+ std::vector<AVPtr>::const_iterator findAttr(Symbol name, bool required)
972
+ const {
973
+ AT_ASSERT(name.is_attr());
974
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
975
+ return v->name == name;
976
+ });
977
+ if (required && it == values_.end()) {
978
+ throw IRAttributeError(name, false);
979
+ }
980
+ AT_ASSERT(!required || it != values_.end());
981
+ return it;
982
+ }
983
+
984
+ enum class MoveSide { BEFORE, AFTER };
985
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
986
+
987
+ std::pair<Value*, const Argument&> findInput(Symbol name);
988
+ // Lookup iterator in use list of _input i_ that corresponds to its use of
989
+ // _this_
990
+ use_list::iterator findUseForInput(size_t i);
991
+
992
+ // Remove the use of input i. This sets input i to nullptr; it
993
+ // is only used internally to Node before setting the input to a new value
994
+ // or erasing the entry from the list.
995
+ Value* dropInput(size_t i);
996
+
997
+ bool inBlockList() const {
998
+ if (next() == nullptr) {
999
+ AT_ASSERT(prev() == nullptr);
1000
+ }
1001
+ return next() != nullptr;
1002
+ }
1003
+
1004
+ void removeFromList();
1005
+ void lint() const;
1006
+
1007
+ void assignTopoPosition();
1008
+
1009
+ protected:
1010
+ // subclasses must override
1011
+ // this function is used by createClone to initialize a new version
1012
+ // of a node in another graph. It should allocate a new instance of the same
1013
+ // concrete type as 'this', but in graph 'g' which might be different
1014
+ // than graph_
1015
+ virtual Node* allocNewInstance(Graph* g) {
1016
+ return new Node(g, kind());
1017
+ }
1018
+ // create a copy of all properties of Node s into this.
1019
+ // subclasses should extend if they have additional information to copy.
1020
+ // 'this' will be allocated with s->allocNewInstance(g) so it should have
1021
+ // the same concrete type as 's'
1022
+ virtual void cloneFrom(Node* s);
1023
+ };
1024
+
1025
+ struct Block {
1026
+ friend struct Node;
1027
+ friend struct Graph;
1028
+
1029
+ AT_DISALLOW_COPY_AND_ASSIGN(Block);
1030
+ TORCH_API Block(Graph* graph_, Node* node_);
1031
+
1032
+ at::ArrayRef<Value*> inputs() {
1033
+ return input_->outputs();
1034
+ }
1035
+ at::ArrayRef<const Value*> inputs() const {
1036
+ const auto& inputs = input_->outputs();
1037
+ return {inputs.data(), inputs.size()};
1038
+ }
1039
+ at::ArrayRef<Value*> outputs() {
1040
+ return output_->inputs();
1041
+ }
1042
+ at::ArrayRef<const Value*> outputs() const {
1043
+ return static_cast<const Node*>(output_)->inputs();
1044
+ }
1045
+ graph_node_list nodes() {
1046
+ return {input_, kNextDirection};
1047
+ }
1048
+ const_graph_node_list nodes() const {
1049
+ return {input_, kNextDirection};
1050
+ }
1051
+ Node* return_node() {
1052
+ return output_;
1053
+ }
1054
+ const Node* return_node() const {
1055
+ return output_;
1056
+ }
1057
+ Node* param_node() {
1058
+ return input_;
1059
+ }
1060
+ const Node* param_node() const {
1061
+ return input_;
1062
+ }
1063
+ /**
1064
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
1065
+ * Check #87343 for details.
1066
+ */
1067
+ Graph* owningGraph() {
1068
+ return graph_;
1069
+ }
1070
+ const Graph* owningGraph() const {
1071
+ return graph_;
1072
+ }
1073
+ Node* owningNode() {
1074
+ return owning_node_;
1075
+ }
1076
+ const Node* owningNode() const {
1077
+ return owning_node_;
1078
+ }
1079
+
1080
+ Value* addInput(const std::string& name = "") {
1081
+ Value* v = input_->addOutput();
1082
+ v->setDebugName(name);
1083
+ return v;
1084
+ }
1085
+ Value* insertInput(size_t i, const std::string& name = "") {
1086
+ Value* v = input_->insertOutput(i);
1087
+ v->setDebugName(name);
1088
+ return v;
1089
+ }
1090
+ void eraseInput(size_t i) {
1091
+ input_->eraseOutput(i);
1092
+ }
1093
+ void removeAllInputs() {
1094
+ input_->removeAllOutputs();
1095
+ }
1096
+ size_t registerOutput(Value* v) {
1097
+ output_->addInput(v);
1098
+ return outputs().size() - 1;
1099
+ }
1100
+ size_t insertOutput(size_t i, Value* n) {
1101
+ output_->insertInput(i, n);
1102
+ return i;
1103
+ }
1104
+ void eraseOutput(size_t i) {
1105
+ output_->removeInput(i);
1106
+ }
1107
+ void removeAllOutputs() {
1108
+ output_->removeAllInputs();
1109
+ }
1110
+
1111
+ void replaceOutput(size_t i, Value* n) {
1112
+ output_->replaceInput(i, n);
1113
+ }
1114
+ void permuteOutputs(const std::vector<size_t>& new_inputs) {
1115
+ output_->permuteInputs(new_inputs);
1116
+ }
1117
+ void permuteInputs(const std::vector<size_t>& new_inputs) {
1118
+ input_->permuteOutputs(new_inputs);
1119
+ }
1120
+
1121
+ Node* appendNode(Node* n) {
1122
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1123
+ n->insertBefore(output_);
1124
+ return n;
1125
+ }
1126
+ Node* prependNode(Node* n) {
1127
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1128
+ n->insertAfter(input_);
1129
+ return n;
1130
+ }
1131
+
1132
+ // clone all inputs, nodes, and outputs from src and append them
1133
+ // to the inputs, nodes, and outputs of this block
1134
+ // value_map is used whenever a node in src references a free variable
1135
+ // in src to look up its corresponding value
1136
+ TORCH_API void cloneFrom(Block* src, std::function<Value*(Value*)> value_map);
1137
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1138
+
1139
+ TORCH_API std::shared_ptr<Wrap<Block>> wrap() {
1140
+ if (!wrap_) {
1141
+ wrap_ = std::make_shared<Wrap<Block>>(this);
1142
+ }
1143
+ return wrap_;
1144
+ }
1145
+
1146
+ virtual ~Block() {
1147
+ if (wrap_) {
1148
+ wrap_->clear();
1149
+ }
1150
+ }
1151
+
1152
+ void clear() {
1153
+ removeAllOutputs();
1154
+ for (auto it = nodes().rbegin(); it != nodes().rend(); it++) {
1155
+ it.destroyCurrent();
1156
+ }
1157
+ removeAllInputs();
1158
+ }
1159
+
1160
+ private:
1161
+ void reIndexTopology();
1162
+
1163
+ // get rid of all nodes
1164
+ // destroys in reverse order so that uses internal to this block
1165
+ // do not have to be removed before you can destroy the block
1166
+ void destroy();
1167
+
1168
+ Graph* const graph_;
1169
+ // holds outputs in a way that can be reflected
1170
+ // as a Use object
1171
+ // also used as the beginning/end of the circular node list to avoid
1172
+ // having corner cases where the list is empty.
1173
+ Node* const output_;
1174
+ Node* const input_;
1175
+ Node* const
1176
+ owning_node_; // either the node that has this block or nullptr for root
1177
+ // a managing wrapper for Python to allow invalidation
1178
+ std::shared_ptr<Wrap<Block>> wrap_;
1179
+ };
1180
+
1181
+ struct Graph : std::enable_shared_from_this<Graph> {
1182
+ AT_DISALLOW_COPY_AND_ASSIGN(Graph);
1183
+ friend struct Node;
1184
+ friend struct Value;
1185
+ friend struct Block;
1186
+
1187
+ private:
1188
+ // only used to keep track of allocated nodes
1189
+ // actual representation of Graph is done with
1190
+ // inputs, outputs, nodes
1191
+
1192
+ std::unordered_set<const Node*> all_nodes;
1193
+ std::unordered_set<const Value*> all_values;
1194
+ std::unordered_set<const Block*> all_blocks;
1195
+ size_t next_unique_;
1196
+
1197
+ std::unordered_map<std::string, Value*> unique_names_;
1198
+ // name_base_suffix_ tracks the largest suffix currently used by all names
1199
+ // sharing the same name_base. The key of this map is the name_base; the
1200
+ // value is the largest numeric suffix in use.
1201
+ std::unordered_map<std::string, size_t> name_base_suffix_;
1202
+
1203
+ ScopePtr current_scope_;
1204
+
1205
+ Block* const block_;
1206
+ // when insertNode() is called, the node is inserted before this node
1207
+ // by default this is set to append to the top level block
1208
+ Node* insert_before_;
1209
+ int64_t predicted_insert_count_ = 0;
1210
+
1211
+ c10::optional<size_t> op_version_;
1212
+
1213
+ public:
1214
+ Graph(ScopePtr scope_root = c10::make_intrusive<Scope>())
1215
+ : next_unique_(0),
1216
+ current_scope_(std::move(scope_root)),
1217
+ block_(new Block(this, nullptr)),
1218
+ insert_before_(return_node()) {}
1219
+
1220
+ at::ArrayRef<Value*> inputs() {
1221
+ return block_->inputs();
1222
+ }
1223
+ at::ArrayRef<const Value*> inputs() const {
1224
+ const Block& block = *block_;
1225
+ return block.inputs();
1226
+ }
1227
+ at::ArrayRef<Value*> outputs() {
1228
+ return block_->outputs();
1229
+ }
1230
+ at::ArrayRef<const Value*> outputs() const {
1231
+ const Block& block = *block_;
1232
+ return block.outputs();
1233
+ }
1234
+ graph_node_list nodes() {
1235
+ return block_->nodes();
1236
+ }
1237
+ const_graph_node_list nodes() const {
1238
+ const Block& block = *block_;
1239
+ return block.nodes();
1240
+ }
1241
+ Node* param_node() {
1242
+ return block_->param_node();
1243
+ }
1244
+ const Node* param_node() const {
1245
+ return block_->param_node();
1246
+ }
1247
+ Node* return_node() {
1248
+ return block_->return_node();
1249
+ }
1250
+ const Node* return_node() const {
1251
+ return block_->return_node();
1252
+ }
1253
+ const std::unordered_map<std::string, Value*>& debugNames() const {
1254
+ return unique_names_;
1255
+ }
1256
+
1257
+ TORCH_API void push_scope(const std::string& scope_name);
1258
+ TORCH_API void pop_scope();
1259
+
1260
+ ScopePtr current_scope() {
1261
+ return current_scope_;
1262
+ }
1263
+
1264
+ void set_op_version(c10::optional<size_t> version) {
1265
+ op_version_ = version;
1266
+ }
1267
+
1268
+ c10::optional<size_t> get_op_version() {
1269
+ return op_version_;
1270
+ }
1271
+
1272
+ void set_current_scope(ScopePtr scope) {
1273
+ current_scope_ = std::move(scope);
1274
+ }
1275
+
1276
+ Value* addInput(const std::string& name = "") {
1277
+ return block_->addInput(name);
1278
+ }
1279
+ Value* insertInput(size_t i, const std::string& name = "") {
1280
+ return block_->insertInput(i, name);
1281
+ }
1282
+ void eraseInput(size_t i) {
1283
+ block_->eraseInput(i);
1284
+ }
1285
+ size_t registerOutput(Value* n) {
1286
+ return block_->registerOutput(n);
1287
+ }
1288
+ void eraseOutput(size_t i) {
1289
+ block_->eraseOutput(i);
1290
+ }
1291
+
1292
+ TORCH_API Node* create(NodeKind kind, size_t num_outputs = 1);
1293
+ TORCH_API Node* create(
1294
+ NodeKind kind,
1295
+ ArrayRef<Value*> inputs,
1296
+ size_t num_outputs = 1);
1297
+
1298
+ TORCH_API Node* createNone();
1299
+ TORCH_API Node* createAutogradZero();
1300
+ TORCH_API Node* createUninitialized(TypePtr typ);
1301
+ TORCH_API Node* createWithSubgraph(Symbol kind);
1302
+ TORCH_API Node* createDifferentiableSubgraph();
1303
+ TORCH_API Node* createTuple(
1304
+ at::ArrayRef<Value*> values,
1305
+ TupleTypePtr optional_named_tuple = nullptr);
1306
+ TORCH_API Node* createTupleUnpack(Value* v);
1307
+ TORCH_API Node* createTupleIndex(
1308
+ Value* tup,
1309
+ Value* idx,
1310
+ const TypePtr& output_type);
1311
+ TORCH_API Node* createTupleSlice(
1312
+ Value* tup,
1313
+ int64_t beg,
1314
+ int64_t step_size,
1315
+ int64_t num_values);
1316
+ TORCH_API Node* createEnumName(Value* e);
1317
+ TORCH_API Node* createEnumValue(Value* e);
1318
+ TORCH_API Node* createList(
1319
+ const TypePtr& contained_type,
1320
+ at::ArrayRef<Value*> values);
1321
+ TORCH_API Node* createListUnpack(Value* v, size_t size);
1322
+ TORCH_API Node* createDict(
1323
+ const TypePtr& key_type,
1324
+ const TypePtr& value_type,
1325
+ at::ArrayRef<Value*> keys,
1326
+ at::ArrayRef<Value*> values);
1327
+ TORCH_API Node* createNumToTensor(Value* value);
1328
+ TORCH_API Node* createObject(const ClassTypePtr& type);
1329
+ TORCH_API Node* createSetAttr(
1330
+ Value* obj,
1331
+ const std::string& field,
1332
+ Value* newValue);
1333
+ TORCH_API Node* createGetAttr(Value* obj, const std::string& field);
1334
+ Value* insertGetAttr(Value* obj, const std::string& field) {
1335
+ return insertNode(createGetAttr(obj, field))->output();
1336
+ }
1337
+ TORCH_API Node* createStore(const std::string& name, Value* v);
1338
+ TORCH_API Node* createLoad(const std::string& name, const TypePtr& type);
1339
+ TORCH_API Node* createIsInstance(Value* v, at::ArrayRef<TypePtr> types);
1340
+
1341
+ TORCH_API Value* insertUncheckedCast(Value* v, TypePtr type);
1342
+
1343
+ // Insert a ToList operator with argument \p v and output type \p type.
1344
+ // \returns the output of the operation.
1345
+ TORCH_API Value* insertToList(Value* v, TypePtr type);
1346
+
1347
+ TORCH_API Value* insertFunctionCall(
1348
+ Function* callee,
1349
+ const MatchedSchema& matched);
1350
+ TORCH_API Value* insertMethodCall(
1351
+ std::string method_name,
1352
+ const MatchedSchema& matched);
1353
+
1354
+ // Note: defined in python_ir.cpp and can be used only in python extension
1355
+ Node* createPythonOp(
1356
+ THPObjectPtr&& pyobj,
1357
+ const std::string& cconv,
1358
+ pyobj_list&& scalar_args);
1359
+ // clone n, making a new node in _this_ graph.
1360
+ // use value_map to translate inputs of n to inputs of the cloned node
1361
+ // if copy_blocks is false, it will not recursively clone the nested blocks
1362
+ // this node contains.
1363
+ TORCH_API Node* createClone(
1364
+ Node* n,
1365
+ const std::function<Value*(Value*)>& value_map,
1366
+ bool copy_blocks = true);
1367
+
1368
+ // Insert constant IValue into the graph.
1369
+ TORCH_API Value* insertConstant(
1370
+ const IValue& val,
1371
+ c10::optional<SourceRange> loc = c10::nullopt,
1372
+ c10::optional<ScopePtr> scope = c10::nullopt);
1373
+
1374
+ // Schema-driven insert:
1375
+ // This inserts a node into the graph with inputs determined from args and
1376
+ // kwargs using Python argument matching rules, and checks that the op matches
1377
+ // a known schema.
1378
+ //
1379
+ // If this node successfully completes, it guarantees the node
1380
+ // is a correctly-formed invocation of opname
1381
+ TORCH_API Value* insert(
1382
+ Symbol opname,
1383
+ at::ArrayRef<NamedValue> args,
1384
+ at::ArrayRef<NamedValue> kwargs = {},
1385
+ const c10::optional<SourceRange>& range = {});
1386
+
1387
+ Node* appendNode(Node* n) {
1388
+ return block_->appendNode(n);
1389
+ }
1390
+
1391
+ Node* prependNode(Node* n) {
1392
+ return block_->prependNode(n);
1393
+ }
1394
+
1395
+ // insert before insert_before_ node
1396
+ // initialized to insert at the end of the top level block
1397
+ // can be changed with setInsertPoint()
1398
+ Node* insertNode(Node* n) {
1399
+ AT_ASSERT(
1400
+ insert_before_->inBlockList() &&
1401
+ "insert point node is no longer in a block list");
1402
+ return n->insertBefore(insert_before_);
1403
+ }
1404
+ // set where nodes are inserted to append to the end of this block
1405
+ void setInsertPoint(Block* b) {
1406
+ AT_ASSERT(b->owningGraph() == this);
1407
+ setInsertPoint(b->return_node());
1408
+ }
1409
+ // set where nodes are inserted to insert _before_ this node
1410
+ // for implementation simplicity we only support inserting before a node for
1411
+ // now
1412
+ void setInsertPoint(Node* n) {
1413
+ AT_ASSERT(n->owningGraph() == this && n->inBlockList());
1414
+ insert_before_ = n;
1415
+ predicted_insert_count_ = 0;
1416
+ }
1417
+ Node* insertPoint() {
1418
+ return insert_before_;
1419
+ }
1420
+
1421
+ // the top level block
1422
+ Block* block() {
1423
+ return block_;
1424
+ }
1425
+ const Block* block() const {
1426
+ return block_;
1427
+ }
1428
+
1429
+ // Checks well-formedness and invariants of graph
1430
+ TORCH_API void lint() const;
1431
+ // for use in debugger
1432
+ TORCH_API void dump() const;
1433
+
1434
+ TORCH_API ~Graph();
1435
+
1436
+ TORCH_API std::string toString(bool print_source_locations = true) const;
1437
+
1438
+ TORCH_API std::ostream& print(
1439
+ std::ostream& out,
1440
+ bool print_source_locations = true) const;
1441
+
1442
+ friend TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
1443
+
1444
+ TORCH_API std::shared_ptr<Graph> copy();
1445
+ TORCH_API std::unique_ptr<Graph> copyUnique();
1446
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1447
+
1448
+ private:
1449
+ friend TORCH_API void Lint(const AliasDb* db);
1450
+ TORCH_API void freeNode(Node* n);
1451
+ TORCH_API void freeValue(Value* v);
1452
+ TORCH_API void freeBlock(Block* b);
1453
+ void cloneFrom(Graph& src);
1454
+ };
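A minimal usage sketch of the schema-driven insert() and insertConstant() members declared above, assuming ir.h is included and that `a` is a Tensor-typed Value already owned by `g` (the function name is illustrative only):

    // Sketch: emit `a + 2` at the current insertion point via schema matching.
    torch::jit::Value* emitAddTwo(torch::jit::Graph& g, torch::jit::Value* a) {
      // insertConstant places a prim::Constant node at the current insertion point.
      torch::jit::Value* two = g.insertConstant(2);
      // Python-style argument matching resolves this against
      // aten::add(Tensor self, Scalar other, Scalar alpha=1).
      return g.insert(c10::Symbol::fromQualString("aten::add"), {a, two});
    }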
1455
+
1456
+ /** \brief A utility class for setting temporary insertion points.
1457
+ *
1458
+ * When an object of this class is created, it stores the current insertion
1459
+ * point, sets the new one, and restores the original insertion point when the
1460
+ * object is destroyed.
1461
+ */
1462
+ struct WithInsertPoint {
1463
+ WithInsertPoint(Node* n) : prev_(n->owningGraph()->insertPoint()) {
1464
+ n->owningGraph()->setInsertPoint(n);
1465
+ }
1466
+ WithInsertPoint(Block* b) : WithInsertPoint(b->return_node()) {}
1467
+
1468
+ ~WithInsertPoint() {
1469
+ prev_->owningGraph()->setInsertPoint(prev_);
1470
+ }
1471
+
1472
+ private:
1473
+ Node* prev_;
1474
+ };
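A short sketch of the RAII guard above, assuming a Node `n` that already lives in some graph (names are illustrative):

    // Sketch: temporarily redirect insertion so that new nodes land before `n`.
    void insertOneBefore(torch::jit::Node* n) {
      torch::jit::Graph* g = n->owningGraph();
      torch::jit::WithInsertPoint guard(n);  // saves the old insert point, inserts before n
      g->insertConstant(1);                  // this constant is placed just before n
    }                                        // guard restores the previous insert point here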
1475
+
1476
+ /** \brief A utility class for setting temporary scopes.
1477
+ *
1478
+ * When an object of this class is created, it stores the current scope, sets
1479
+ * the new one, and restores the original scope when the object is destroyed.
1480
+ */
1481
+ struct WithCurrentScope {
1482
+ WithCurrentScope(Graph& g, ScopePtr scope)
1483
+ : graph_(&g), prev_scope_(g.current_scope()) {
1484
+ g.set_current_scope(std::move(scope));
1485
+ }
1486
+ ~WithCurrentScope() {
1487
+ graph_->set_current_scope(prev_scope_);
1488
+ }
1489
+
1490
+ private:
1491
+ Graph* graph_;
1492
+ ScopePtr prev_scope_;
1493
+ };
1494
+
1495
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1496
+ inline Value::Value(Node* node_, size_t offset_)
1497
+ : node_(node_),
1498
+ offset_(offset_),
1499
+ unique_(node_->graph_->next_unique_++),
1500
+ type_(TensorType::get()) {
1501
+ node_->graph_->all_values.emplace(this);
1502
+ }
1503
+
1504
+ inline Value* Value::setType(TypePtr type) {
1505
+ AT_ASSERT(type);
1506
+ if (auto dyn = type->castRaw<c10::DynamicType>()) {
1507
+ type = dyn->fallback();
1508
+ }
1509
+ type_ = std::move(type);
1510
+ for (Use& use : uses_) {
1511
+ use.user->op_ = nullptr;
1512
+ }
1513
+ return this;
1514
+ }
1515
+
1516
+ inline Graph* Value::owningGraph() {
1517
+ return node()->owningGraph();
1518
+ }
1519
+
1520
+ inline const Graph* Value::owningGraph() const {
1521
+ return node()->owningGraph();
1522
+ }
1523
+
1524
+ /************* All nodes not required to be defined before Graph **************/
1525
+ struct ProfileOp : public Node {
1526
+ static const Symbol Kind;
1527
+ ProfileOp(Graph* graph, std::function<void(std::vector<IValue>&)> callback)
1528
+ : Node(graph, ::c10::prim::profile), callback_(std::move(callback)) {}
1529
+
1530
+ void cloneFrom(Node* other_) override;
1531
+ Node* allocNewInstance(Graph* g) override;
1532
+
1533
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1534
+ return callback_;
1535
+ }
1536
+
1537
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1538
+ callback_ = std::move(callback);
1539
+ }
1540
+
1541
+ bool hasSeenTensor() const {
1542
+ return has_seen_tensor_;
1543
+ }
1544
+
1545
+ void setHasSeenTensor(bool has_seen_tensor) {
1546
+ has_seen_tensor_ = has_seen_tensor;
1547
+ }
1548
+
1549
+ private:
1550
+ std::function<void(std::vector<IValue>&)> callback_;
1551
+ bool has_seen_tensor_ = false;
1552
+ };
1553
+
1554
+ struct TORCH_API ProfileIValueOp : public Node {
1555
+ static const Symbol Kind;
1556
+ ProfileIValueOp(
1557
+ Graph* graph,
1558
+ std::function<void(std::vector<IValue>&)> callback)
1559
+ : Node(graph, ::c10::prim::profile_ivalue),
1560
+ callback_(std::move(callback)) {}
1561
+
1562
+ void cloneFrom(Node* other_) override;
1563
+ Node* allocNewInstance(Graph* g) override;
1564
+
1565
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1566
+ return callback_;
1567
+ }
1568
+
1569
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1570
+ callback_ = std::move(callback);
1571
+ }
1572
+
1573
+ private:
1574
+ std::function<void(std::vector<IValue>&)> callback_;
1575
+ };
1576
+
1577
+ // execute a Python function, used for Ops we can't optimize but that we want to
1578
+ // optimize around
1579
+ //
1580
+ // Note: actual implementation (ConcretePythonOp) is defined in python_ir.cpp
1581
+ // which is not included in libtorch.so. We still include some bits and pieces
1582
+ // of PythonOp here to enable writing simple passes generically. In general,
1583
+ // python-aware bits need to be moved to the descendant classes.
1584
+ struct TORCH_API PythonOp : public Node {
1585
+ using Node::Node;
1586
+
1587
+ virtual std::string name() const = 0;
1588
+ virtual void writeScalars(std::ostream& out) const = 0;
1589
+ void cloneFrom(Node* other_) override = 0;
1590
+ Node* allocNewInstance(Graph* g) override = 0;
1591
+ // recover the autograd.Function instance, if this PythonOp's function
1592
+ // was originally SomeFunction.apply
1593
+ // used in ONNX for discovering symbolics
1594
+ virtual c10::optional<THPObjectPtr> autogradFunction() const = 0;
1595
+
1596
+ virtual void lint_python() const = 0;
1597
+ };
1598
+
1599
+ TORCH_API void LintGraph(const std::shared_ptr<Graph>& graph);
1600
+
1601
+ TORCH_API at::ArrayRef<Value*> createTupleUnpack(Value* v);
1602
+
1603
+ /** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values.
1604
+ * The insertion happens at the current insertion point.
1605
+ * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE
1606
+ * values and their cloned copies in \p G.
1607
+ */
1608
+ TORCH_API std::vector<Value*> insertGraph(
1609
+ Graph& g,
1610
+ Graph& callee,
1611
+ ArrayRef<Value*> inputs);
1612
+ TORCH_API std::vector<Value*> insertGraph(
1613
+ Graph& g,
1614
+ Graph& callee,
1615
+ ArrayRef<Value*> inputs,
1616
+ std::unordered_map<Value*, Value*>& value_map);
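A hedged sketch of calling insertGraph(): splice `callee` into `g` at g's current insertion point, feeding it g's own inputs (assumes the two graphs have compatible arity and types; the helper name is illustrative):

    std::vector<torch::jit::Value*> spliceCallee(
        torch::jit::Graph& g,
        torch::jit::Graph& callee) {
      std::vector<torch::jit::Value*> inputs(g.inputs().begin(), g.inputs().end());
      // Returns the cloned copies of callee's outputs, now living in g.
      return torch::jit::insertGraph(g, callee, inputs);
    }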
1617
+
1618
+ /** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and
1619
+ * replace all its uses with corresponding outputs of the inserted function.
1620
+ * This asserts that the number of outputs of the original node and the
1621
+ * graph are the same.
1622
+ */
1623
+ TORCH_API std::vector<Value*> inlineCallTo(
1624
+ Node* to_replace,
1625
+ GraphFunction* callee,
1626
+ bool use_graph = true);
1627
+
1628
+ TORCH_API std::vector<Value*> inlineCallTo(
1629
+ Node* to_replace,
1630
+ GraphFunction* callee,
1631
+ Graph* callee_graph);
1632
+
1633
+ /** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a
1634
+ * tuple unpack node and return the resulting values.
1635
+ */
1636
+ TORCH_API std::vector<Value*> unpackOutputs(const std::vector<Value*>& outputs);
1637
+
1638
+ TORCH_API std::vector<Node*> findAllNodes(Graph& g, Symbol kind, bool recurse);
1639
+ TORCH_API std::vector<Node*> findAllNodes(Block& b, Symbol kind, bool recurse);
1640
+ TORCH_API std::vector<Node*> findAllNodes(
1641
+ at::ArrayRef<Block*> a,
1642
+ Symbol kind,
1643
+ bool recurse);
1644
+
1645
+ struct TORCH_API OperatorSet {
1646
+ OperatorSet(std::initializer_list<const char*> sig_literals);
1647
+ std::vector<std::shared_ptr<Operator>> getOps() const;
1648
+ void insert(std::initializer_list<const char*> sig_literals);
1649
+
1650
+ private:
1651
+ friend struct Node;
1652
+ std::unordered_map<Symbol, std::vector<std::shared_ptr<Operator>>> ops;
1653
+ };
1654
+
1655
+ template <typename T>
1656
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1657
+ struct OperatorMap {
1658
+ // Type aliasing
1659
+ using OpMapType = typename std::pair<std::shared_ptr<Operator>, T>;
1660
+ using ValueType = std::vector<OpMapType>;
1661
+ using MapType = std::unordered_map<Symbol, ValueType>;
1662
+
1663
+ OperatorMap() = default;
1664
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1665
+ explicit OperatorMap(
1666
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> init) {
1667
+ insert(init);
1668
+ }
1669
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1670
+ explicit OperatorMap(std::initializer_list<std::pair<const char*, T>> init) {
1671
+ insert(init);
1672
+ }
1673
+
1674
+ void insert(const std::shared_ptr<Operator>& op, T val) {
1675
+ // Remove if exists before insert
1676
+ erase(op);
1677
+ map[Symbol::fromQualString(op->schema().name())].emplace_back(
1678
+ std::make_pair(op, val));
1679
+ }
1680
+
1681
+ void insert(const OperatorSet& op_set, T val) {
1682
+ for (auto& op : op_set.getOps()) {
1683
+ insert(op, val);
1684
+ }
1685
+ }
1686
+
1687
+ void insert(
1688
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> v) {
1689
+ for (auto& el : v) {
1690
+ insert(el.first, el.second);
1691
+ }
1692
+ }
1693
+
1694
+ void insert(std::initializer_list<std::pair<const char*, T>> v) {
1695
+ for (auto& el : v) {
1696
+ insert(getOperatorForLiteral(el.first), el.second);
1697
+ }
1698
+ }
1699
+
1700
+ void erase(const std::shared_ptr<Operator>& op) {
1701
+ auto it = map.find(Symbol::fromQualString(op->schema().name()));
1702
+ if (it == map.end()) {
1703
+ return;
1704
+ }
1705
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1706
+ if (vit->first->schema() == op->schema()) {
1707
+ it->second.erase(vit);
1708
+ break;
1709
+ }
1710
+ }
1711
+ if (it->second.size() == 0) {
1712
+ map.erase(Symbol::fromQualString(op->schema().name()));
1713
+ }
1714
+ }
1715
+
1716
+ bool contains(const Operator& op) const {
1717
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1718
+ if (it == map.end()) {
1719
+ return false;
1720
+ }
1721
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1722
+ if (vit->first->schema() == op.schema()) {
1723
+ return true;
1724
+ }
1725
+ }
1726
+ return false;
1727
+ }
1728
+
1729
+ bool contains(const Node* n) const {
1730
+ return n->maybeOperator() && contains(n->getOperator());
1731
+ }
1732
+
1733
+ c10::optional<T> find(const Operator& op) {
1734
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1735
+ if (it == map.end()) {
1736
+ return c10::nullopt;
1737
+ }
1738
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1739
+ if (vit->first->schema() == op.schema()) {
1740
+ return vit->second;
1741
+ }
1742
+ }
1743
+ return c10::nullopt;
1744
+ }
1745
+
1746
+ // TODO: return iterator
1747
+ std::vector<OpMapType> getAllKeysAndValues() const {
1748
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1749
+ std::vector<OpMapType> keys_values;
1750
+ for (auto& symbol_mapping : map) {
1751
+ auto& vec = symbol_mapping.second;
1752
+ for (auto& pair : vec) {
1753
+ keys_values.push_back(pair);
1754
+ }
1755
+ }
1756
+ return keys_values;
1757
+ }
1758
+
1759
+ private:
1760
+ friend struct Node;
1761
+ MapType map;
1762
+ };
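A small sketch of OperatorMap as a schema-keyed lookup table; the schema literals and cost values below are made-up examples, and the helper name is illustrative:

    // Sketch: attach an integer "cost" to a couple of operators and query a node.
    int costOf(const torch::jit::Node* n) {
      static torch::jit::OperatorMap<int> op_costs({
          {"aten::relu(Tensor self) -> Tensor", 1},
          {"aten::tanh(Tensor self) -> Tensor", 4},
      });
      if (!op_costs.contains(n)) {
        return 0;  // node has no resolved operator, or it is not in the map
      }
      return *op_costs.find(n->getOperator());
    }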
1763
+
1764
+ template <typename T>
1765
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1766
+ struct FunctionSchemaMap {
1767
+ // Type aliasing
1768
+ using FuncSchemaMapType = typename std::pair<FunctionSchema, T>;
1769
+ using ValueType = std::vector<FuncSchemaMapType>;
1770
+ using MapType = std::unordered_map<Symbol, ValueType>;
1771
+
1772
+ FunctionSchemaMap() = default;
1773
+ void insert(const FunctionSchema& schema, T val) {
1774
+ // Remove if exists before insert
1775
+ erase(schema);
1776
+ map[Symbol::fromQualString(schema.name())].emplace_back(
1777
+ std::make_pair(schema, val));
1778
+ }
1779
+
1780
+ void erase(const FunctionSchema& schema) {
1781
+ auto it = map.find(Symbol::fromQualString(schema.name()));
1782
+ if (it == map.end()) {
1783
+ return;
1784
+ }
1785
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1786
+ if (vit->first == schema) {
1787
+ it->second.erase(vit);
1788
+ break;
1789
+ }
1790
+ }
1791
+ if (it->second.size() == 0) {
1792
+ map.erase(Symbol::fromQualString(schema.name()));
1793
+ }
1794
+ }
1795
+
1796
+ bool contains(const FunctionSchema& schema) const {
1797
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1798
+ if (it == map.end()) {
1799
+ return false;
1800
+ }
1801
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1802
+ if (vit->first == schema) {
1803
+ return true;
1804
+ }
1805
+ }
1806
+ return false;
1807
+ }
1808
+
1809
+ c10::optional<T> find(const FunctionSchema& schema) const {
1810
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1811
+ if (it == map.end()) {
1812
+ return c10::nullopt;
1813
+ }
1814
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1815
+ if (vit->first == schema) {
1816
+ return vit->second;
1817
+ }
1818
+ }
1819
+ return c10::nullopt;
1820
+ }
1821
+
1822
+ // TODO: return iterator
1823
+ std::vector<FuncSchemaMapType> getAllKeysAndValues() const {
1824
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1825
+ std::vector<FuncSchemaMapType> keys_values;
1826
+ for (auto& symbol_mapping : map) {
1827
+ auto& vec = symbol_mapping.second;
1828
+ for (auto& pair : vec) {
1829
+ keys_values.push_back(pair);
1830
+ }
1831
+ }
1832
+ return keys_values;
1833
+ }
1834
+
1835
+ private:
1836
+ friend struct Node;
1837
+ MapType map;
1838
+ };
1839
+
1840
+ } // namespace jit
1841
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir_views.h ADDED
@@ -0,0 +1,164 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct IfView {
10
+ explicit IfView(Node* node) : node_(node) {
11
+ AT_ASSERT(node->kind() == ::c10::prim::If);
12
+ }
13
+ Value* cond() const {
14
+ return node_->input(0);
15
+ }
16
+ Block* thenBlock() const {
17
+ return node_->blocks().at(0);
18
+ }
19
+ Block* elseBlock() const {
20
+ return node_->blocks().at(1);
21
+ }
22
+ ArrayRef<Value*> thenOutputs() const {
23
+ return thenBlock()->outputs();
24
+ }
25
+ ArrayRef<Value*> elseOutputs() const {
26
+ return elseBlock()->outputs();
27
+ }
28
+ ArrayRef<Value*> outputs() const {
29
+ return node_->outputs();
30
+ }
31
+ Node* node() const {
32
+ return node_;
33
+ }
34
+ operator Node*() const {
35
+ return node_;
36
+ }
37
+
38
+ void permuteOutputs(const std::vector<size_t>& new_output_order) {
39
+ node_->permuteOutputs(new_output_order);
40
+ thenBlock()->permuteOutputs(new_output_order);
41
+ elseBlock()->permuteOutputs(new_output_order);
42
+ }
43
+
44
+ private:
45
+ Node* node_;
46
+ };
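A minimal sketch of IfView, assuming `n` is a prim::If node; the rewiring/cleanup a real pass would do is omitted:

    void inspectIf(torch::jit::Node* n) {
      torch::jit::IfView ifv(n);
      // If the condition is a constant, pick the outputs of the taken branch.
      if (auto cond = torch::jit::toIValue(ifv.cond())) {
        auto taken = cond->toBool() ? ifv.thenOutputs() : ifv.elseOutputs();
        (void)taken;  // a real pass would replace n's outputs with `taken`
      }
    }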
47
+
48
+ struct LoopView {
49
+ explicit LoopView(Node* node) : node_(node) {
50
+ AT_ASSERT(
51
+ node->kind() == ::c10::prim::Loop || node->kind() == ::c10::onnx::Loop);
52
+ }
53
+ Block* bodyBlock() const {
54
+ return node_->blocks().at(0);
55
+ }
56
+ Value* cond() const {
57
+ return node_->input(0);
58
+ }
59
+ Value* maxTripCount() const {
60
+ return node_->input(0);
61
+ }
62
+ Value* inputCond() const {
63
+ return node_->input(1);
64
+ }
65
+ Value* nextCond() const {
66
+ return bodyBlock()->outputs().at(0);
67
+ }
68
+ Value* currentTripCount() const {
69
+ return bodyBlock()->inputs().at(0);
70
+ }
71
+ ArrayRef<Value*> carriedInputs() const {
72
+ // skip trip count and cond
73
+ return node_->inputs().slice(2);
74
+ }
75
+ ArrayRef<Value*> carriedInputsWithCond() const {
76
+ // skip trip count and cond
77
+ return node_->inputs().slice(1);
78
+ }
79
+ ArrayRef<Value*> carriedOutputs() const {
80
+ return node_->outputs();
81
+ }
82
+ ArrayRef<Value*> bodyCarriedInputs() const {
83
+ // skip trip count and cond
84
+ return bodyBlock()->inputs().slice(1);
85
+ }
86
+ ArrayRef<Value*> bodyCarriedOutputs() const {
87
+ return bodyBlock()->outputs().slice(1);
88
+ }
89
+ Node* node() const {
90
+ return node_;
91
+ }
92
+ operator Node*() const {
93
+ return node_;
94
+ }
95
+
96
+ void permuteLoopCarried(const std::vector<size_t>& new_output_order) {
97
+ node_->permuteOutputs(new_output_order);
98
+ // skip trip count and cond
99
+ node_->permuteInputs(adjustIndices(2, new_output_order));
100
+ auto adjusted_block_order = adjustIndices(1, new_output_order);
101
+ bodyBlock()->permuteOutputs(adjusted_block_order);
102
+ bodyBlock()->permuteInputs(adjusted_block_order);
103
+ }
104
+
105
+ void replaceMaxTripCount(Value* new_max_trip_count) {
106
+ node_->replaceInput(0, new_max_trip_count);
107
+ }
108
+ void replaceInputCondition(Value* new_input_condition) {
109
+ node_->replaceInput(1, new_input_condition);
110
+ }
111
+
112
+ // our way of encoding loops makes them difficult to turn back into python
113
+ // syntax. we have to check properties of the condition and trip count inputs
114
+ // to figure out which one it initially was. ModifiedLoops are not directly
115
+ // mappable to either For or While
116
+ enum LoopType { While, For, ModifiedLoop };
117
+
118
+ LoopType loopType() {
119
+ auto trip_count = toIValue(maxTripCount());
120
+ auto cond_input = toIValue(inputCond());
121
+ auto cond_next = toIValue(nextCond());
122
+
123
+ bool condition_is_always_true =
124
+ cond_input && cond_input->toBool() && cond_next && cond_next->toBool();
125
+ bool trip_count_is_specified = !trip_count || // trip is not a constant
126
+ trip_count->toInt() !=
127
+ std::numeric_limits<int64_t>::max() || // it is a constant but not
128
+ // the default one
129
+ !currentTripCount()
130
+ ->uses()
131
+ .empty(); // it is actually being used in the body.
132
+
133
+ if (condition_is_always_true) {
134
+ // if the trip count was not specified this was a user-written while True:
135
+ return trip_count_is_specified ? For : While;
136
+ } else {
137
+ if (trip_count_is_specified) {
138
+ return ModifiedLoop;
139
+ }
140
+ return While;
141
+ }
142
+ }
143
+
144
+ private:
145
+ Node* node_;
146
+
147
+ // adjust index_ordering by adding indices 0 through adjust - 1, and
148
+ // incrementing all existing inputs by adjust
149
+ static std::vector<size_t> adjustIndices(
150
+ size_t adjust,
151
+ const std::vector<size_t>& index_ordering) {
152
+ std::vector<size_t> adjusted;
153
+ adjusted.reserve(adjust + index_ordering.size());
154
+ for (const auto i : c10::irange(adjust)) {
155
+ adjusted.push_back(i);
156
+ }
157
+ for (auto index : index_ordering) {
158
+ adjusted.push_back(index + adjust);
159
+ }
160
+ return adjusted;
161
+ }
162
+ };
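A one-line sketch of the loop-classification heuristic above, assuming `n` is a prim::Loop node (the helper name is illustrative):

    bool looksLikeForLoop(torch::jit::Node* n) {
      // LoopView::loopType() inspects the trip count and condition inputs.
      return torch::jit::LoopView(n).loopType() == torch::jit::LoopView::For;
    }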
163
+ } // namespace jit
164
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/named_value.h ADDED
@@ -0,0 +1,84 @@
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <torch/csrc/jit/frontend/source_range.h>
4
+ #include <torch/csrc/jit/ir/constants.h>
5
+ #include <torch/csrc/utils/variadic.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Value;
11
+
12
+ /**
13
+ * A value with optional extra name and location information. Used during
14
+ * schema matching to provide extra error information and resolve kwargs.
15
+ */
16
+ struct NamedValue {
17
+ NamedValue(const SourceRange& loc, const std::string& name, Value* value)
18
+ : loc_(loc), name_(name), value_(value) {}
19
+ NamedValue(const SourceRange& loc, Value* value) : loc_(loc), value_(value) {}
20
+
21
+ /* implicit */ NamedValue(Value* value) : value_(value) {}
22
+ NamedValue(const std::string& name, Value* value)
23
+ : name_(name), value_(value) {}
24
+
25
+ /* implicit */ NamedValue(IValue value)
26
+ : value_(nullptr), ivalue_(std::move(value)) {}
27
+
28
+ NamedValue(const std::string& name, IValue value)
29
+ : name_(name), ivalue_(std::move(value)) {}
30
+
31
+ template <
32
+ typename T,
33
+ typename = enable_if_t<
34
+ (!std::is_same<decay_t<T>, NamedValue>::value &&
35
+ !std::is_same<decay_t<T>, Value*>::value &&
36
+ !std::is_same<decay_t<T>, IValue>::value)>>
37
+ // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
38
+ NamedValue(T&& t) : NamedValue(IValue(std::forward<T>(t))) {}
39
+
40
+ template <
41
+ typename T,
42
+ typename = enable_if_t<
43
+ (!std::is_same<decay_t<T>, Value*>::value &&
44
+ !std::is_same<decay_t<T>, IValue>::value)>>
45
+ NamedValue(const std::string& name, T&& t)
46
+ : NamedValue(name, IValue(std::forward<T>(t))) {}
47
+
48
+ SourceRange locOr(const SourceRange& backup_location) const {
49
+ if (!loc_)
50
+ return backup_location;
51
+ return loc();
52
+ }
53
+
54
+ // note: this will insert a constant node into the graph at the current
55
+ // insert point if this NamedValue is actually a constant
56
+ Value* value(Graph& g) const {
57
+ if (!value_)
58
+ return insertConstant(
59
+ g, ivalue_); // use insertConstant to remove need to include ir.h here
60
+ return value_;
61
+ }
62
+
63
+ const std::string& name() const {
64
+ AT_ASSERT(name_);
65
+ return *name_;
66
+ }
67
+
68
+ const SourceRange& loc() const {
69
+ AT_ASSERT(loc_);
70
+ return *loc_;
71
+ }
72
+
73
+ at::TypePtr type() const;
74
+
75
+ private:
76
+ c10::optional<SourceRange> loc_;
77
+ c10::optional<std::string> name_;
78
+ Value* value_{nullptr};
79
+ // only valid if value_ == nullptr;
80
+ IValue ivalue_;
81
+ };
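A hedged sketch of how NamedValue resolves keyword arguments during schema matching; it assumes ir.h is included and that `a` and `b` are Tensor-typed Values owned by `g` (the function name is illustrative):

    torch::jit::Value* addWithAlpha(
        torch::jit::Graph& g,
        torch::jit::Value* a,
        torch::jit::Value* b) {
      // The `alpha` argument of aten::add is passed by name via a NamedValue.
      return g.insert(
          c10::Symbol::fromQualString("aten::add"),
          {a, b},
          {torch::jit::NamedValue("alpha", 2)});
    }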
82
+
83
+ } // namespace jit
84
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/scope.h ADDED
@@ -0,0 +1,220 @@
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <ATen/core/symbol.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/frontend/source_range.h>
8
+ #include <unordered_map>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ struct ModuleInstanceInfo;
13
+ constexpr size_t kModuleInstanceInfo = 2;
14
+
15
+ namespace utils {
16
+ std::string get_module_info(const ModuleInstanceInfo& module_instance_info);
17
+ } // namespace utils
18
+
19
+ // Scope is a node of a trie that represents the tree of nested scopes.
20
+ // Individual scopes are pushed and popped from Graph, which holds a
21
+ // pointer to the current scope. Each Node in Graph holds a pointer
22
+ // to the scope that was current when the node was created.
23
+ // The trie never needs to shrink, it only grows until it is disposed
24
+ // of when Graph is deallocated. Hence, pointers to scopes held by nodes
25
+ // will always be valid as long as Graph is alive.
26
+ struct Scope;
27
+ using ScopePtr = c10::intrusive_ptr<Scope>;
28
+ using c10::Symbol;
29
+
30
+ struct TORCH_API Scope : public c10::intrusive_ptr_target {
31
+ private:
32
+ ScopePtr parent_;
33
+ Symbol name_;
34
+ ScopePtr intrusive_from_this();
35
+
36
+ public:
37
+ Scope();
38
+
39
+ Scope(ScopePtr parent, Symbol name);
40
+
41
+ ScopePtr push(Symbol name);
42
+
43
+ ScopePtr parent();
44
+
45
+ bool isRoot() const;
46
+
47
+ bool isBlank() const;
48
+
49
+ ScopePtr getRoot();
50
+
51
+ size_t getDepth();
52
+
53
+ Symbol name() const;
54
+
55
+ std::string namesFromRoot(const std::string& separator = "/") const;
56
+ };
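A small sketch of building a scope trie by hand; the scope names are made-up and the expected string is a hedged expectation, not asserted behavior:

    std::string exampleScopePath() {
      torch::jit::ScopePtr root = c10::make_intrusive<torch::jit::Scope>();
      torch::jit::ScopePtr leaf =
          root->push(c10::Symbol::scope("encoder"))->push(c10::Symbol::scope("layer0"));
      return leaf->namesFromRoot();  // expected to be "encoder/layer0"
    }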
57
+
58
+ struct Function;
59
+ struct InlinedCallStack;
60
+
61
+ /**
62
+ * ModuleInstanceInfo is a structure to include the module type and instance
63
+ * name. It also provide public methods to get the pointer to module type and
64
+ * instance name.
65
+ *
66
+ * This structure is mainly used as a private member in InlinedCallStack, such
67
+ * that one can follow the callstack to find the relevant module hierarchy.
68
+ */
69
+ struct ModuleInstanceInfo {
70
+ private:
71
+ c10::ClassTypePtr module_type_{nullptr};
72
+ std::string instance_name_;
73
+
74
+ public:
75
+ ModuleInstanceInfo() = default;
76
+ ModuleInstanceInfo(c10::ClassTypePtr module_type, std::string instance_name);
77
+ c10::ClassTypePtr class_type() {
78
+ return module_type_;
79
+ }
80
+ c10::ClassTypePtr class_type() const {
81
+ return module_type_;
82
+ }
83
+ std::string instance_name() const {
84
+ return instance_name_;
85
+ }
86
+
87
+ bool operator==(const ModuleInstanceInfo& rhs) const {
88
+ return (class_type() == rhs.class_type()) &&
89
+ (instance_name() == rhs.instance_name());
90
+ }
91
+ };
92
+
93
+ /**
94
+ * InlinedCallStack is an element in a list representing callstack of functions
95
+ * that have been inlined.
96
+ *
97
+ * Each such element holds info about the current callsite (Function and
98
+ * SourceRange) and a pointer to the next element in the list. The last element
99
+ * in the list represents the innermost function that was inlined.
100
+ *
101
+ * For instance, if a node has a callstack
102
+ * [foo, source_range1] -> [bar, source_range2]
103
+ * it means that this node was originally from function 'bar' that was called
104
+ * at 'source_range2' in function 'foo' that was called in the current function
105
+ * at 'source_range1'.
106
+ *
107
+ * If a node did not come from any inlined function, its callstack will be
108
+ * empty.
109
+ *
110
+ * The callstack lists only grow, we never remove elements from them, which
111
+ * allows us to reuse same elements in different lists. For instance, if we
112
+ * inline function 'bar' to 'foo' and then inline 'foo' to two functions 'ham'
113
+ * and 'baz', the callstacks would look like:
114
+ *
115
+ * [baz, source_range3] --
116
+ * \
117
+ * --> [foo, source_range1] -> [bar, source_range2]
118
+ * /
119
+ * [ham, source_range4] --
120
+ */
121
+ using InlinedCallStackPtr = c10::intrusive_ptr<InlinedCallStack>;
122
+ using InlinedCallStackEntry =
123
+ std::tuple<Function*, SourceRange, c10::optional<ModuleInstanceInfo>>;
124
+
125
+ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target {
126
+ private:
127
+ c10::optional<InlinedCallStackPtr> callee_;
128
+ Function* fn_;
129
+ // Reason for fn_name_ even though we have fn_
130
+ // Serialized callstack is used in circustmances where InlinedCallstack
131
+ // cannot be constructed during runtime, e.g. mobile runtime or
132
+ // delegated backends.
133
+ // Since in those cases we do not have Function* we store function name
134
+ // fn_name does not give you access to the same information that Function*
135
+ // does, however in mobile/delegated backend runtime we use InlindedCallStack
136
+ // for exception stack and for that purpose fn_name_ suffices.
137
+ const std::string fn_name_;
138
+ SourceRange source_range_;
139
+ InlinedCallStackPtr intrusive_from_this();
140
+ c10::optional<ModuleInstanceInfo> module_instance_info_;
141
+
142
+ public:
143
+ // Constructor for a leaf callstack node.
144
+ InlinedCallStack(Function* fn, SourceRange source_range);
145
+
146
+ // Constructor for a leaf callstack node.
147
+ InlinedCallStack(
148
+ Function* fn,
149
+ SourceRange source_range,
150
+ c10::optional<ModuleInstanceInfo> module_instance_info);
151
+
152
+ // Constructor for a leaf callstack node.
153
+ InlinedCallStack(
154
+ Function* fn,
155
+ SourceRange source_range,
156
+ c10::optional<ModuleInstanceInfo> module_instance_info,
157
+ std::string& function_name);
158
+
159
+ // Constructor for an inner callstack node.
160
+ InlinedCallStack(
161
+ InlinedCallStackPtr callee,
162
+ Function* fn,
163
+ SourceRange source_range);
164
+
165
+ InlinedCallStack(
166
+ InlinedCallStackPtr callee,
167
+ Function* fn,
168
+ SourceRange source_range,
169
+ c10::optional<ModuleInstanceInfo> module_instance_info);
170
+
171
+ InlinedCallStack(
172
+ InlinedCallStackPtr callee,
173
+ Function* fn,
174
+ SourceRange source_range,
175
+ c10::optional<ModuleInstanceInfo> module_instance_info,
176
+ std::string& function_name);
177
+
178
+ // Return next element in the callstack list.
179
+ c10::optional<InlinedCallStackPtr> callee() const;
180
+
181
+ // Return module instance associated with the current element.
182
+ c10::optional<ModuleInstanceInfo> module_instance() const;
183
+
184
+ // Returns the source range of the node
185
+ SourceRange source_range() const;
186
+
187
+ Function* function() const;
188
+
189
+ const std::string& function_name() const;
190
+
191
+ // Return callstack as a vector of [Function, SourceRange] pairs.
192
+ std::vector<InlinedCallStackEntry> vec();
193
+
194
+ void setCallee(c10::optional<InlinedCallStackPtr>);
195
+
196
+ bool operator==(const InlinedCallStack& rhs) const {
197
+ // No need to compare fn_, since source_range equivalence check
198
+ // should suffice.
199
+ return (module_instance().has_value() ==
200
+ rhs.module_instance().has_value()) &&
201
+ (module_instance().has_value() &&
202
+ module_instance().value() == rhs.module_instance().value()) &&
203
+ callee() == rhs.callee() && source_range() == rhs.source_range();
204
+ }
205
+
206
+ bool operator!=(const InlinedCallStack& rhs) const {
207
+ return !(*this == rhs);
208
+ }
209
+ };
210
+
211
+ // {source range, node name, InlinedCallStack}
212
+ // We store node name because same debug infor will be used for
213
+ // profiling as well, so we need to know op names as well.
214
+ using DebugInfoTuple =
215
+ std::tuple<SourceRange, std::string, InlinedCallStackPtr>;
216
+ constexpr size_t kDebugInfoTupleSourceRangeIndex{0};
217
+ constexpr size_t kDebugInfoTupleNodeNameIndex{1};
218
+ constexpr size_t kDebugInfoTupleInlinedCSIndex{2};
219
+ } // namespace jit
220
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h ADDED
@@ -0,0 +1,74 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ /**
12
+ * \brief A structure describing a match of a pattern in a graph.
13
+ *
14
+ * The structure contains an anchor node, from which the match was found, and
15
+ * match-maps for nodes and values. A match-map specifies the correspondance
16
+ * between nodes in the pattern graph (match-map keys) with nodes in the actual
17
+ * graph (match-map values). We keep such maps for both nodes and values.
18
+ */
19
+ struct Match {
20
+ Node* anchor;
21
+ std::unordered_map<const Node*, Node*> nodes_map;
22
+ std::unordered_map<const Value*, Value*> values_map;
23
+ };
24
+
25
+ /**
26
+ * \brief Find all matches of a \p PATTERN in a \p GRAPH.
27
+ *
28
+ * The function returns a vector of match-descriptors (see description of
29
+ * `struct Match`).
30
+ *
31
+ * Matching rules:
32
+ * - Pattern graph must contain a single block.
33
+ * - Matched subgraphs do not span across different blocks.
34
+ * - No uses outside the match are allowed, except for Param and Return nodes.
35
+ * Basically, we're matching hammocks, not arbitrary subgraphs.
36
+ * - The pattern graph must return only one value (i.e. it must have a single
37
+ * node leading to return).
38
+ * - Nodes that are not used in computation of the return value in the pattern
39
+ * graph are ignored during matching (IOW, we're essentially performing DCE on
40
+ * the pattern).
41
+ * - Pattern graph nodes cannot alias. TODO: the check is not implemented yet.
42
+ * - Aliasing nodes in the graph cannot constitute a match (i.e. through all
43
+ * found matches, no nodes in the subgraph alias with each other). TODO: check
44
+ * not implemented yet.
45
+ * - The matcher will not mutate either the pattern graph or the matched graph.
46
+ * The matched graph is taken as non-const so that Match may contain non-const
47
+ * pointers. This enables clients of this API to use Match to drive mutations.
48
+ *
49
+ * Note [Multi-output Patterns]
50
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
51
+ * Subgraph matcher provides limited support for multi-output patterns. With a
52
+ * single output pattern, a single scan through the graph is sufficient to
53
+ * find all the matches: given a starting node (an "anchor"), we can
54
+ * deterministically check whether a pattern matches a subgraph corresponding to
55
+ * this anchor node. For a general case of multi-output patterns, we would have
56
+ * N anchors, which would result in M^N comparisons (M is the size of the
57
+ * graph). Clearly this is computationally prohibitive.
58
+ *
59
+ * To overcome this, we impose some constraints on the multi-output patterns
60
+ * that we accept. We require that checking whether the pattern matches a
61
+ * subgraph would still be fully determined by a single node in the graph. To
62
+ * achieve this, we designate the first output in the pattern as the "main"
63
+ * output and assume that we can traverse up from this node to match the
64
+ * entire pattern.
65
+ *
66
+ * Corollary 1: the order of outputs in the pattern matters!
67
+ * Corollary 2: patterns cannot contain any nodes not participating in the main
68
+ * output computation.
69
+ */
70
+ std::vector<Match> TORCH_API
71
+ findPatternMatches(const Graph& pattern, Graph& graph);
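A hedged sketch of using the matcher, assuming <torch/csrc/jit/ir/irparser.h> is available for parseIR (the pattern string and helper name are illustrative):

    // Count how many single-node aten::relu subgraphs occur in `graph`.
    size_t countReluMatches(torch::jit::Graph& graph) {
      auto pattern = std::make_shared<torch::jit::Graph>();
      torch::jit::parseIR(
          R"IR(
    graph(%x):
      %y = aten::relu(%x)
      return (%y))IR",
          pattern.get());
      return torch::jit::findPatternMatches(*pattern, graph).size();
    }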
72
+
73
+ } // namespace jit
74
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct HashType {
10
+ size_t operator()(const TypePtr& type) const;
11
+ size_t operator()(const c10::ConstTypePtr& type) const;
12
+ };
13
+
14
+ struct EqualType {
15
+ bool operator()(const TypePtr& a, const TypePtr& b) const;
16
+ bool operator()(const c10::ConstTypePtr& a, const c10::ConstTypePtr& b) const;
17
+ };
18
+
19
+ } // namespace jit
20
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/operator_name.h>
7
+ #include <torch/csrc/jit/runtime/instruction.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ namespace mobile {
12
+
13
+ using Stack = std::vector<c10::IValue>;
14
+ using DebugHandle = int64_t;
15
+
16
+ class Function;
17
+
18
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
19
+ struct Code {
20
+ std::vector<Instruction> instructions_;
21
+ std::vector<DebugHandle> debug_handles_;
22
+ std::vector<c10::OperatorName> op_names_;
23
+ std::vector<int> operator_input_sizes_;
24
+ std::vector<std::function<void(Stack&)>> operators_;
25
+ std::vector<c10::IValue> constants_;
26
+ std::vector<c10::TypePtr> types_;
27
+ // TODO After we actually export CALL instructions we can remove this.
28
+ // We may need a two-stage importing scheme, where we firstly construct all
29
+ // function objects, and then append referenced function pointers. This could
30
+ // be done in parseMethods().
31
+ std::vector<mobile::Function*> functions_;
32
+ size_t register_size_ = 0; // Aggregated output size.
33
+ // initialized means operators_ array is filled with operators
34
+ bool initialized = false;
35
+ };
36
+
37
+ } // namespace mobile
38
+ } // namespace jit
39
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+ #include <c10/util/flat_hash_map.h>
3
+ #include <caffe2/serialize/inline_container.h>
4
+ #include <torch/csrc/jit/api/compilation_unit.h>
5
+ #include <torch/csrc/jit/ir/scope.h>
6
+ #include <torch/csrc/jit/serialization/source_range_serialization.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ /*
11
+ * MobileDebugTable:
12
+ * Deserializes debug_pkl and callstack_map records from PT model's zip archive
13
+ * and stores them in a map of debug handles to DebugInfoPair. Debug handles are
14
+ * unique per model and runtime, be in lite interpreter or delegate, an
15
+ * exception of BackendRuntimeException should raised using debug handles.
16
+ * getSourceDebugString method is responsible for translating debug
17
+ * handles to correspond debug information.
18
+ * This debug informatin includes stack trace of model level source code and
19
+ * module hierarchy where the exception occurred.
20
+ */
21
+ class MobileDebugTable {
22
+ public:
23
+ MobileDebugTable() = default;
24
+ MobileDebugTable(
25
+ std::unique_ptr<caffe2::serialize::PyTorchStreamReader>& reader,
26
+ const std::shared_ptr<CompilationUnit>& cu);
27
+
28
+ template <typename It>
29
+ MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {}
30
+
31
+ std::string getSourceDebugString(
32
+ const int64_t debug_handle,
33
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
34
+ std::string getSourceDebugString(
35
+ const std::vector<int64_t>& debug_handles,
36
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
37
+ std::string getModuleHierarchyInfo(
38
+ const int64_t debug_handle,
39
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
40
+ std::string getModuleHierarchyInfo(
41
+ const std::vector<int64_t>& debug_handles,
42
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
43
+
44
+ const ska::flat_hash_map<int64_t, DebugInfoTuple>& getCallStackPtrMap()
45
+ const {
46
+ return callstack_ptr_map_;
47
+ }
48
+
49
+ private:
50
+ std::pair<std::string, std::string> getSourceDebugModuleHierarchyInfo(
51
+ const std::vector<int64_t>& debug_handles,
52
+ const std::string& top_module_type_name = "ModuleTypeUnknown") const;
53
+ ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
54
+ };
55
+
56
+ } // namespace jit
57
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h ADDED
@@ -0,0 +1,196 @@
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cerrno>
5
+ #include <cstddef>
6
+ #include <cstring>
7
+ #include <fstream>
8
+ #include <istream>
9
+ #include <memory>
10
+
11
+ #include <c10/core/CPUAllocator.h>
12
+ #include <c10/core/impl/alloc_cpu.h>
13
+ #include <caffe2/serialize/read_adapter_interface.h>
14
+
15
+ #if defined(HAVE_MMAP)
16
+ #include <fcntl.h>
17
+ #include <sys/mman.h>
18
+ #include <sys/stat.h>
19
+ #include <sys/types.h>
20
+ #include <unistd.h>
21
+ #endif
22
+
23
+ /**
24
+ * @file
25
+ *
26
+ * Helpers for identifying file formats when reading serialized data.
27
+ *
28
+ * Note that these functions are declared inline because they will typically
29
+ * only be called from one or two locations per binary.
30
+ */
31
+
32
+ namespace torch {
33
+ namespace jit {
34
+
35
+ /**
36
+ * The format of a file or data stream.
37
+ */
38
+ enum class FileFormat {
39
+ UnknownFileFormat = 0,
40
+ FlatbufferFileFormat,
41
+ ZipFileFormat,
42
+ };
43
+
44
+ /// The size of the buffer to pass to #getFileFormat(), in bytes.
45
+ constexpr size_t kFileFormatHeaderSize = 8;
46
+ constexpr size_t kMaxAlignment = 16;
47
+
48
+ /**
49
+ * Returns the likely file format based on the magic header bytes in @p header,
50
+ * which should contain the first bytes of a file or data stream.
51
+ */
52
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
53
+ static inline FileFormat getFileFormat(const char* data) {
54
+ // The size of magic strings to look for in the buffer.
55
+ static constexpr size_t kMagicSize = 4;
56
+
57
+ // Bytes 4..7 of a Flatbuffer-encoded file produced by
58
+ // `flatbuffer_serializer.h`. (The first four bytes contain an offset to the
59
+ // actual Flatbuffer data.)
60
+ static constexpr std::array<char, kMagicSize> kFlatbufferMagicString = {
61
+ 'P', 'T', 'M', 'F'};
62
+ static constexpr size_t kFlatbufferMagicOffset = 4;
63
+
64
+ // The first four bytes of a ZIP file.
65
+ static constexpr std::array<char, kMagicSize> kZipMagicString = {
66
+ 'P', 'K', '\x03', '\x04'};
67
+
68
+ // Note that we check for Flatbuffer magic first. Since the first four bytes
69
+ // of flatbuffer data contain an offset to the root struct, it's theoretically
70
+ // possible to construct a file whose offset looks like the ZIP magic. On the
71
+ // other hand, bytes 4-7 of ZIP files are constrained to a small set of values
72
+ // that do not typically cross into the printable ASCII range, so a ZIP file
73
+ // should never have a header that looks like a Flatbuffer file.
74
+ if (std::memcmp(
75
+ data + kFlatbufferMagicOffset,
76
+ kFlatbufferMagicString.data(),
77
+ kMagicSize) == 0) {
78
+ // Magic header for a binary file containing a Flatbuffer-serialized mobile
79
+ // Module.
80
+ return FileFormat::FlatbufferFileFormat;
81
+ } else if (std::memcmp(data, kZipMagicString.data(), kMagicSize) == 0) {
82
+ // Magic header for a zip file, which we use to store pickled sub-files.
83
+ return FileFormat::ZipFileFormat;
84
+ }
85
+ return FileFormat::UnknownFileFormat;
86
+ }
87
+
88
+ /**
89
+ * Returns the likely file format based on the magic header bytes of @p data.
90
+ * If the stream position changes while inspecting the data, this function will
91
+ * restore the stream position to its original offset before returning.
92
+ */
93
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
94
+ static inline FileFormat getFileFormat(std::istream& data) {
95
+ FileFormat format = FileFormat::UnknownFileFormat;
96
+ std::streampos orig_pos = data.tellg();
97
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
98
+ std::array<char, kFileFormatHeaderSize> header;
99
+ data.read(header.data(), header.size());
100
+ if (data.good()) {
101
+ format = getFileFormat(header.data());
102
+ }
103
+ data.seekg(orig_pos, data.beg);
104
+ return format;
105
+ }
106
+
107
+ /**
108
+ * Returns the likely file format based on the magic header bytes of the file
109
+ * named @p filename.
110
+ */
111
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
112
+ static inline FileFormat getFileFormat(const std::string& filename) {
113
+ std::ifstream data(filename, std::ifstream::binary);
114
+ return getFileFormat(data);
115
+ }
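A trivial usage sketch of the detection helpers above (the function name is illustrative): pick a loader based on the detected container format of a model file.

    inline bool isFlatbufferModel(const std::string& filename) {
      return torch::jit::getFileFormat(filename) ==
          torch::jit::FileFormat::FlatbufferFileFormat;
    }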
116
+
117
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
118
+ static void file_not_found_error() {
119
+ std::stringstream message;
120
+ message << "Error while opening file: ";
121
+ if (errno == ENOENT) {
122
+ message << "no such file or directory" << std::endl;
123
+ } else {
124
+ message << "error no is: " << errno << std::endl;
125
+ }
126
+ TORCH_CHECK(false, message.str());
127
+ }
128
+
129
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
130
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_file_content(
131
+ const char* filename) {
132
+ #if defined(HAVE_MMAP)
133
+ int fd = open(filename, O_RDONLY);
134
+ if (fd < 0) {
135
+ // failed to open file, chances are it's no such file or directory.
136
+ file_not_found_error();
137
+ }
138
+ struct stat statbuf {};
139
+ fstat(fd, &statbuf);
140
+ size_t size = statbuf.st_size;
141
+ void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
142
+ close(fd);
143
+ auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); };
144
+ std::shared_ptr<char> data(reinterpret_cast<char*>(ptr), deleter);
145
+ #else
146
+ FILE* f = fopen(filename, "rb");
147
+ if (f == nullptr) {
148
+ file_not_found_error();
149
+ }
150
+ fseek(f, 0, SEEK_END);
151
+ size_t size = ftell(f);
152
+ fseek(f, 0, SEEK_SET);
153
+ // make sure buffer size is multiple of alignment
154
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
155
+ std::shared_ptr<char> data(
156
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
157
+ fread(data.get(), size, 1, f);
158
+ fclose(f);
159
+ #endif
160
+ return std::make_tuple(data, size);
161
+ }
162
+
163
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
164
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_stream_content(
165
+ std::istream& in) {
166
+ // get size of the stream and reset to orig
167
+ std::streampos orig_pos = in.tellg();
168
+ in.seekg(orig_pos, std::ios::end);
169
+ const long size = in.tellg();
170
+ in.seekg(orig_pos, in.beg);
171
+
172
+ // read stream
173
+ // NOLINT make sure buffer size is multiple of alignment
174
+ size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
175
+ std::shared_ptr<char> data(
176
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
177
+ in.read(data.get(), size);
178
+
179
+ // reset stream to original position
180
+ in.seekg(orig_pos, in.beg);
181
+ return std::make_tuple(data, size);
182
+ }
183
+
184
+ // NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
185
+ static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
186
+ caffe2::serialize::ReadAdapterInterface* rai) {
187
+ size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment;
188
+ std::shared_ptr<char> data(
189
+ static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
190
+ rai->read(
191
+ 0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes");
192
+ return std::make_tuple(data, buffer_size);
193
+ }
194
+
195
+ } // namespace jit
196
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h ADDED
@@ -0,0 +1,136 @@
1
+ #pragma once
2
+
3
+ #include <istream>
4
+ #include <memory>
5
+ #include <string>
6
+ #include <unordered_map>
7
+ #include <vector>
8
+
9
+ #include <ATen/core/ivalue.h>
10
+ #include <c10/core/Device.h>
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <torch/csrc/jit/mobile/module.h>
14
+
15
+ /**
16
+ * Defines the public API for loading flatbuffer-serialized mobile modules.
17
+ * Note that this header must not include or depend on flatbuffer-defined
18
+ * types, to avoid leaking those details to PyTorch clients.
19
+ */
20
+
21
+ namespace torch {
22
+ namespace jit {
23
+
24
+ /// All non-copied data pointers provided to `parse_and_initialize_*` functions
25
+ /// must be aligned to this boundary. Since the Module will point directly into
26
+ /// the data, this alignment is necessary to ensure that certain types/structs
27
+ /// are properly aligned.
28
+ constexpr size_t kFlatbufferDataAlignmentBytes = 16;
29
+
30
+ /// Maps file names to file contents.
31
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
32
+
33
+ // On high level, to produce a Module from a file on disk, we need to go
34
+ // through the follow steps:
35
+ // 1. Read: Read the file from disk -> memory
36
+ // 2. Deserialize: Parse the bytes to produce some in memory manipulable
37
+ // structure
38
+ // 3. Module initialization: Produce mobile::Module out of the structure
39
+ // produced in 2.
40
+ // Under this context, the structure described in 2. is the flatbuffer-defined
41
+ // type mobile::serialization::Module. However, this step/type is not visible in
42
+ // the public API.
43
+
44
+ // Parse a mobile::Module from raw bytes.
45
+ //
46
+ // This function does steps 2+3 described above.
47
+ //
48
+ // Does not take ownership of `data`; if you want it to take ownership, see the
49
+ // shared_ptr overload of this function.
50
+ //
51
+ // If should_copy_tensor_memory is true, then the returned module will NOT have
52
+ // refences to `data`, so `data` can be freed immediately.
53
+ //
54
+ // If should_copy_tensor_memory is false, then returned module will have tensors
55
+ // that points inside of `data`; the caller will need to make sure that `data`
56
+ // outlives the returned Module. Also, `data` must be aligned to
57
+ // kFlatbufferDataAlignmentBytes.
58
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
59
+ void* data,
60
+ size_t size, // of `data`, in bytes.
61
+ c10::optional<at::Device> device = c10::nullopt,
62
+ ExtraFilesMap* extra_files = nullptr,
63
+ bool should_copy_tensor_memory = false);
64
+
65
+ // Parse a mobile::Module from raw bytes.
66
+ //
67
+ // This function does steps 2+3 described above.
68
+ //
69
+ // The returned Module holds a reference to `data`, which must be aligned to
70
+ // kFlatbufferDataAlignmentBytes.
71
+ //
72
+ // If you do not want the Module to hold a reference to `data`, see the raw
73
+ // pointer overload of this function.
74
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
75
+ std::shared_ptr<char> data,
76
+ size_t size, // of `data`, in bytes.
77
+ c10::optional<at::Device> device = c10::nullopt,
78
+ ExtraFilesMap* extra_files = nullptr);
79
+
80
+ // Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
81
+ //
82
+ // This is the same as parse_and_initialize_mobile_module() except that it also
83
+ // extracts JIT source files and constants. Can be used to construct a
84
+ // jit::Module.
85
+ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
86
+ void* data,
87
+ size_t size, // of `data`, in bytes.
88
+ ExtraFilesMap& jit_sources,
89
+ std::vector<IValue>& jit_constants,
90
+ c10::optional<at::Device> device = c10::nullopt,
91
+ ExtraFilesMap* extra_files = nullptr);
92
+
93
+ // Load a mobile::Module from a filepath.
94
+ //
95
+ // This function does steps 1+2+3 described above.
96
+ //
97
+ // We need to have this as a convienience because Python API will need to wrap
98
+ // this. C++ clients should use one of the versions of
99
+ // parse_and_initialize_mobile_module() so they can manage the raw data more
100
+ // directly.
101
+ TORCH_API mobile::Module load_mobile_module_from_file(
102
+ const std::string& filename,
103
+ c10::optional<at::Device> device = c10::nullopt,
104
+ ExtraFilesMap* extra_files = nullptr);
105
+
106
+ TORCH_API uint64_t get_bytecode_version(std::istream& in);
107
+ TORCH_API uint64_t get_bytecode_version(const std::string& filename);
108
+ TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);
109
+
110
+ TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
111
+ char* flatbuffer_content);
112
+
113
+ // The methods below are less efficient because it need to read the stream in
114
+ // its entirity to a buffer
115
+ TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
116
+ std::istream& in,
117
+ c10::optional<at::Device> device = c10::nullopt,
118
+ ExtraFilesMap* extra_files = nullptr);
119
+
120
+ TORCH_API mobile::Module parse_flatbuffer_no_object(
121
+ std::shared_ptr<char> data,
122
+ size_t size,
123
+ c10::optional<at::Device> device);
124
+
125
+ TORCH_API mobile::Module parse_and_initialize_mobile_module(
126
+ void* data,
127
+ size_t,
128
+ c10::optional<at::Device>,
129
+ ExtraFilesMap* extra_files,
130
+ bool should_copy_tensor_memory);
131
+
132
+ // no op, TODO(qihan) delete
133
+ TORCH_API bool register_flatbuffer_loader();
134
+
135
+ } // namespace jit
136
+ } // namespace torch
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+
5
+ #include <c10/util/Optional.h>
6
+ #include <torch/csrc/jit/mobile/code.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace mobile {
11
+
12
+ class Frame {
13
+ public:
14
+ explicit Frame(const Code& code) : code_(code) {}
15
+ const Code& getCode() const {
16
+ return code_;
17
+ }
18
+
19
+ void step() {
20
+ pc_++;
21
+ }
22
+
23
+ void jump(size_t n) {
24
+ pc_ += n;
25
+ }
26
+
27
+ size_t getPC() const {
28
+ return pc_;
29
+ }
30
+
31
+ const Instruction& getInstruction() const {
32
+ return code_.instructions_.at(pc_);
33
+ }
34
+
35
+ c10::optional<int64_t> getDebugHandle() const {
36
+ return getDebugHandle(pc_);
37
+ }
38
+
39
+ c10::optional<int64_t> getDebugHandle(size_t pc) const {
40
+ if (pc >= code_.debug_handles_.size()) {
41
+ return {};
42
+ }
43
+ return code_.debug_handles_[pc];
44
+ }
45
+
46
+ private:
47
+ const Code& code_;
48
+ size_t pc_{0};
49
+ };
50
+
51
+ } // namespace mobile
52
+ } // namespace jit
53
+ } // namespace torch
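
For context, a sketch (not part of the diff) of how an interpreter-style loop might walk a mobile::Code through this Frame. It assumes `code` comes from an already-loaded function (e.g. mobile::Function::get_code()) and that Code exposes its instruction list as instructions_, as used by the header above.

#include <torch/csrc/jit/mobile/frame.h>

// Walks every instruction once, looking up the optional debug handle at each
// pc. A real interpreter would also dispatch on the instruction's opcode.
void walk(const torch::jit::mobile::Code& code) {
  torch::jit::mobile::Frame frame(code);
  while (frame.getPC() < code.instructions_.size()) {
    const torch::jit::Instruction& inst = frame.getInstruction();
    (void)inst; // dispatch would happen here
    if (c10::optional<int64_t> handle = frame.getDebugHandle()) {
      (void)*handle; // maps the current pc back to source-level debug info
    }
    frame.step();
  }
}
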
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h ADDED
@@ -0,0 +1,86 @@
+ #pragma once
+
+ #include <vector>
+
+ #include <ATen/core/function.h>
+ #include <ATen/core/function_schema.h>
+ #include <ATen/core/ivalue.h>
+ #include <torch/csrc/jit/mobile/code.h>
+
+ namespace torch {
+ namespace jit {
+ enum OpCode : uint8_t;
+ struct Instruction;
+ struct OperatorString;
+
+ namespace mobile {
+
+ class TORCH_API Function : public torch::jit::Function {
+  public:
+   explicit Function(c10::QualifiedName name);
+   Function(
+       c10::QualifiedName name,
+       Code code,
+       at::optional<c10::FunctionSchema> schema);
+   void run(Stack& stack) override;
+   at::IValue operator()(Stack& stack);
+   void ensure_defined() override {}
+   size_t num_inputs() const override;
+   const c10::QualifiedName& qualname() const override;
+   bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) override;
+
+   // NOTE: the APIs below are dangerous: if you call append_instruction with
+   // a dbg_handle and then call it without one, the dbg_handles will become
+   // misaligned. Therefore, only use ONE variant at a time.
+   void append_instruction(OpCode op, int X, int N, int64_t dbg_handle);
+   void append_instruction(OpCode op, int X, int N);
+   void append_operator(
+       const std::string& name,
+       const std::string& overload_name,
+       const c10::optional<int>& num_specified_args);
+   void append_constant(const c10::IValue& constant);
+   void append_type(const c10::TypePtr& type);
+   void append_function(mobile::Function& func);
+
+   void set_register_size(size_t size);
+
+   int64_t get_debug_handle(size_t pc) const;
+   const Code& get_code() const;
+   Code& get_code();
+
+   torch::jit::Function& setSchema(c10::FunctionSchema schema) override;
+   bool hasSchema() const;
+   const c10::FunctionSchema& getSchema() const override;
+
+   // Returns the debug handles corresponding to where the execution
+   // was halted due to an exception.
+   // If no corresponding debug handle is found, -1 is returned.
+   const std::vector<int64_t>& getExceptionDebugHandles() const;
+   static Function& registerFunc(
+       const std::string& qualified_name,
+       const std::vector<Instruction>& instructions,
+       const std::vector<c10::IValue>& constants,
+       const std::vector<c10::TypePtr>& types,
+       const size_t register_size);
+
+   // If not yet initialized, initialize by loading the operators.
+   // Returns true if all ops were loaded; returns false if some op was not
+   // found in the current runtime. The ops that were not found are recorded
+   // in unsupported_op_names.
+   bool initialize_operators(bool should_check_operators);
+
+  private:
+   c10::QualifiedName name_;
+   Code code_;
+   at::optional<c10::FunctionSchema> schema_; // (byte-code version 4+)
+ };
+
+ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
+     c10::OperatorName opname,
+     c10::optional<int> num_specified_args);
+
+ TORCH_API std::string operator_str(const c10::OperatorName& opname);
+
+ } // namespace mobile
+ } // namespace jit
+ } // namespace torch
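
A small sketch (not part of the diff) that lists the operators a loaded mobile::Function refers to, using get_code() and operator_str() from this header. Obtaining `func` from a loaded module (e.g. via its "forward" method) and the op_names_ field on mobile::Code (declared in code.h) are assumptions of the sketch.

#include <torch/csrc/jit/mobile/function.h>

#include <iostream>

// Prints "name.overload" for every operator the function's bytecode names.
void print_ops(const torch::jit::mobile::Function& func) {
  const torch::jit::mobile::Code& code = func.get_code();
  for (const c10::OperatorName& op : code.op_names_) {
    std::cout << torch::jit::mobile::operator_str(op) << "\n";
  }
}
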
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h ADDED
@@ -0,0 +1,112 @@
+ #pragma once
+ #include <torch/csrc/jit/mobile/module.h>
+ #include <torch/csrc/jit/mobile/parse_operators.h>
+
+ #include <istream>
+ #include <memory>
+
+ #include <caffe2/serialize/file_adapter.h>
+
+ namespace torch {
+ namespace jit {
+ using caffe2::serialize::FileAdapter;
+ using caffe2::serialize::IStreamAdapter;
+ using caffe2::serialize::ReadAdapterInterface;
+ using ExtraFilesMap = std::unordered_map<std::string, std::string>;
+
+ constexpr const char* kArchiveNameBytecode = "bytecode";
+ constexpr const char* kArchiveNameConstants = "constants";
+ constexpr const char* kArchiveNameVersion = "version";
+
+ // The family of methods below load a serialized Mobile Module
+ // into a mobile::Module object.
+ TORCH_API mobile::Module _load_for_mobile(
+     std::istream& in,
+     c10::optional<at::Device> device,
+     ExtraFilesMap& extra_file,
+     uint64_t module_load_options = kDefaultMobileLoadOptions);
+
+ TORCH_API mobile::Module _load_for_mobile(
+     const std::string& filename,
+     c10::optional<at::Device> device,
+     ExtraFilesMap& extra_files);
+
+ TORCH_API mobile::Module _load_for_mobile(
+     std::unique_ptr<ReadAdapterInterface> rai,
+     c10::optional<c10::Device> device,
+     ExtraFilesMap& extra_files,
+     uint64_t module_load_options = kDefaultMobileLoadOptions);
+
+ TORCH_API mobile::Module _load_for_mobile(
+     const std::string& filename,
+     c10::optional<at::Device> device,
+     ExtraFilesMap& extra_files,
+     uint64_t module_load_options);
+
+ TORCH_API mobile::Module _load_for_mobile(
+     std::istream& in,
+     c10::optional<at::Device> device = c10::nullopt);
+
+ TORCH_API mobile::Module _load_for_mobile(
+     const std::string& filename,
+     c10::optional<at::Device> device = c10::nullopt);
+
+ TORCH_API mobile::Module _load_for_mobile(
+     std::unique_ptr<ReadAdapterInterface> rai,
+     c10::optional<c10::Device> device = c10::nullopt);
+
+ /**
+  * Load only the contents of the "extra/" files whose names are
+  * passed in the map (extra_files). Populate the corresponding values
+  * with the contents of those files. Do not attempt to load the entire
+  * model, and stop once the extra files have been extracted.
+  *
+  * This API is needed to be able to load GPU models on Linux CPU
+  * machines and extract only the extra files, so that we can inspect
+  * the metadata that was added to the .ptl archive when it was
+  * generated.
+  */
+ void _load_extra_only_for_mobile(
+     const std::string& filename,
+     c10::optional<at::Device> device,
+     ExtraFilesMap& extra_files);
+
+ // Currently used by both mobile/import.cpp and model_compatibility.cpp.
+ // Should be removed after model_compatibility.cpp starts using the simplified
+ // versions of type_resolver and obj_loader.
+ at::TypePtr resolveTypeNameMobile(
+     const c10::QualifiedName& qn,
+     std::shared_ptr<CompilationUnit> compilation_unit);
+ c10::StrongTypePtr typeResolverMobile(
+     const c10::QualifiedName& qn,
+     const std::shared_ptr<CompilationUnit>& compilation_unit);
+ c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
+     const at::StrongTypePtr& type,
+     const at::IValue& input,
+     mobile::CompilationUnit& mobile_compilation_unit);
+
+ // Given a reader which has access to a model file,
+ // return true if there exist tensors in the `bytecode` archive.
+ bool isTensorInBytecodeArchive(
+     caffe2::serialize::PyTorchStreamReader& stream_reader);
+
+ namespace mobile {
+
+ /**
+  * Given a torch::jit::mobile::Module, return a set of operator names
+  * (with overload name) that are used by any method in this mobile
+  * Module. This method runs through the bytecode for all methods
+  * in the specified model (module) and extracts all the root
+  * operator names. Root operators are operators that are called
+  * directly by the model (as opposed to non-root operators, which
+  * may be called transitively by the root operators).
+  */
+ TORCH_API std::set<std::string> _export_operator_list(
+     torch::jit::mobile::Module& module);
+
+ } // namespace mobile
+
+ } // namespace jit
+ } // namespace torch
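
A usage sketch (not part of the diff): load a zip-format model through _load_for_mobile() and dump its root operator set with _export_operator_list(). The path "model.ptl" is a placeholder.

#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>

#include <iostream>

int main() {
  torch::jit::ExtraFilesMap extra_files; // filled with any "extra/" entries found
  torch::jit::mobile::Module m =
      torch::jit::_load_for_mobile("model.ptl", c10::nullopt, extra_files);

  // Root operators called directly by the model's bytecode.
  for (const std::string& op : torch::jit::mobile::_export_operator_list(m)) {
    std::cout << op << "\n";
  }
  return 0;
}
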
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h ADDED
@@ -0,0 +1,38 @@
+ #pragma once
+
+ #include <ATen/core/TensorBase.h>
+ #include <c10/core/Device.h>
+ #include <c10/util/Optional.h>
+ #include <torch/csrc/jit/mobile/module.h>
+
+ #include <istream>
+ #include <map>
+ #include <string>
+
+ namespace torch {
+ namespace jit {
+
+ /**
+  * Loads named parameters from the serialized data in @p in.
+  *
+  * Calls #TORCH_CHECK() if the data format is not recognized.
+  */
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
+     std::istream& in,
+     c10::optional<at::Device> device = c10::nullopt);
+
+ /**
+  * Loads named parameters from the serialized data in @p filename.
+  *
+  * Calls #TORCH_CHECK() if the data format is not recognized.
+  */
+ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
+     const std::string& filename,
+     c10::optional<at::Device> device = c10::nullopt);
+
+ // NOTE: Please prefer using _load_parameters over using the function below.
+ TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
+     const mobile::Module& module);
+
+ } // namespace jit
+ } // namespace torch
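
A sketch (not part of the diff) of the intended call pattern: read a parameter archive produced by the matching mobile export path and print each tensor's shape. The path "params.ptl" is a placeholder.

#include <torch/csrc/jit/mobile/import_data.h>
#include <torch/torch.h>

#include <iostream>

int main() {
  // TORCH_CHECK fails here if the file is not a recognized parameter format.
  std::map<std::string, at::Tensor> params =
      torch::jit::_load_parameters("params.ptl");
  for (const auto& kv : params) {
    std::cout << kv.first << ": " << kv.second.sizes() << "\n";
  }
  return 0;
}
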
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ /**
+  * @file
+  * Declarations shared between import_data.cpp and export_data.cpp
+  */
+
+ namespace torch {
+ namespace jit {
+ namespace mobile {
+
+ namespace internal {
+ /**
+  * The name of the mobile::Module attribute which contains saved parameters,
+  * as a Dict of names to Tensors. Only used for Flatbuffer serialization.
+  */
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ constexpr char kSavedParametersAttributeName[] = "data";
+ } // namespace internal
+
+ } // namespace mobile
+ } // namespace jit
+ } // namespace torch