applied-ai-018 commited on
Commit
db4ff34
·
verified ·
1 Parent(s): 71a0112

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/native.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torchgen/api/autograd.py +853 -0
  7. env-llmeval/lib/python3.10/site-packages/torchgen/api/structured.py +158 -0
  8. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__init__.py +3 -0
  9. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/signatures.py +424 -0
  14. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/types.py +190 -0
  15. env-llmeval/lib/python3.10/site-packages/torchgen/api/types/types_base.py +270 -0
  16. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py +2 -0
  22. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py +73 -0
  26. env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py +81 -0
  27. env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py +392 -0
  32. env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py +7 -0
  33. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml +61 -0
  35. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/ATenOpList.cpp +36 -0
  36. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/CompositeViewCopyKernels.cpp +73 -0
  37. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunction.h +23 -0
  38. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions.h +29 -0
  39. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h +22 -0
  40. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.cpp +13 -0
  41. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Function.h +26 -0
  42. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/FunctionalInverses.h +16 -0
  43. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.cpp +103 -0
  44. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h +143 -0
  45. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h +11 -0
  46. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunctions.h +33 -0
  47. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h +23 -0
  48. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h +19 -0
  49. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operator.h +18 -0
  50. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp +19 -0
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/gen_lazy_tensor.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/__pycache__/utils.cpython-310.pyc ADDED
Binary file (15.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc ADDED
Binary file (3.27 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/native.cpython-310.pyc ADDED
Binary file (3.19 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/autograd.py ADDED
@@ -0,0 +1,853 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from dataclasses import dataclass
3
+ from typing import cast, Dict, List, Match, Optional, Sequence, Set, Tuple
4
+
5
+ from torchgen import local
6
+
7
+ from torchgen.api import cpp
8
+ from torchgen.api.types import BaseCType, Binding, NamedCType, tensorListT
9
+ from torchgen.model import (
10
+ BaseTy,
11
+ BaseType,
12
+ FunctionSchema,
13
+ ListType,
14
+ NativeFunction,
15
+ NativeFunctionsViewGroup,
16
+ SchemaKind,
17
+ Type,
18
+ )
19
+ from torchgen.utils import IDENT_REGEX
20
+
21
+
22
# Represents a saved attribute involved in backward calculation.
# Note that it can be a derived property of an input argument, e.g.:
# we could save `other.scalar_type()` instead of the entire `other` tensor.
@dataclass(frozen=True)
class SavedAttribute:
    # The NamedCType holds the updated name and cpp type of the attribute.
    # For the name, a suffix is appended if it's a derived property,
    # e.g.: `other_scalar_type`.
    nctype: NamedCType

    # The expression to read the derived property at save time, e.g.:
    # `other.scalar_type()`.
    expr: str
34
+
35
+
36
# Represents a backward formula that calculates derivatives for one
# or more tensors.
@dataclass(frozen=True)
class Derivative:
    # The formula string (legit C++ expression).
    # Note that expressions against input arguments have been replaced with the
    # corresponding saved attributes.
    # E.g.:
    #  raw formula: `mul_tensor_backward(grad, self, other.scalar_type())`
    #         here: `mul_tensor_backward(grad, self, other_scalar_type)`
    formula: str

    # The formula string before input argument replacement.
    original_formula: str

    # Names of the arguments for which this formula calculates derivatives.
    var_names: Tuple[str, ...]

    # Saved inputs that are referenced by the formula.
    saved_inputs: Tuple[SavedAttribute, ...]

    # Saved outputs that are referenced by the formula.
    saved_outputs: Tuple[SavedAttribute, ...]

    # Gradients that are referenced by name in the formula.
    named_gradients: Set[str]
62
+
63
+
64
# Represents a forward formula that calculates forward derivatives
# for one tensor.
@dataclass(frozen=True)
class ForwardDerivative:
    # The formula string (legit C++ expression).
    # Note that special keywords such as "linear" or "element_wise" have been
    # replaced by the automatically generated formula.
    formula: str

    # Name of the output arguments for which this formula calculates forward
    # derivatives.
    var_names: Tuple[str, ...]

    # Type of the output arguments for which this formula calculates forward
    # derivatives.
    var_types: Tuple[Type, ...]

    # Inputs for which the forward derivatives are required for this formula.
    required_inputs_fw_grad: Optional[Tuple[str, ...]]

    # Inputs for which the primal is required for this formula.
    required_inputs_primal: Optional[Tuple[str, ...]]

    # Flag to specify if this formula requires the original value of self.
    # This is only used by inplace operations.
    required_original_self_value: bool

    # True if this formula is specified in derivatives.yaml, or if we are
    # re-using the out-of-place formula for inplace.
    is_reusing_outplace_formula: bool
94
+
95
+
96
# Represents differentiability info for a NativeFunction.
@dataclass(frozen=True)
class DifferentiabilityInfo:
    # The base name read from derivatives.yaml.
    name: str

    # The matching native function.
    #
    # There can be multiple NativeFunction having the same base name:
    #  - different overloads with different types of input arguments;
    #  - in-place/out/functional variants of the same function;
    #
    # We first use the schema string (under the 'name' key) in derivatives.yaml
    # to find the NativeFunction having the same schema string.
    # Then we find the in-place/out/functional variants of the matching function.
    # Among these variants, we choose the one having the same name as the
    # derivatives.yaml entry. If there is no exact match, then we choose the
    # in-place variant.
    # TODO: maybe the logic to search for all variants is no longer necessary?
    func: NativeFunction

    # The name of the generated autograd function.
    # It's set only if we will calculate a derivative, i.e.
    # 'args_with_derivatives' is not empty.
    op: Optional[str]

    # The derivatives formulae for this function.
    # Note that the length of this sequence is the number of differentiable inputs.
    derivatives: Sequence[Derivative]

    # The forward derivatives formulae for this function.
    # Note that the length of this sequence is the number of differentiable outputs.
    forward_derivatives: Sequence[ForwardDerivative]

    # The union of 'saved_inputs' of all 'derivatives'.
    all_saved_inputs: Sequence[SavedAttribute]

    # The union of 'saved_outputs' of all 'derivatives'.
    all_saved_outputs: Sequence[SavedAttribute]

    # All named gradients that are available for use, in the same
    # order as in the grads vector.
    available_named_gradients: Sequence[str]

    # The named gradients that are used in any of the derivatives.
    # Invariant: all(name in available_named_gradients for name in used_named_gradients)
    used_named_gradients: Set[str]

    # The function's input arguments for which it calculates derivatives.
    # It's the union of 'var_names' of all 'derivatives', sorted by the
    # argument order in the function schema.
    args_with_derivatives: Sequence[Binding]

    # Names of arguments whose derivative formula is 'non_differentiable'.
    non_differentiable_arg_names: Sequence[str]

    # Raw data read from derivatives.yaml.
    output_differentiability: Optional[List[bool]]

    # output_differentiability in derivatives.yaml can be a list of
    # conditions that express if the output is differentiable. In this case,
    # the number of conditions must match the number of outputs
    # (NB: we only support one condition right now).
    # output_differentiability gets populated with True for each condition,
    # while output_differentiability_conditions gets populated with the conditions.
    output_differentiability_conditions: Optional[List[str]]

    @property
    def has_derivatives(self) -> bool:
        # A derivative will be generated iff at least one input arg has a formula.
        return len(self.args_with_derivatives) > 0

    # Generates a new DifferentiabilityInfo using the exact same set of
    # derivative information, but with a new operator name.
    # This is used when generating "copy" variants of view ops,
    # which are able to use the exact same derivative formula as the original
    # view op. See Note [Codegen'd {view}_copy Operators]
    def create_view_copy_from_view_derivative(
        self, g: NativeFunctionsViewGroup
    ) -> Optional["DifferentiabilityInfo"]:
        if g.view_copy is None:
            return None
        f = g.view_copy

        name_split_by_period = self.name.split(".", maxsplit=2)
        # Append a "_copy" to the base name of the operator (but keep the
        # overload name the same).
        view_copy_name = f"{name_split_by_period[0]}_copy." + ".".join(
            name_split_by_period[1:]
        )
        view_copy_op_name = None if self.op is None else f"{self.op}_copy"

        return DifferentiabilityInfo(
            # Use the "_copy" version of name/func/op
            name=view_copy_name,
            func=f,
            op=view_copy_op_name,
            # But keep all derivative info the same
            derivatives=self.derivatives,
            forward_derivatives=self.forward_derivatives,
            all_saved_inputs=self.all_saved_inputs,
            all_saved_outputs=self.all_saved_outputs,
            available_named_gradients=self.available_named_gradients,
            used_named_gradients=self.used_named_gradients,
            args_with_derivatives=self.args_with_derivatives,
            non_differentiable_arg_names=self.non_differentiable_arg_names,
            output_differentiability=self.output_differentiability,
            output_differentiability_conditions=self.output_differentiability_conditions,
        )
203
+
204
+
205
def uses_ident(info: Optional[DifferentiabilityInfo], ident: str) -> bool:
    # Returns True iff any backward formula in `info` mentions `ident` as a
    # whole identifier (IDENT_REGEX is used so substrings don't false-match).
    if info is None:
        return False
    pattern = IDENT_REGEX.format(ident)
    return any(
        re.search(pattern, derivative.formula) is not None
        for derivative in info.derivatives
    )
213
+
214
+
215
def uses_retain_variables(info: Optional[DifferentiabilityInfo]) -> bool:
    # True iff any backward formula references the `retain_variables` identifier.
    return uses_ident(info, "retain_variables")
217
+
218
+
219
def uses_single_grad(info: Optional[DifferentiabilityInfo]) -> bool:
    # True iff any backward formula references the single `grad` identifier
    # (as opposed to named gradients / the grads vector).
    return uses_ident(info, "grad")
221
+
222
+
223
# Represents a differentiable `Argument`.
# How is it different from the `Argument` type?
# - It's processed Arguments which are differentiable and only used in the
#   context of the autograd codegen;
# - It can represent SelfArgument or regular Argument but not TensorOptionsArgument;
@dataclass(frozen=True)
class DifferentiableInput:
    # Argument name as it appears in the function schema.
    name: str
    # The torchgen model Type of the argument.
    type: Type

    # TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
    cpp_type: str
235
+
236
+
237
# Represents a differentiable `Return`.
# How is it different from the `Return` type?
# - The name in `Return` is optional. Here it is always populated using the same
#   `cpp.return_names()` method.
#   TODO: some cpp naming logic (e.g. resolving name conflict) might be irrelevant?
# - It's processed Returns which are differentiable, in compliance with the
#   `output_differentiability` field defined in derivatives.yaml (if specified),
#   and are only used in the context of the autograd codegen;
@dataclass(frozen=True)
class DifferentiableOutput:
    # Return name, always populated (see note above).
    name: str
    # The torchgen model Type of the return.
    type: Type

    # TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
    cpp_type: str
252
+
253
+
254
# Pairs a NativeFunction with its (per-dispatch-key) differentiability info.
@dataclass(frozen=True)
class NativeFunctionWithDifferentiabilityInfo:
    # The native function this info applies to.
    func: NativeFunction
    # Mapping from dispatch key (e.g. "Default") to differentiability info,
    # or None if no info matched this function.
    info: Optional[Dict[str, DifferentiabilityInfo]]
    # Mapping from dispatch key to forward derivative formulas, or None.
    fw_derivatives: Optional[Dict[str, Sequence[ForwardDerivative]]]
259
+
260
+
261
# TODO: Update comment below since it is out of date.
def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:
    """How are we going to call the underlying implementation of a
    declaration? There are two strategies:
        - use_derived: we want to call the implementation on CPUDoubleType
          (or a similar, derived Type instance). Because these derived
          instances deal in Tensors, not Variables (it's a completely different
          object, so it doesn't dispatch back to VariableType), code on
          this dispatch path needs to wrap/unwrap tensors. If the
          derived implementation takes and returns tensors, the
          implementation is usually differentiable (although we also use
          the derived dispatch path for non-differentiable functions
          that we still want to dispatch on the derived Type instance;
          e.g., size())
        - use_type: we want to call the implementation on Type, because
          it is implemented concretely, and the functions it invokes will
          get dispatched back to VariableType (which will ensure that they
          are differentiable.)
    """
    # fn is derived as long as any of its per-key differentiability infos
    # has_derivatives. dispatch_strategy() is used to guard generation of fns
    # in VariableType and ADInplaceOrViewType. We want to generate these
    # functions as long as a derivative is defined for ANY dispatch key.
    any_key_has_derivatives = fn.info is not None and any(
        info.has_derivatives for info in fn.info.values()
    )

    if not (fn.func.is_abstract or any_key_has_derivatives):
        # If the function is concrete (we don't have to override it) and we
        # didn't declare it in derivatives.yaml, we'll assume that it is
        # actually implemented out of differentiable functions. (This
        # assumption might not hold, but then you'll see gradcheck fail.)
        return "use_type"

    # If the function is abstract (not implemented on at::Type), we must
    # call the implementation on the derived type with unpacked tensors.

    # If the function has a derivative specified and is concrete, we could
    # call either implementation. We prefer calling the derived
    # type's implementation with unpacked tensors because it is more
    # performant in some cases: any internal calls to other ATen functions
    # won't have the history tracked.

    # If the function has a type dispatched argument (i.e. is a factory),
    # we prefer calling the derived type's implementation both because it is
    # more performant and to ensure factory functions return tensors with
    # _version of 0 (probably not strictly necessary, but nice to have to
    # keep versions simple to understand).
    return "use_derived"
309
+
310
+
311
def is_foreach_func(f: NativeFunction) -> bool:
    # Foreach ops are identified purely by their `_foreach_` name prefix.
    return f.func.name.name.base.startswith("_foreach_")
313
+
314
+
315
# note(crcrpar): Most foreach functions can reference an out-place `torch` function whose schema kind
# is functional for their backward derivatives (and forward derivatives in the future), i.e.,
# they would find such one in `functional_info_by_signature`. There however are some exceptions:

# Foreach ops whose derivative reference is an in-place (non-functional) op;
# gen_foreach_derivativeinfo falls back to non_functional_info_by_signature for these.
_foreach_with_inplace_ref = {"_foreach_zero_"}
# Foreach overloads that take a single Tensor operand instead of a TensorList;
# is_reference_for_foreach / the forward-derivative rewrite special-case these.
_foreach_with_tensor_overload = {
    "_foreach_add.Tensor",
    "_foreach_mul.Tensor",
    "_foreach_div.Tensor",
}
324
+
325
+
326
# Checks if `function_schema` is a native, non-foreach function which `f`,
# a foreach function, can reference to generate derivatives.
def is_reference_for_foreach(
    f: NativeFunction,
    function_schema: FunctionSchema,
) -> bool:
    # The base name after stripping the `_foreach_` prefix must match.
    stripped_base = f.func.name.name.base.split("_foreach_")[-1]
    if stripped_base != function_schema.name.name.base:
        return False

    # In-place references are only allowed for the whitelisted exceptions.
    if (
        function_schema.name.name.inplace
        and str(f.func.name) not in _foreach_with_inplace_ref
    ):
        return False

    # Each foreach argument type must match the reference argument type,
    # either exactly or as the element type of a list (e.g. TensorList vs Tensor).
    return all(
        ref_arg.type in (arg.type, getattr(arg.type, "elem", None))
        for arg, ref_arg in zip(
            f.func.arguments.flat_non_out,
            function_schema.arguments.flat_non_out,
        )
    )
346
+
347
+
348
# TODO(crcrpar): Avoid hard coding "Default" ideally.
def gen_foreach_derivativeinfo(
    foreach_function: NativeFunction,
    functional_info_by_signature: Dict[
        FunctionSchema, Dict[str, DifferentiabilityInfo]
    ],
    non_functional_info_by_signature: Dict[
        FunctionSchema, Dict[str, DifferentiabilityInfo]
    ],
    dispatch_key: str = "Default",
) -> Tuple[Optional[DifferentiabilityInfo], bool]:
    """Generate DifferentiabilityInfo for out-place foreach function, return the existing one for in-place.

    The second return value indicates whether the info is generated in this function.
    """
    ref_diff_info: Optional[DifferentiabilityInfo] = None

    # Find a functional, non-foreach reference op to derive formulas from.
    for function_schema, diff_info in functional_info_by_signature.items():
        if not is_reference_for_foreach(foreach_function, function_schema):
            continue
        ref_diff_info = diff_info[dispatch_key]
        if ref_diff_info is not None:
            break
    # note(crcrpar): It seems like `zero`'s info isn't available in functional_info_by_signature
    # while the info of `zero_` is in non_functional_info_by_signature
    if (
        ref_diff_info is None
        and foreach_function.func.kind() == SchemaKind.inplace
        and str(foreach_function.func.name) in _foreach_with_inplace_ref
    ):
        for function_schema, diff_info in non_functional_info_by_signature.items():
            if not is_reference_for_foreach(foreach_function, function_schema):
                continue
            ref_diff_info = diff_info[dispatch_key]
            if ref_diff_info is not None:
                break
    if ref_diff_info is None:
        return None, False

    # non out-place uses the existing Derivative.
    if foreach_function.func.kind() == SchemaKind.inplace:
        return ref_diff_info, False

    # Map reference-op argument names to foreach argument names (positional
    # correspondence of the non-out arguments), and name -> Argument lookup.
    # NOTE(review): relies on `function_schema` still being bound to the schema
    # that matched in the loop above.
    map_refarg2foreacharg, map_name2arg = {}, {}
    for i, (arg, ref_arg) in enumerate(
        zip(
            foreach_function.func.arguments.flat_non_out,
            function_schema.arguments.flat_non_out,
        )
    ):
        map_refarg2foreacharg[ref_arg.name] = arg.name
        map_name2arg[arg.name] = arg

    all_saved_inputs, all_saved_outputs, all_var_names = [], [], []
    modified_derivative_formulas = []
    for i, derivative in enumerate(ref_diff_info.derivatives):
        # Rewrite the scalar formula to index into the per-element gradients
        # and results. NOTE(review): plain substring replace — could clobber
        # identifiers merely containing "grad"/"result"; appears accepted here.
        modified_formula = derivative.formula.replace("grad", "grads[i]").replace(
            "result", "result[i]"
        )
        saved_inputs, saved_outputs = [], []
        # note(crcrpar): This context seems necessary to call `cpp.argument_type`
        with local.parametrize(
            use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
            use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
        ):
            for ref_input in derivative.saved_inputs:
                # e.g. "other.scalar_type()" -> jit name "other"
                ref_input_jit_name = ref_input.expr.split(".")[0]
                mapped_name = map_refarg2foreacharg[ref_input_jit_name]
                # List-typed foreach args are indexed per element.
                if isinstance(map_name2arg[mapped_name].type, ListType):
                    mapped_expr = mapped_name + "[i]"
                else:
                    mapped_expr = mapped_name
                new_expr = ref_input.expr.replace(ref_input_jit_name, mapped_expr)
                modified_formula = modified_formula.replace(
                    cast(str, ref_input.nctype.name), new_expr
                )

                nctype = cpp.argument_type(map_name2arg[mapped_name], binds=mapped_name)
                canonical_nctype = NamedCType(
                    nctype.name, nctype.type.remove_const_ref()
                )
                saved_inputs.append(
                    SavedAttribute(nctype=canonical_nctype, expr=mapped_name)
                )
            for ref_output in derivative.saved_outputs:
                # Only the whole "result" output is supported as a saved output.
                if ref_output.nctype.name == "result":
                    saved_outputs.append(
                        SavedAttribute(
                            nctype=NamedCType(
                                name="result", type=BaseCType(tensorListT)
                            ),
                            expr="result",
                        )
                    )
                else:
                    # NOTE(review): message is empty — the only signal is the
                    # exception type; consider naming the unsupported output.
                    raise RuntimeError("")
        var_names = [map_refarg2foreacharg[var] for var in derivative.var_names]
        all_var_names.extend(var_names)
        all_saved_inputs.extend(saved_inputs)
        all_saved_outputs.extend(saved_outputs)
        modified_derivative = Derivative(
            formula=modified_formula,
            original_formula=derivative.formula,
            var_names=tuple(var_names),
            saved_inputs=tuple(saved_inputs),
            saved_outputs=tuple(saved_outputs),
            named_gradients=set(),
        )
        modified_derivative_formulas.append(modified_derivative)

    with local.parametrize(
        use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
        use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
    ):
        # Bindings for the foreach args that actually have derivatives.
        args_with_derivatives = [
            Binding(
                name=arg.name,
                nctype=cpp.argument_type(arg, binds=arg.name),
                argument=arg,
                default=None,
            )
            for arg in foreach_function.func.arguments.flat_non_out
            if arg.name in all_var_names
        ]

    forward_derivatives: List[ForwardDerivative] = []
    fw_derivative: ForwardDerivative
    for fw_derivative in ref_diff_info.forward_derivatives:
        var_names: List[str] = list(fw_derivative.var_names)  # type: ignore[no-redef]
        var_types: List[Type] = list(fw_derivative.var_types)
        required_inputs_fw_grad: List[str] = []
        required_inputs_primal: List[str] = []
        if fw_derivative.required_inputs_fw_grad is not None:
            required_inputs_fw_grad = list(fw_derivative.required_inputs_fw_grad)
        if fw_derivative.required_inputs_primal:
            required_inputs_primal = list(fw_derivative.required_inputs_primal)
        modified_formula = fw_derivative.formula

        # Foreach's result is TensorList
        if "result" in modified_formula:
            modified_formula = fw_derivative.formula.replace("result", "result[i]")

        for foreach_arg, ref_arg in zip(
            foreach_function.func.arguments.flat_non_out,
            ref_diff_info.func.func.arguments.flat_non_out,
        ):
            # Modify reference forward formula
            if (
                isinstance(foreach_arg.type, ListType)
                and not foreach_arg.type.is_tensor_like()
            ):
                # Assuming ScalarList
                modified_formula = modified_formula.replace(
                    ref_arg.name, foreach_arg.name + "[i]"
                )
            elif foreach_arg.type.is_tensor_like():
                # Assuming TensorList / Tensor
                # assert isinstance(foreach_arg.type, ListType), f"{foreach_function.func.name}, {foreach_arg.type}"
                assert isinstance(foreach_arg.type, ListType) or (
                    foreach_arg.type == BaseType(BaseTy.Tensor)
                    and str(foreach_function.func.name) in _foreach_with_tensor_overload
                ), f"{foreach_function.func.name}, {foreach_arg.type}"
                # Rename primal ("_p") and tangent ("_t") references.
                for suffix in ("_p", "_t"):
                    curr_expr = ref_arg.name + suffix
                    if curr_expr in modified_formula:
                        new_expr = foreach_arg.name + suffix
                        modified_formula = modified_formula.replace(curr_expr, new_expr)
            else:
                # Assuming Scalar
                if foreach_arg.name != ref_arg.name:
                    modified_formula = modified_formula.replace(
                        ref_arg.name, foreach_arg.name
                    )

            # note(crcrpar): there should exist a cooler way...
            for i, name in enumerate(var_names):
                if name == ref_arg.name:
                    var_names[i] = foreach_arg.name
                    var_types[i] = foreach_arg.type
            for i, name in enumerate(required_inputs_fw_grad):
                if name == ref_arg.name:
                    required_inputs_fw_grad[i] = foreach_arg.name
            for i, name in enumerate(required_inputs_primal):
                if name == ref_arg.name:
                    required_inputs_primal[i] = foreach_arg.name
        forward_derivatives.append(
            ForwardDerivative(
                formula=modified_formula,
                var_names=tuple(var_names),
                var_types=tuple(var_types),
                required_inputs_fw_grad=tuple(required_inputs_fw_grad),
                required_inputs_primal=tuple(required_inputs_primal),
                required_original_self_value=fw_derivative.required_original_self_value,
                is_reusing_outplace_formula=fw_derivative.is_reusing_outplace_formula,
            )
        )

    return (
        DifferentiabilityInfo(
            name=foreach_function.func.name.name.base,
            func=foreach_function,
            op=f"Foreach{ref_diff_info.op}{foreach_function.func.name.overload_name}",
            derivatives=modified_derivative_formulas,
            forward_derivatives=forward_derivatives,
            all_saved_inputs=tuple(set(all_saved_inputs)),
            all_saved_outputs=tuple(set(all_saved_outputs)),
            available_named_gradients=(),
            used_named_gradients=set(),
            args_with_derivatives=args_with_derivatives,
            non_differentiable_arg_names=[],
            output_differentiability=None,
            output_differentiability_conditions=None,
        ),
        True,
    )
563
+
564
+
565
+ def match_differentiability_info(
566
+ native_functions: List[NativeFunction],
567
+ differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
568
+ ) -> List[NativeFunctionWithDifferentiabilityInfo]:
569
+ """Sets the "derivative" key on declarations to matching autograd function
570
+ In-place functions will use the out-of-place derivative definition if there
571
+ is no in-place specific derivative.
572
+ """
573
+
574
+ functional_info_by_signature = {
575
+ schema.signature(strip_default=True): info_dict
576
+ for schema, info_dict in differentiability_infos.items()
577
+ if schema.kind() == SchemaKind.functional
578
+ }
579
+ non_functional_info_by_signature = {
580
+ schema.signature(strip_default=True): info_dict
581
+ for schema, info_dict in differentiability_infos.items()
582
+ if schema.kind() != SchemaKind.functional
583
+ }
584
+
585
+ def find_info(
586
+ f: NativeFunction,
587
+ ) -> Tuple[Optional[Dict[str, DifferentiabilityInfo]], bool]:
588
+ # Don't bother matching info to generated out= variants
589
+ if "generated" in f.tags and f.func.kind() == SchemaKind.out:
590
+ return None, False
591
+
592
+ # (1) Check for an exact match
593
+ if f.func in differentiability_infos:
594
+ return differentiability_infos[f.func], True
595
+
596
+ # (2) If no exact match, check if the out-of-place variant
597
+ # of this operator has a match.
598
+ # i.e mul() for mul_() or mul_out()
599
+ # note(crcrpar): Check foreach or not because in-place foreach functions use backward defined for the existing
600
+ # native functions instead of the out-place counterparts.
601
+ f_sig = f.func.signature(strip_default=True)
602
+ if f_sig in functional_info_by_signature and not is_foreach_func(f):
603
+ return functional_info_by_signature[f_sig], False
604
+
605
+ # (3) Some operators have a derivative explicitly defined for the mutable
606
+ # variant, but get a code-generated out-of-place variant which does *not*
607
+ # come with a derivative formula.
608
+ # For the generated out-of-place variant, use the mutable variant's formula
609
+ # if it exists.
610
+ if "generated" in f.tags and f_sig in non_functional_info_by_signature:
611
+ info_dict = non_functional_info_by_signature[f_sig]
612
+ # See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
613
+ assert not any(
614
+ any("self" in str(inpt.nctype.name) for inpt in info.all_saved_inputs)
615
+ for info in info_dict.values()
616
+ ), f"""\
617
+ Attempted to convert a derivative formula for a mutable operator
618
+ to be used by automatically by its functional variant ("{str(f.func)}").
619
+ this is not currently supported (we'd need to fix up the formula in the codegen)."""
620
+ return info_dict, False
621
+
622
+ # (4) Generate derivative information of foreach functions if none is defined in `derivatives.yaml`
623
+ if is_foreach_func(f):
624
+ assert f.func not in differentiability_infos
625
+ diff_info, is_generated = gen_foreach_derivativeinfo(
626
+ f,
627
+ functional_info_by_signature,
628
+ non_functional_info_by_signature,
629
+ )
630
+ if diff_info is None:
631
+ return None, False
632
+ # TODO(crcrpar): Avoid hard coding "Default" ideally.
633
+ diff_info_dict = {"Default": diff_info}
634
+ if is_generated:
635
+ differentiability_infos[f.func] = diff_info_dict
636
+ functional_info_by_signature[f.func] = diff_info_dict
637
+ return diff_info_dict, is_generated
638
+
639
+ return None, False
640
+
641
+ result: List[NativeFunctionWithDifferentiabilityInfo] = []
642
+ for f in native_functions:
643
+ info_dict, is_exact_match = find_info(f)
644
+
645
+ # Currently, the '.strides()' to 'strides_or_error' replacement does not support
646
+ # 'self' derivatives of an inplace function, so we must check for this case.
647
+ if f.func.kind() == SchemaKind.inplace and (info_dict is not None):
648
+ for info in info_dict.values():
649
+ for derivative in info.derivatives:
650
+ if "self" in derivative.var_names:
651
+ for saved_input in derivative.saved_inputs:
652
+ assert "strides_or_error" not in saved_input.expr, (
653
+ "Calling '.strides()' in the 'self' derivative formula of an "
654
+ f"in-place function is not supported: {f.func}"
655
+ )
656
+
657
+ if not info_dict:
658
+ result.append(
659
+ NativeFunctionWithDifferentiabilityInfo(
660
+ func=f, info=None, fw_derivatives=None
661
+ )
662
+ )
663
+ continue
664
+
665
+ fw_derivative_dict: Dict[str, Sequence[ForwardDerivative]] = {}
666
+ for key, info in info_dict.items():
667
+ if not info.forward_derivatives:
668
+ fw_derivative_dict[key] = []
669
+ continue
670
+
671
+ forward_derivatives = info.forward_derivatives
672
+
673
+ # For functions that have a single def for out-of-place and inplace (like abs())
674
+ if f.func.kind() == SchemaKind.inplace:
675
+ # For inplace functions there is a little bit of work to do:
676
+ # 1) Validate the formula and make sure the input that is modified in not used:
677
+ # - If there is a formula for the inplace variant of the function (is_exact_match == True) then
678
+ # we make sure that the original value of the input that is being modified inplace (self_p) is
679
+ # not used in the formula. Note that the formula can use "original_self_p" here and that would
680
+ # trigger a clone of the original input.
681
+ # - If we are re-using the out of place formula (is_exact_match == False) then we replace every
682
+ # occurrence of self_p and self_t by original_self_p and original_self_t. These will be
683
+ # populated by cloned version of the original input (either the clone done by the backward AD
684
+ # logic if self is also used in a backward formula or a special clone that we add).
685
+ # 2) At this point, there cannot be a self_p in the formula.
686
+ # 3) Change "result" into "self_p" as by design, in the inplace function codegen, the result is
687
+ # simply called self (as it is modified inplace).
688
+ # 4) Update the required primals data in case it used to contain "result" but should now contain
689
+ # "self"
690
+ # 5) If it is not an exact match, the user formula is not modifying the existing forward grad
691
+ # inplace as it should. So add some code that makes sure that we do so if the forward grad
692
+ # already exists.
693
+
694
+ assert (
695
+ len(info.forward_derivatives) == 1
696
+ ) # Only single output inplace should exist
697
+ fw_info = info.forward_derivatives[0]
698
+ formula = fw_info.formula
699
+
700
+ def replace_self_with_original_self(formula: str, postfix: str) -> str:
701
+ def repl(m: Match[str]) -> str:
702
+ return f"{m.group(1)}original_self{postfix}{m.group(2)}"
703
+
704
+ return re.sub(IDENT_REGEX.format(f"self{postfix}"), repl, formula)
705
+
706
+ if re.search(IDENT_REGEX.format("self_p"), formula):
707
+ if is_exact_match:
708
+ # For manually defined formulas, don't allow the original value to be used
709
+ raise RuntimeError(
710
+ f'The formula for "{f.func.name}" is using the original value of self '
711
+ "that is being modified inplace. This would lead to wrong forward gradients. "
712
+ 'Please use "result" in the formula only.'
713
+ )
714
+ else:
715
+ # When the original formula is out of place, we save a clone of the primal
716
+ # value to be able to access this value if needed
717
+ # replace "self_p"/"self_t" from the formula by "original_self_p"/"original_self_t"
718
+ formula = replace_self_with_original_self(formula, "_p")
719
+ formula = replace_self_with_original_self(formula, "_t")
720
+
721
+ # replace "result" from the formula by "self_p"
722
+ def repl(m: Match[str]) -> str:
723
+ return f"{m.group(1)}self_p{m.group(2)}"
724
+
725
+ formula = re.sub(IDENT_REGEX.format("result"), repl, formula)
726
+
727
+ required_primals = fw_info.required_inputs_primal
728
+ if re.search(IDENT_REGEX.format("self_p"), formula):
729
+ required_primals = (
730
+ required_primals + ("self",) if required_primals else ("self",)
731
+ )
732
+
733
+ if not is_exact_match:
734
+ # NOTE [In-place forward AD formula Optimization]
735
+ #
736
+ # This optimization transforms the formula to directly do inplace, i.e.
737
+ # instead of self_t.copy_(self_t.op()) we do self_t.op_() when the following are met:
738
+ #
739
+ # 1) the formula satisfies the pattern: "self_t.op(*args)"
740
+ # 2) "op" in (1) needs to be the same as the op the derivative is for
741
+ #
742
+ # (2) may seem too strict, but currently the only ops that satisfy (1) also satisfy (2)
743
+ # If there is a need, we can relax (2) to allow any op that has an in-place variant
744
+ is_single_method_on_self_t = False
745
+ directly_do_inplace = False
746
+ op_name: Optional[str] = None
747
+ between_parens: Optional[str] = None
748
+ match = re.fullmatch(r"self_t.([\w]*)\((.*)\)", formula)
749
+ if match:
750
+ op_name, between_parens = match.group(1), match.group(2)
751
+
752
+ # We want to...
753
+ # Match: self_t.op1(other_p.op2(arg))
754
+ # Avoid: self_t.op1(args) + self_t.op2(args)
755
+ # Avoid: self_t.op1(other_p.op2(arg)) + self_t.op2(args)
756
+ def check_parens_nest_level_gt_zero(s: str) -> bool:
757
+ level = 1
758
+ for ch in s:
759
+ if ch == ")":
760
+ level -= 1
761
+ if level == 0:
762
+ return False
763
+ if ch == "(":
764
+ level += 1
765
+ return True
766
+
767
+ is_single_method_on_self_t = check_parens_nest_level_gt_zero(
768
+ between_parens
769
+ )
770
+ directly_do_inplace = (
771
+ is_single_method_on_self_t and op_name == info.name
772
+ )
773
+
774
+ if directly_do_inplace:
775
+ assert op_name is not None
776
+ assert between_parens is not None
777
+ formula = f"self_t_raw.defined() ? self_t_raw.{op_name}_({between_parens}) : {formula}"
778
+ else:
779
+ # Make sure that the forward grad is modified inplace when the original formula
780
+ # is out of place
781
+ formula = f"self_t_raw.defined() ? self_t_raw.copy_({formula}) : {formula}"
782
+
783
+ required_original_self_value = bool(
784
+ re.search(IDENT_REGEX.format("original_self_p"), formula)
785
+ ) or bool(re.search(IDENT_REGEX.format("original_self_t"), formula))
786
+
787
+ forward_derivatives = [
788
+ ForwardDerivative(
789
+ formula=formula,
790
+ var_names=("self",),
791
+ var_types=fw_info.var_types,
792
+ required_inputs_fw_grad=fw_info.required_inputs_fw_grad,
793
+ required_inputs_primal=required_primals,
794
+ required_original_self_value=required_original_self_value,
795
+ is_reusing_outplace_formula=not is_exact_match,
796
+ ),
797
+ ]
798
+
799
+ fw_derivative_dict[key] = forward_derivatives
800
+
801
+ result.append(
802
+ NativeFunctionWithDifferentiabilityInfo(
803
+ func=f, info=info_dict, fw_derivatives=fw_derivative_dict
804
+ )
805
+ )
806
+
807
+ return result
808
+
809
+
810
+ def is_differentiable(
811
+ name: str, type: Type, info: Optional[DifferentiabilityInfo]
812
+ ) -> bool:
813
+ return type.is_tensor_like() and (
814
+ info is None or name not in info.non_differentiable_arg_names
815
+ )
816
+
817
+
818
+ def gen_differentiable_outputs(
819
+ fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
820
+ ) -> List[DifferentiableOutput]:
821
+ f = fn.func
822
+ info = fn.info[key] if fn.info else None
823
+ outputs: List[DifferentiableOutput] = [
824
+ DifferentiableOutput(
825
+ name=name,
826
+ type=ret.type,
827
+ cpp_type=cpp.return_type(ret, symint=True).cpp_type(),
828
+ )
829
+ for name, ret in zip(cpp.return_names(f), f.func.returns)
830
+ ]
831
+ output_differentiability = info.output_differentiability if info else None
832
+ if output_differentiability is not None:
833
+ if len(output_differentiability) != len(outputs):
834
+ raise RuntimeError(
835
+ f"The length of output_differentiability ({len(output_differentiability)}), "
836
+ f"does not match the number of outputs ({len(outputs)})."
837
+ )
838
+ differentiable_outputs: List[DifferentiableOutput] = []
839
+ if False in output_differentiability and f.func.kind() == SchemaKind.inplace:
840
+ raise RuntimeError(
841
+ "output_differentiability=False for inplace operation (version_counter won't get updated)"
842
+ )
843
+ for differentiable, output in zip(output_differentiability, outputs):
844
+ if differentiable:
845
+ differentiable_outputs.append(output)
846
+ return differentiable_outputs
847
+ candidate_differentiable_outputs = list(
848
+ filter(lambda r: is_differentiable(r.name, r.type, info), outputs)
849
+ )
850
+ if uses_single_grad(info):
851
+ return candidate_differentiable_outputs[:1]
852
+ else:
853
+ return candidate_differentiable_outputs
env-llmeval/lib/python3.10/site-packages/torchgen/api/structured.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Union
2
+
3
+ from torchgen.api import cpp
4
+
5
+ from torchgen.api.types import (
6
+ ArgName,
7
+ ArrayRefCType,
8
+ BaseCType,
9
+ Binding,
10
+ ConstRefCType,
11
+ dimnameListT,
12
+ intArrayRefT,
13
+ iOptTensorListRefT,
14
+ iTensorListRefT,
15
+ NamedCType,
16
+ OptionalCType,
17
+ optionalIntArrayRefT,
18
+ optionalScalarRefT,
19
+ optionalTensorRefT,
20
+ scalarT,
21
+ tensorT,
22
+ )
23
+ from torchgen.model import (
24
+ Argument,
25
+ BaseTy,
26
+ BaseType,
27
+ ListType,
28
+ NativeFunctionsGroup,
29
+ OptionalType,
30
+ SelfArgument,
31
+ TensorOptionsArguments,
32
+ Type,
33
+ )
34
+ from torchgen.utils import assert_never
35
+
36
+ # This file describes the translation of JIT schema to the structured functions API.
37
+ # This is similar to native API, but a number of historical problems with native
38
+ # API have been fixed.
39
+
40
+
41
+ # Translation of types occurring in JIT arguments to a C++ argument type.
42
+ # NB: For now, mutable doesn't do anything; but it could if we make
43
+ # some more nominal types
44
+ def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
45
+ # If it's a value type, do the value type translation
46
+ # NB: structured kernels ALWAYS have symint off, since they involve actual
47
+ # kernels that require real ints. The one exception is the
48
+ # CompositeExplicitAutograd and the meta function (which could
49
+ # hypothetically be SymInt), but for simplicity we plan for these to just
50
+ # be handled in Python
51
+ r = cpp.valuetype_type(t, symint=False, binds=binds)
52
+ if r is not None:
53
+ return r
54
+
55
+ if isinstance(t, BaseType):
56
+ if t.name == BaseTy.Tensor:
57
+ return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
58
+ elif t.name == BaseTy.Scalar:
59
+ return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
60
+ else:
61
+ raise AssertionError(f"base type should have been value type {t}")
62
+ elif isinstance(t, OptionalType):
63
+ if t.elem == BaseType(BaseTy.Tensor):
64
+ return NamedCType(binds, BaseCType(optionalTensorRefT))
65
+ elif t.elem == BaseType(BaseTy.Scalar):
66
+ return NamedCType(binds, BaseCType(optionalScalarRefT))
67
+ elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
68
+ return NamedCType(binds, BaseCType(optionalIntArrayRefT))
69
+ elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
70
+ return NamedCType(binds, OptionalCType(elem.type))
71
+ elif isinstance(t, ListType):
72
+ if t.elem == BaseType(BaseTy.Tensor):
73
+ return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
74
+ elif t.elem == OptionalType(BaseType(BaseTy.Tensor)):
75
+ return NamedCType(binds, BaseCType(iOptTensorListRefT))
76
+ # TODO: delete these special cases; see torchgen.api.cpp--these
77
+ # must be changed in tandem, but there are problems; see
78
+ # https://github.com/pytorch/pytorch/pull/51485
79
+ elif str(t.elem) == "int":
80
+ return NamedCType(binds, BaseCType(intArrayRefT))
81
+ elif str(t.elem) == "Dimname":
82
+ return NamedCType(binds, BaseCType(dimnameListT))
83
+ elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
84
+ return NamedCType(binds, ArrayRefCType(elem.type))
85
+ else:
86
+ raise AssertionError(f"unrecognized type {repr(t)}")
87
+
88
+
89
+ def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
90
+ return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
91
+
92
+
93
+ # returns_type intentionally omitted, because structured kernels never "return";
94
+ # instead, they always indirectly report their outputs (in the case of a meta
95
+ # function, by calling set_output; in the case of an impl function, by writing
96
+ # directly into the provided out argument).
97
+
98
+
99
+ # Structured kernels are never defaulted
100
+ def argument(a: Union[Argument, SelfArgument, TensorOptionsArguments]) -> List[Binding]:
101
+ if isinstance(a, Argument):
102
+ return [
103
+ Binding(
104
+ nctype=argument_type(a, binds=a.name),
105
+ name=a.name,
106
+ default=None,
107
+ argument=a,
108
+ )
109
+ ]
110
+ elif isinstance(a, SelfArgument):
111
+ return argument(a.argument)
112
+ elif isinstance(a, TensorOptionsArguments):
113
+ raise AssertionError("structured kernels don't support TensorOptions yet")
114
+ else:
115
+ assert_never(a)
116
+
117
+
118
+ def impl_arguments(g: NativeFunctionsGroup) -> List[Binding]:
119
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
120
+
121
+ if g.out.precomputed:
122
+ # A list of parameters for the impl function with
123
+ # certain parameters replaced with precomputed counterparts
124
+ # as specified in native_functions.yaml.
125
+ non_out_args_replaced: List[
126
+ Union[Argument, TensorOptionsArguments, SelfArgument]
127
+ ] = []
128
+ for a in g.out.func.arguments.non_out:
129
+ if isinstance(a, Argument) and a.name in g.out.precomputed.replace:
130
+ # If a is in precompute.replace, append the parameters
131
+ # that should replace it onto non_out_args_replaced.
132
+ for replacement in g.out.precomputed.replace[a.name]:
133
+ non_out_args_replaced.append(replacement)
134
+ else:
135
+ # If not, push a as it is.
136
+ non_out_args_replaced.append(a)
137
+
138
+ args.extend(non_out_args_replaced)
139
+ # g.out.precomputed.add is the list of parameters that are added
140
+ # without replacement after the non out args and just before the out args
141
+ args.extend(g.out.precomputed.add)
142
+ else:
143
+ args.extend(g.out.func.arguments.non_out)
144
+
145
+ args.extend(g.out.func.arguments.out)
146
+ return [r for arg in args for r in argument(arg)]
147
+
148
+
149
+ def meta_arguments(g: NativeFunctionsGroup) -> List[Binding]:
150
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
151
+ args.extend(g.functional.func.arguments.non_out)
152
+ return [r for arg in args for r in argument(arg)]
153
+
154
+
155
+ def out_arguments(g: NativeFunctionsGroup) -> List[Binding]:
156
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
157
+ args.extend(g.out.func.arguments.out)
158
+ return [r for arg in args for r in argument(arg)]
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .types import *
2
+ from .types_base import *
3
+ from .signatures import * # isort:skip
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (249 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc ADDED
Binary file (6.18 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc ADDED
Binary file (9.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/signatures.py ADDED
@@ -0,0 +1,424 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
+ from typing import Iterator, List, Optional, Sequence, Set, Tuple, Union
4
+
5
+ from torchgen.model import (
6
+ BackendIndex,
7
+ FunctionSchema,
8
+ NativeFunction,
9
+ NativeFunctionsGroup,
10
+ NativeFunctionsViewGroup,
11
+ )
12
+
13
+ from .types_base import Binding, CType, Expr
14
+
15
+
16
+ @dataclass(frozen=True)
17
+ class CppSignature:
18
+ """
19
+ A CppSignature represents a single overload in the C++ API. For
20
+ any given function schema, there may be multiple CppSignatures
21
+ corresponding to it, based on how we desugar to C++. See also
22
+ CppSignatureGroup.
23
+ """
24
+
25
+ # The schema this signature is derived from
26
+ func: FunctionSchema
27
+
28
+ # Is this a C++ signature for a method, i.e. Tensor::my_op(...)?
29
+ method: bool
30
+
31
+ # Is this a faithful C++ signature (i.e. following the JIT schema) or a convenience API
32
+ # (i.e. with a potential TensorOptions argument and out arguments in the front)
33
+ faithful: bool
34
+
35
+ # Is this a symint C++ signature. For BC reasons, functions that take
36
+ # SymInts still present as int64_t in C++, and the SymInt variant is
37
+ # offered at a different overload name
38
+ #
39
+ # NB: If a function RETURNS a SymInt, this is ALWAYS false
40
+ symint: bool
41
+
42
+ # The set of C++ arguments which should not have defaults applied to them
43
+ cpp_no_default_args: Set[str]
44
+
45
+ # Is this a fallback C++ binding? Fallback bindings are enabled by
46
+ # manual_cpp_binding: True and are alternate, non-public API that
47
+ # lets manual C++ binding implementors access the binding that would
48
+ # have been automatically generated
49
+ fallback_binding: bool = False
50
+
51
+ # Return the unpacked argument structure of this signature,
52
+ # discarding information about which arguments are semantically
53
+ # related to each other.
54
+ def arguments(self) -> Sequence[Binding]:
55
+ return cpp.arguments(
56
+ self.func.arguments,
57
+ faithful=self.faithful,
58
+ symint=self.symint,
59
+ method=self.method,
60
+ cpp_no_default_args=self.cpp_no_default_args,
61
+ )
62
+
63
+ def name(self, *, suppress_symint_suffix: bool = False) -> str:
64
+ n = cpp.name(
65
+ self.func,
66
+ faithful_name_for_out_overloads=self.faithful,
67
+ symint_overload=False if suppress_symint_suffix else self.symint,
68
+ )
69
+ if self.fallback_binding:
70
+ n = f"__dispatch_{n}"
71
+ return n
72
+
73
+ # Render the C++ declaration for this signature
74
+ def decl(
75
+ self,
76
+ *,
77
+ name: Optional[str] = None,
78
+ prefix: str = "",
79
+ is_redispatching_fn: bool = False,
80
+ suppress_symint_suffix: bool = False,
81
+ ) -> str:
82
+ returns_type = cpp.returns_type(
83
+ self.func.returns, symint=self.symint
84
+ ).cpp_type()
85
+ cpp_args = [a.decl() for a in self.arguments()]
86
+ if is_redispatching_fn:
87
+ cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
88
+ cpp_args_str = ", ".join(cpp_args)
89
+ if name is None:
90
+ name = prefix + self.name(suppress_symint_suffix=suppress_symint_suffix)
91
+ return f"{returns_type} {name}({cpp_args_str})"
92
+
93
+ # Render the C++ definition for this signature, not including
94
+ # the body (with curly braces)
95
+ def defn(
96
+ self,
97
+ *,
98
+ name: Optional[str] = None,
99
+ prefix: str = "",
100
+ is_redispatching_fn: bool = False,
101
+ ) -> str:
102
+ returns_type = cpp.returns_type(
103
+ self.func.returns, symint=self.symint
104
+ ).cpp_type()
105
+ cpp_args = [a.defn() for a in self.arguments()]
106
+ if is_redispatching_fn:
107
+ cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
108
+ cpp_args_str = ", ".join(cpp_args)
109
+ if name is None:
110
+ name = prefix + self.name()
111
+ return f"{returns_type} {name}({cpp_args_str})"
112
+
113
+ def ptr_type(self) -> str:
114
+ args_types_str = ", ".join(a.type for a in self.arguments())
115
+ return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_types_str})"
116
+
117
+ # Return the C++ function type, e.g., something like int(bool)
118
+ def type(self) -> str:
119
+ args_types_str = ", ".join(a.type for a in self.arguments())
120
+ return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} ({args_types_str})"
121
+
122
+
123
+ # Represents group of all CppSignatures associated with a
124
+ # FunctionSchema. Right now, that's the regular, user-visible
125
+ # signature, as well as a "faithful" signature which doesn't
126
+ # have grouping.
127
+ @dataclass(frozen=True)
128
+ class CppSignatureGroup:
129
+ func: FunctionSchema
130
+ signature: CppSignature
131
+ faithful_signature: Optional[CppSignature]
132
+ symint_signature: Optional[CppSignature]
133
+ symint_faithful_signature: Optional[CppSignature]
134
+
135
+ def most_faithful_signature(self) -> CppSignature:
136
+ if self.faithful_signature:
137
+ return self.faithful_signature
138
+ else:
139
+ return self.signature
140
+
141
+ def signatures(self, *, symint: bool = True) -> Iterator[CppSignature]:
142
+ yield self.signature
143
+ if self.faithful_signature:
144
+ yield self.faithful_signature
145
+ if symint:
146
+ if self.symint_signature:
147
+ yield self.symint_signature
148
+ if self.symint_faithful_signature:
149
+ yield self.symint_faithful_signature
150
+
151
+ @staticmethod
152
+ def from_native_function(
153
+ f: NativeFunction, *, method: bool, fallback_binding: bool = False
154
+ ) -> "CppSignatureGroup":
155
+ func = f.func
156
+
157
+ def make_sig(*, faithful: bool, symint: bool) -> CppSignature:
158
+ return CppSignature(
159
+ func=func,
160
+ faithful=faithful,
161
+ symint=symint,
162
+ method=method,
163
+ fallback_binding=fallback_binding,
164
+ cpp_no_default_args=f.cpp_no_default_args,
165
+ )
166
+
167
+ def make_sigs(*, symint: bool) -> Tuple[CppSignature, Optional[CppSignature]]:
168
+ faithful_signature: Optional[CppSignature] = None
169
+ if func.arguments.tensor_options is not None or len(func.arguments.out) > 0:
170
+ faithful_signature = make_sig(faithful=True, symint=symint)
171
+ signature = make_sig(faithful=False, symint=symint)
172
+ return signature, faithful_signature
173
+
174
+ signature, faithful_signature = make_sigs(symint=False)
175
+ symint_signature: Optional[CppSignature] = None
176
+ symint_faithful_signature: Optional[CppSignature] = None
177
+ if func.has_symint():
178
+ symint_signature, symint_faithful_signature = make_sigs(symint=True)
179
+
180
+ return CppSignatureGroup(
181
+ func=func,
182
+ signature=signature,
183
+ faithful_signature=faithful_signature,
184
+ symint_signature=symint_signature,
185
+ symint_faithful_signature=symint_faithful_signature,
186
+ )
187
+
188
+
189
+ @dataclass(frozen=True)
190
+ class DispatcherSignature:
191
+ # The schema this signature is derived from
192
+ func: FunctionSchema
193
+
194
+ # Allows you to prepend an arbitrary prefix to the signature name.
195
+ # This is useful for parts of the codegen that generate wrappers around kernels,
196
+ # and need to avoid naming collisions.
197
+ prefix: str = ""
198
+
199
+ symint: bool = True
200
+
201
+ def arguments(self) -> List[Binding]:
202
+ return dispatcher.arguments(self.func, symint=self.symint)
203
+
204
+ def name(self) -> str:
205
+ return self.prefix + dispatcher.name(self.func)
206
+
207
+ def decl(self, name: Optional[str] = None) -> str:
208
+ args_str = ", ".join(a.decl() for a in self.arguments())
209
+ if name is None:
210
+ name = self.name()
211
+ return f"{self.returns_type().cpp_type()} {name}({args_str})"
212
+
213
+ def defn(
214
+ self, name: Optional[str] = None, *, is_redispatching_fn: bool = False
215
+ ) -> str:
216
+ args = [a.defn() for a in self.arguments()]
217
+ if is_redispatching_fn:
218
+ args = ["c10::DispatchKeySet dispatchKeySet"] + args
219
+ args_str = ", ".join(args)
220
+ if name is None:
221
+ name = self.name()
222
+ return f"{self.returns_type().cpp_type()} {name}({args_str})"
223
+
224
+ def exprs(self) -> List[Expr]:
225
+ return [Expr(a.name, a.nctype) for a in self.arguments()]
226
+
227
+ def returns_type(self) -> CType:
228
+ return dispatcher.returns_type(self.func.returns, symint=self.symint)
229
+
230
+ def ptr_type(self) -> str:
231
+ dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
232
+ return f"{self.returns_type().cpp_type()} (*)({dispatcher_args_types_str})"
233
+
234
+ # Return the C++ function type, e.g., something like int(bool)
235
+ def type(self) -> str:
236
+ dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
237
+ return f"{self.returns_type().cpp_type()} ({dispatcher_args_types_str})"
238
+
239
+ @staticmethod
240
+ def from_schema(
241
+ func: FunctionSchema, *, prefix: str = "", symint: bool = True
242
+ ) -> "DispatcherSignature":
243
+ return DispatcherSignature(func, prefix, symint)
244
+
245
+
246
+ @dataclass(frozen=True)
247
+ class NativeSignature:
248
+ # The schema this signature is derived from
249
+ func: FunctionSchema
250
+
251
+ symint: bool
252
+
253
+ prefix: str = ""
254
+
255
+ def name(self) -> str:
256
+ return self.prefix + native.name(self.func)
257
+
258
+ def decl(self, name: Optional[str] = None) -> str:
259
+ args_str = ", ".join(a.decl() for a in self.arguments())
260
+ if name is None:
261
+ name = self.name()
262
+ return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} {name}({args_str})"
263
+
264
+ def defn(self, name: Optional[str] = None) -> str:
265
+ args_str = ", ".join(a.defn() for a in self.arguments())
266
+ if name is None:
267
+ name = self.name()
268
+ return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} {name}({args_str})"
269
+
270
+ def ptr_type(self) -> str:
271
+ # don't include defaults in type signature!
272
+ args_str = ", ".join(a.defn() for a in self.arguments())
273
+ return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_str})"
274
+
275
+ def arguments(self) -> List[Binding]:
276
+ return native.arguments(self.func, symint=self.symint)
277
+
278
+ def returns_type(self) -> CType:
279
+ return native.returns_type(self.func.returns, symint=self.symint)
280
+
281
+ def dispatcher_exprs(self) -> List[Expr]:
282
+ return translate.translate(
283
+ self.arguments(), dispatcher.arguments(self.func), method=False
284
+ )
285
+
286
+
287
+ @dataclass(frozen=True)
288
+ class ViewInverseSignature:
289
+ g: NativeFunctionsViewGroup
290
+
291
+ def name(self) -> str:
292
+ assert self.g.view_copy is not None
293
+ return functionalization.name(self.g, is_reverse=True, include_namespace=False)
294
+
295
+ def decl(self) -> str:
296
+ assert self.g.view_copy is not None
297
+ return_type = functionalization.returns_type(self.g.view_copy.func)
298
+ decls = [
299
+ a.decl()
300
+ for a in functionalization.inner_arguments(
301
+ self.g.view_copy.func, is_reverse=True
302
+ )
303
+ ]
304
+ return f"static {return_type.cpp_type()} {self.name()}({', '.join(decls)});"
305
+
306
+
307
+ @dataclass(frozen=True)
308
+ class FunctionalizationLambda:
309
+ g: NativeFunctionsViewGroup
310
+
311
+ # are we generating the forward lambda or the reverse lambda?
312
+ is_reverse: bool
313
+
314
+ def captures(self) -> List[Expr]:
315
+ # The lambda lives inside of a kernel following the dispatcher API, so its outer context is the dispatcher arguments
316
+ # We also need to read the "reapply views" TLS at the time that the functionalization kernel was executed,
317
+ # and plumb it into the lambda.
318
+ outer_ctx = dispatcher.arguments(self.g.view.func) + [
319
+ functionalization.reapply_views_binding
320
+ ]
321
+ capture_bindings = functionalization.capture_arguments(
322
+ self.g.view.func, is_reverse=self.is_reverse
323
+ )
324
+ # allow_expensive_conversions is set because we want to convert
325
+ # some reference types (IntArrayRef) to value types (vector<int64_t>).
326
+ capture_exprs = translate.translate(
327
+ outer_ctx, capture_bindings, method=False, allow_expensive_conversions=True
328
+ )
329
+ return capture_exprs
330
+
331
+ def decl(self) -> str:
332
+ return_type = functionalization.returns_type(self.g.view.func)
333
+ capture_str = ", ".join(
334
+ f"{val.type.name} = {val.expr}" for val in self.captures()
335
+ )
336
+ decls = [
337
+ a.decl()
338
+ for a in functionalization.outer_arguments(is_reverse=self.is_reverse)
339
+ ]
340
+ return f"[{capture_str}]({', '.join(decls)}) -> {return_type.cpp_type()}"
341
+
342
+ def inner_call(self, *, reapply_views: Optional[bool] = None) -> str:
343
+ inner_call_name = functionalization.name(
344
+ self.g,
345
+ is_reverse=self.is_reverse,
346
+ include_namespace=True,
347
+ reapply_views=reapply_views,
348
+ )
349
+
350
+ arg_ctx = functionalization.outer_arguments(is_reverse=self.is_reverse)
351
+ capture_ctx = functionalization.capture_arguments(
352
+ self.g.view.func, is_reverse=self.is_reverse
353
+ )
354
+ full_ctx = arg_ctx + capture_ctx
355
+
356
+ assert self.g.view_copy is not None
357
+ call_bindings = functionalization.inner_arguments(
358
+ self.g.view_copy.func, is_reverse=self.is_reverse
359
+ )
360
+ maybe_index = functionalization.inner_call_index(self.g.view_copy.func)
361
+ call_exprs = [
362
+ e.expr for e in translate.translate(full_ctx, call_bindings, method=False)
363
+ ]
364
+ if not self.is_reverse and maybe_index is not None:
365
+ return f'{inner_call_name}({", ".join(call_exprs)})[{maybe_index.name}];'
366
+ else:
367
+ return f'{inner_call_name}({", ".join(call_exprs)});'
368
+
369
+ @staticmethod
370
+ def from_func(
371
+ g: NativeFunctionsViewGroup, *, is_reverse: bool
372
+ ) -> "FunctionalizationLambda":
373
+ return FunctionalizationLambda(g, is_reverse)
374
+
375
+
376
+ @dataclass(frozen=True)
377
+ class StructuredImplSignature:
378
+ g: NativeFunctionsGroup
379
+ name: str
380
+
381
+ def defn(self, name: Optional[str] = None) -> str:
382
+ args_str = ", ".join(a.defn() for a in self.arguments())
383
+ return f"TORCH_IMPL_FUNC({self.name})({args_str})"
384
+
385
+ def arguments(self) -> List[Binding]:
386
+ return structured.impl_arguments(self.g)
387
+
388
+
389
+ # Helper functions
390
+
391
+
392
+ def kernel_signature(
393
+ f: NativeFunction, backend_index: BackendIndex, *, prefix: str = ""
394
+ ) -> Union["NativeSignature", "DispatcherSignature"]:
395
+ # Note [External Backends Follow Dispatcher API]
396
+ # Kernel signatures for in-tree backends follow the "native" API,
397
+ # while kernels for out-of-tree backends follow the dispatcher API.
398
+ # See the comments in `native.py` for details, but historically there have been
399
+ # some small differences in schema convention between them and the Dispatcher API.
400
+ # Any differences that require translating between the two will results in a runtime cost,
401
+ # so we'd like to keep the differences as small as possible.
402
+ # With external backends, we'd like to enforce that they write their kernels with schemas
403
+ # that match the Dispatcher API directly, if they can.
404
+ meta = backend_index.get_kernel(f)
405
+ symint = meta is not None and meta.supports_symint()
406
+ if symint:
407
+ assert (
408
+ f.func.has_symint()
409
+ ), f"attempted to define symint kernel for {backend_index.dispatch_key} without SymInt in schema"
410
+ if backend_index.external:
411
+ return DispatcherSignature.from_schema(f.func, prefix=prefix, symint=symint)
412
+ else:
413
+ return NativeSignature(f.func, prefix=prefix, symint=symint)
414
+
415
+
416
+ # Functions only, no types
417
+ from torchgen.api import (
418
+ cpp,
419
+ dispatcher,
420
+ functionalization,
421
+ native,
422
+ structured,
423
+ translate,
424
+ )
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/types.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Where should I add a new type? `types_base.py` vs `types.py`
3
+
4
+ This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
5
+
6
+ `types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
7
+
8
+ The difference between these two files, is `types_base.py` should be implementation-agnostic, meaning it shouldn't
9
+ contain any type definition that is tight to a specific C++ library (e.g., ATen), so that it can be easily reused
10
+ if we want to generate code for another C++ library.
11
+
12
+ Add new types to `types.py` if these types are ATen/c10 related.
13
+ Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
14
+ """
15
+ from dataclasses import dataclass
16
+ from typing import Dict
17
+
18
+ from torchgen.model import BaseTy, ScalarType
19
+
20
+ from .types_base import (
21
+ BaseCppType,
22
+ BaseCType,
23
+ boolT,
24
+ byteT,
25
+ charT,
26
+ CType,
27
+ doubleT,
28
+ floatT,
29
+ int32T,
30
+ longT,
31
+ shortT,
32
+ )
33
+
34
+
35
+ TENSOR_LIST_LIKE_CTYPES = [
36
+ "at::TensorList",
37
+ "const c10::List<c10::optional<at::Tensor>> &",
38
+ "const at::ITensorListRef &",
39
+ ]
40
+
41
+
42
+ halfT = BaseCppType("at", "Half")
43
+ complexHalfT = BaseCppType(
44
+ "c10", "complex<c10::Half>"
45
+ ) # stuffing template param here is an abuse
46
+ complexFloatT = BaseCppType("c10", "complex<float>")
47
+ complexDoubleT = BaseCppType("c10", "complex<double>")
48
+ bfloat16T = BaseCppType("at", "BFloat16")
49
+ float8_e5m2T = BaseCppType("at", "Float8_e5m2")
50
+ float8_e5m2fnuzT = BaseCppType("at", "Float8_e5m2fnuz")
51
+ float8_e4m3fnT = BaseCppType("at", "Float8_e4m3fn")
52
+ float8_e4m3fnuzT = BaseCppType("at", "Float8_e4m3fnuz")
53
+ stringT = BaseCppType("c10", "string_view")
54
+ generatorT = BaseCppType("at", "Generator")
55
+ scalarTypeT = BaseCppType("at", "ScalarType")
56
+ tensorT = BaseCppType("at", "Tensor")
57
+ optionalTensorRefT = BaseCppType("at", "OptionalTensorRef")
58
+ tensorListT = BaseCppType("at", "TensorList")
59
+ iTensorListRefT = BaseCppType("at", "ITensorListRef")
60
+ iOptTensorListRefT = BaseCppType("at", "IOptTensorListRef")
61
+ dimnameT = BaseCppType("at", "Dimname")
62
+ dimnameListT = BaseCppType("at", "DimnameList")
63
+ dimVectorT = BaseCppType("at", "DimVector")
64
+ layoutT = BaseCppType("at", "Layout")
65
+ deviceT = BaseCppType("at", "Device")
66
+ deviceIndexT = BaseCppType("at", "DeviceIndex")
67
+ scalarT = BaseCppType("at", "Scalar")
68
+ optionalScalarRefT = BaseCppType("at", "OptionalScalarRef")
69
+ memoryFormatT = BaseCppType("at", "MemoryFormat")
70
+ qschemeT = BaseCppType("at", "QScheme")
71
+ storageT = BaseCppType("at", "Storage")
72
+ streamT = BaseCppType("at", "Stream")
73
+ intArrayRefT = BaseCppType("at", "IntArrayRef")
74
+ optionalIntArrayRefT = BaseCppType("at", "OptionalIntArrayRef")
75
+ optionalSymIntArrayRefT = BaseCppType("at", "OptionalSymIntArrayRef")
76
+ tensorOptionsT = BaseCppType("at", "TensorOptions")
77
+ typeAndSizeT = BaseCppType("torch::autograd::generated", "TypeAndSize")
78
+ tensorGeometryT = BaseCppType("at", "TensorGeometry")
79
+ SymIntT = BaseCppType("c10", "SymInt")
80
+ symIntArrayRefT = BaseCppType("c10", "SymIntArrayRef")
81
+
82
+ # Types representing template parameters. Technically, we probably shouldn't
83
+ # represent them this way in codegen, but it was pretty convenient.
84
+ scalar_t = BaseCppType("", "scalar_t")
85
+ opmath_t = BaseCppType("", "opmath_t")
86
+
87
+ ScalarTypeToCppMapping: Dict[ScalarType, BaseCppType] = {
88
+ ScalarType.Byte: byteT,
89
+ ScalarType.Char: charT,
90
+ ScalarType.Short: shortT,
91
+ ScalarType.Int: int32T,
92
+ ScalarType.Long: longT,
93
+ ScalarType.Half: halfT,
94
+ ScalarType.Float: floatT,
95
+ ScalarType.Double: doubleT,
96
+ ScalarType.ComplexHalf: complexHalfT,
97
+ ScalarType.ComplexFloat: complexFloatT,
98
+ ScalarType.ComplexDouble: complexDoubleT,
99
+ ScalarType.Bool: boolT,
100
+ ScalarType.Float8_e5m2: float8_e5m2T,
101
+ ScalarType.Float8_e5m2fnuz: float8_e5m2fnuzT,
102
+ ScalarType.Float8_e4m3fn: float8_e4m3fnT,
103
+ ScalarType.Float8_e4m3fnuz: float8_e4m3fnuzT,
104
+ }
105
+
106
+ BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
107
+ BaseTy.int: longT,
108
+ BaseTy.float: doubleT,
109
+ BaseTy.bool: boolT,
110
+ BaseTy.str: stringT,
111
+ BaseTy.Generator: generatorT,
112
+ BaseTy.ScalarType: scalarTypeT,
113
+ BaseTy.Tensor: tensorT,
114
+ BaseTy.Dimname: dimnameT,
115
+ BaseTy.DimVector: dimVectorT,
116
+ BaseTy.Layout: layoutT,
117
+ BaseTy.Device: deviceT,
118
+ BaseTy.DeviceIndex: deviceIndexT,
119
+ BaseTy.Scalar: scalarT,
120
+ BaseTy.MemoryFormat: memoryFormatT,
121
+ BaseTy.QScheme: qschemeT,
122
+ BaseTy.Storage: storageT,
123
+ BaseTy.Stream: streamT,
124
+ BaseTy.SymInt: SymIntT,
125
+ }
126
+
127
+ # CTypes encode C++ type structure as needed for translation.
128
+
129
+
130
@dataclass(frozen=True)
class OptionalCType(CType):
    """C++ `c10::optional<T>` wrapping another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"c10::optional<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"c10::optional<{inner}>"

    def remove_const_ref(self) -> "CType":
        # Strip const/ref from the element, keep the optional wrapper.
        return OptionalCType(self.elem.remove_const_ref())
143
+
144
+
145
@dataclass(frozen=True)
class ListCType(CType):
    """C++ `c10::List<T>` wrapping another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"c10::List<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"c10::List<{inner}>"

    def remove_const_ref(self) -> "CType":
        # Strip const/ref from the element, keep the list wrapper.
        return ListCType(self.elem.remove_const_ref())
158
+
159
+
160
@dataclass(frozen=True)
class ArrayRefCType(CType):
    """C++ `at::ArrayRef<T>` wrapping another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"at::ArrayRef<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        # Registration declarations intentionally omit the `at::` namespace.
        inner = self.elem.cpp_type_registration_declarations()
        return f"ArrayRef<{inner}>"

    def remove_const_ref(self) -> "CType":
        return ArrayRefCType(self.elem.remove_const_ref())
173
+
174
+
175
@dataclass(frozen=True)
class VectorizedCType(CType):
    """C++ `at::vec::Vectorized<T>`.

    This template is explicitly specialized, so the only valid elems are
    those we have specializations for (e.g. float, double, ...); `scalar_t`
    is also a common argument here when code is generated in a templated
    context.
    """

    elem: BaseCType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        inner = self.elem.cpp_type()
        return f"at::vec::Vectorized<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        # Vectorized types never appear in RegistrationDeclarations.yaml.
        raise NotImplementedError

    def remove_const_ref(self) -> "CType":
        # Already value-typed; nothing to strip.
        return self
env-llmeval/lib/python3.10/site-packages/torchgen/api/types/types_base.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Where should I add a new type? `types_base.py` vs `types.py`
3
+
4
+ This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
5
+
6
+ `types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
7
+
8
+ The difference between these two files, is `types_base.py` should be implementation-agnostic, meaning it shouldn't
9
+ contain any type definition that is tight to a specific C++ library (e.g., ATen), so that it can be easily reused
10
+ if we want to generate code for another C++ library.
11
+
12
+ Add new types to `types.py` if these types are ATen/c10 related.
13
+ Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
14
+ """
15
+ from abc import ABC, abstractmethod
16
+ from dataclasses import dataclass
17
+ from enum import auto, Enum
18
+ from typing import List, Optional, Union
19
+
20
+ from torchgen.model import Argument, SelfArgument, TensorOptionsArguments
21
+
22
+ # An ArgName is just the str name of the argument in schema;
23
+ # but in some special circumstances, we may add a little extra
24
+ # context. The Enum SpecialArgName covers all of these cases;
25
+ # grep for their construction sites to see when they can occur.
26
+
27
+
28
class SpecialArgName(Enum):
    """Marker for arguments that carry extra context beyond their schema name."""

    possibly_redundant_memory_format = auto()


# An argument is usually referred to by its plain schema-name string, but
# occasionally by one of the special markers above.
ArgName = Union[str, SpecialArgName]
33
+
34
+
35
+ # This class shouldn't be created directly; instead, use/create one of the singletons below.
36
@dataclass(frozen=True)
class BaseCppType:
    """A fully-qualified, non-templated C++ type: a namespace plus a bare name."""

    ns: Optional[str]
    name: str

    def __str__(self) -> str:
        # Types in the global namespace (ns is None or "") print without `::`.
        if not self.ns:
            return self.name
        return f"{self.ns}::{self.name}"
45
+
46
+
47
+ # The set of all non-templated, valid, fully-qualified names of C++ types that are used in the codegen.
48
+ # Templated types get their own dataclass, mainly to make namespace parsing easier.
49
+ byteT = BaseCppType("", "uint8_t")
50
+ charT = BaseCppType("", "int8_t")
51
+ shortT = BaseCppType("", "int16_t")
52
+ # It would be more symmetric for this to be called intT, but it easy to mix
53
+ # this up with JIT int (which is int64_t in C++), so we intentionally don't
54
+ # define intT to make it obvious when you've stuffed it up
55
+ int32T = BaseCppType("", "int32_t")
56
+ longT = BaseCppType("", "int64_t")
57
+ doubleT = BaseCppType("", "double")
58
+ floatT = BaseCppType("", "float")
59
+ boolT = BaseCppType("", "bool")
60
+ voidT = BaseCppType("", "void")
61
+
62
+
63
class CType(ABC):
    """Abstract base of the C++ type structure used for translation.

    Concrete subclasses render themselves as C++ type syntax.
    """

    @abstractmethod
    def cpp_type(self, *, strip_ref: bool = False) -> str:
        raise NotImplementedError

    @abstractmethod
    def cpp_type_registration_declarations(self) -> str:
        # Variant used for RegistrationDeclarations.yaml (no `at::` prefix).
        raise NotImplementedError

    @abstractmethod
    def remove_const_ref(self) -> "CType":
        return self
75
+
76
+
77
@dataclass(frozen=True)
class BaseCType(CType):
    """Leaf CType that wraps a single BaseCppType."""

    type: BaseCppType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        return str(self.type)

    # For BC reasons, we don't want to introduce at:: namespaces to
    # RegistrationDeclarations.yaml.  TODO: Kill this when we eventually remove it!
    def cpp_type_registration_declarations(self) -> str:
        return str(self.type).replace("at::", "")

    def remove_const_ref(self) -> "CType":
        # A bare value type has no const/ref to strip.
        return self
91
+
92
+
93
@dataclass(frozen=True)
class ConstRefCType(CType):
    """C++ `const T &` around another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # When the caller wants the ref stripped, render just the element.
        if strip_ref:
            return self.elem.cpp_type(strip_ref=strip_ref)
        return f"const {self.elem.cpp_type()} &"

    def cpp_type_registration_declarations(self) -> str:
        return f"const {self.elem.cpp_type_registration_declarations()} &"

    def remove_const_ref(self) -> "CType":
        # Dropping const-ref also recursively strips the element.
        return self.elem.remove_const_ref()
107
+
108
+
109
@dataclass(frozen=True)
class VectorCType(CType):
    """C++ `::std::vector<T>` around another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"::std::vector<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"::std::vector<{inner}>"

    def remove_const_ref(self) -> "CType":
        return VectorCType(self.elem.remove_const_ref())
122
+
123
+
124
@dataclass(frozen=True)
class ArrayCType(CType):
    """C++ `::std::array<T, N>`: an element type plus a fixed size."""

    elem: "CType"
    size: int

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"::std::array<{inner},{self.size}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"::std::array<{inner},{self.size}>"

    def remove_const_ref(self) -> "CType":
        return ArrayCType(self.elem.remove_const_ref(), self.size)
138
+
139
+
140
@dataclass(frozen=True)
class TupleCType(CType):
    """C++ `::std::tuple<Ts...>` over a list of element CTypes."""

    elems: List["CType"]

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element types.
        inner = ",".join([e.cpp_type() for e in self.elems])
        return f"::std::tuple<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = ",".join(
            [e.cpp_type_registration_declarations() for e in self.elems]
        )
        return f"::std::tuple<{inner}>"

    def remove_const_ref(self) -> "CType":
        return TupleCType([e.remove_const_ref() for e in self.elems])
153
+
154
+
155
@dataclass(frozen=True)
class MutRefCType(CType):
    """C++ mutable reference `T &` around another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # When the caller wants the ref stripped, render just the element.
        if strip_ref:
            return self.elem.cpp_type(strip_ref=strip_ref)
        return f"{self.elem.cpp_type()} &"

    def cpp_type_registration_declarations(self) -> str:
        return f"{self.elem.cpp_type_registration_declarations()} &"

    def remove_const_ref(self) -> "CType":
        # Dropping the ref also recursively strips the element.
        return self.elem.remove_const_ref()
169
+
170
+
171
+ # A NamedCType is short for Named C++ semantic type. A NamedCType represents a C++ type, plus
172
+ # semantic information about what it represents. For example, consider the
173
+ # argument "bool pin_memory"; its normal C++ type is "bool", but its C++
174
+ # semantic type also keeps track that this represents a "pin_memory"; you can't
175
+ # just use a random other boolean in a context where you need a "pin_memory"!
176
+ #
177
+
178
+
179
@dataclass(frozen=True)
class NamedCType:
    """A C++ type together with its semantic name.

    E.g. `bool pin_memory` is not just any bool: the name records that a
    value of this type specifically means "pin_memory", and the translation
    machinery will not substitute an unrelated bool for it.
    """

    name: ArgName
    type: CType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        return self.type.cpp_type(strip_ref=strip_ref)

    # For BC reasons, we don't want to introduce at:: namespaces to
    # RegistrationDeclarations.yaml.  TODO: Kill this when we eventually remove it!
    def cpp_type_registration_declarations(self) -> str:
        return self.type.cpp_type_registration_declarations()

    def remove_const_ref(self) -> "NamedCType":
        return NamedCType(self.name, self.type.remove_const_ref())

    def with_name(self, name: str) -> "NamedCType":
        # Same underlying type, different semantic name.
        return NamedCType(name, self.type)
197
+
198
+
199
+ # A binding represents any C++ binding site for a formal parameter.
200
+ # We don't distinguish between binding sites for different APIs;
201
+ # instead, all of the important distinctions are encoded in CType,
202
+ # which you can use to figure out if a given Binding is appropriate
203
+ # for use in another context. (See torchgen.api.translate)
204
+
205
+
206
@dataclass(frozen=True)
class Binding:
    """A C++ binding site for a formal parameter.

    Binding sites for different APIs are not distinguished here; the
    important distinctions live in the CType, which lets you decide whether
    a given Binding is usable in another context (see torchgen.api.translate).
    """

    name: str
    nctype: NamedCType
    argument: Union[Argument, TensorOptionsArguments, SelfArgument]
    # TODO: maybe don't represent default here
    default: Optional[str] = None

    def rename(self, name: str) -> "Binding":
        """Same binding under a different C++ name."""
        return Binding(
            name=name,
            nctype=self.nctype,
            argument=self.argument,
            default=self.default,
        )

    @property
    def type(self) -> str:
        # Rendered C++ type of this binding.
        return self.nctype.cpp_type()

    def no_default(self) -> "Binding":
        """Copy of this binding with any default value dropped."""
        return Binding(
            name=self.name,
            nctype=self.nctype,
            default=None,
            argument=self.argument,
        )

    def decl(self, *, func_ptr_cast: bool = False) -> str:
        """Parameter declaration; type-only when used in a function-pointer cast."""
        # Casting only needs to know the type.
        if func_ptr_cast:
            return f"{self.type}"
        default_part = f"={self.default}" if self.default is not None else ""
        return f"{self.type} {self.name}{default_part}"

    # For BC reasons, we don't want to introduce at:: namespaces to
    # RegistrationDeclarations.yaml.  TODO: Kill this when we eventually remove it!
    def decl_registration_declarations(self) -> str:
        type_s = self.nctype.cpp_type_registration_declarations()
        default_part = f"={self.default}" if self.default is not None else ""
        return f"{type_s} {self.name}{default_part}"

    def defn(self) -> str:
        # Definitions never carry defaults.
        return f"{self.type} {self.name}"

    def with_name(self, name: str) -> "Binding":
        return Binding(
            name=name, nctype=self.nctype, argument=self.argument, default=self.default
        )
261
+
262
+
263
+ # An Expr is a C++ expression. It has a C++ string representing its syntax,
264
+ # as well as a CType saying what it provides.
265
+
266
+
267
@dataclass(frozen=True)
class Expr:
    """A C++ expression: its source syntax plus the NamedCType it provides."""

    expr: str
    type: NamedCType
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/__pycache__/model.cpython-310.pyc ADDED
Binary file (7.33 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/__pycache__/parse.cpython-310.pyc ADDED
Binary file (4.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/custom_ops.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc ADDED
Binary file (7.45 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/unboxing.cpython-310.pyc ADDED
Binary file (6.38 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .types import *
2
+ from .signatures import * # isort:skip
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (238 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/signatures.cpython-310.pyc ADDED
Binary file (3.05 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/__pycache__/types.cpython-310.pyc ADDED
Binary file (2.61 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/signatures.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional, Set
3
+
4
+ import torchgen.api.cpp as aten_cpp
5
+
6
+ from torchgen.api.types import Binding, CType
7
+ from torchgen.model import FunctionSchema, NativeFunction
8
+
9
+ from .types import contextArg
10
+
11
+
12
@dataclass(frozen=True)
class ExecutorchCppSignature:
    """
    This signature is merely a CppSignature with Executorch types (optionally
    contains KernelRuntimeContext as well). The inline definition of
    CppSignature is generated in Functions.h and it's used by unboxing
    functions.
    """

    # The schema this signature is derived from
    func: FunctionSchema

    # The set of C++ arguments which should not have defaults applied to them
    cpp_no_default_args: Set[str]

    # Allows you to prepend an arbitrary prefix to the signature name.
    # This is useful for parts of the codegen that generate wrappers around
    # kernels, and need to avoid naming collisions.
    prefix: str = ""

    def arguments(self, *, include_context: bool = True) -> List[Binding]:
        # Optionally lead with the KernelRuntimeContext binding.
        leading = [contextArg] if include_context else []
        return leading + et_cpp.arguments(
            self.func.arguments,
            faithful=True,  # always faithful, out argument at the end
            method=False,  # method not supported
            cpp_no_default_args=self.cpp_no_default_args,
        )

    def name(self) -> str:
        return self.prefix + aten_cpp.name(
            self.func,
            faithful_name_for_out_overloads=True,
        )

    def decl(self, name: Optional[str] = None, *, include_context: bool = True) -> str:
        """Declaration string (arguments keep their defaults)."""
        if name is None:
            name = self.name()
        rendered_args = ", ".join(
            b.decl() for b in self.arguments(include_context=include_context)
        )
        return f"{self.returns_type().cpp_type()} {name}({rendered_args})"

    def defn(self, name: Optional[str] = None) -> str:
        """Definition string (no defaults, context always included)."""
        if name is None:
            name = self.name()
        rendered_args = ", ".join(b.defn() for b in self.arguments())
        return f"{self.returns_type().cpp_type()} {name}({rendered_args})"

    def returns_type(self) -> CType:
        return et_cpp.returns_type(self.func.returns)

    @staticmethod
    def from_native_function(
        f: NativeFunction, *, prefix: str = ""
    ) -> "ExecutorchCppSignature":
        return ExecutorchCppSignature(
            func=f.func, prefix=prefix, cpp_no_default_args=f.cpp_no_default_args
        )
+ )
71
+
72
+
73
+ from torchgen.executorch.api import et_cpp
env-llmeval/lib/python3.10/site-packages/torchgen/executorch/api/types/types.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Dict
3
+
4
+ from torchgen.api.types import (
5
+ BaseCppType,
6
+ BaseCType,
7
+ Binding,
8
+ boolT,
9
+ CType,
10
+ doubleT,
11
+ Expr,
12
+ longT,
13
+ MutRefCType,
14
+ NamedCType,
15
+ )
16
+ from torchgen.model import BaseTy
17
+
18
+ halfT = BaseCppType("torch::executor", "Half")
19
+ bfloat16T = BaseCppType("torch::executor", "BFloat16")
20
+ stringT = BaseCppType("torch::executor", "string_view")
21
+ scalarTypeT = BaseCppType("torch::executor", "ScalarType")
22
+ tensorT = BaseCppType("torch::executor", "Tensor")
23
+ tensorListT = BaseCppType("torch::executor", "TensorList")
24
+ scalarT = BaseCppType("torch::executor", "Scalar")
25
+ memoryFormatT = BaseCppType("torch::executor", "MemoryFormat")
26
+ intArrayRefT = BaseCppType("torch::executor", "IntArrayRef")
27
+ optionalT = BaseCppType("torch::executor", "optional")
28
+ contextT = BaseCppType("torch::executor", "KernelRuntimeContext")
29
+
30
+ contextExpr = Expr(
31
+ expr="context",
32
+ type=NamedCType(name="context", type=MutRefCType(BaseCType(contextT))),
33
+ )
34
+
35
+ contextArg = Binding(
36
+ name="context",
37
+ nctype=contextExpr.type,
38
+ argument=None, # type: ignore[arg-type]
39
+ default=None,
40
+ )
41
+
42
+ BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
43
+ BaseTy.int: longT,
44
+ BaseTy.float: doubleT,
45
+ BaseTy.bool: boolT,
46
+ BaseTy.str: stringT,
47
+ BaseTy.ScalarType: scalarTypeT,
48
+ BaseTy.Tensor: tensorT,
49
+ BaseTy.Scalar: scalarT,
50
+ BaseTy.MemoryFormat: memoryFormatT,
51
+ }
52
+
53
+
54
@dataclass(frozen=True)
class OptionalCType(CType):
    """Executorch `torch::executor::optional<T>` around another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"torch::executor::optional<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"torch::executor::optional<{inner}>"

    def remove_const_ref(self) -> "CType":
        return OptionalCType(self.elem.remove_const_ref())
67
+
68
+
69
@dataclass(frozen=True)
class ArrayRefCType(CType):
    """Executorch `torch::executor::ArrayRef<T>` around another CType."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` is deliberately not forwarded to the element type.
        inner = self.elem.cpp_type()
        return f"torch::executor::ArrayRef<{inner}>"

    def cpp_type_registration_declarations(self) -> str:
        inner = self.elem.cpp_type_registration_declarations()
        return f"torch::executor::ArrayRef<{inner}>"

    def remove_const_ref(self) -> "CType":
        return ArrayRefCType(self.elem.remove_const_ref())
env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders.cpython-310.pyc ADDED
Binary file (9.84 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/gen_mobile_upgraders_constant.cpython-310.pyc ADDED
Binary file (450 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import os
3
+ from enum import Enum
4
+ from pathlib import Path
5
+ from typing import Any, Dict, List
6
+
7
+ import torch
8
+ from torch.jit.generate_bytecode import generate_upgraders_bytecode
9
+
10
+ from torchgen.code_template import CodeTemplate
11
+ from torchgen.operator_versions.gen_mobile_upgraders_constant import (
12
+ MOBILE_UPGRADERS_HEADER_DESCRIPTION,
13
+ )
14
+
15
+
16
class ByteCode(Enum):
    """Indices of the sections in a mobile bytecode function table."""

    instructions = 1
    constants = 2
    types = 3
    operators = 4
    register_size = 5
22
+
23
+
24
+ EXCLUDED_OP_SET = [
25
+ "aten::full.names",
26
+ "aten::full.out",
27
+ "aten::full",
28
+ ]
29
+
30
+ EXCLUE_UPGRADER_SET = ["full_0_4", "full_out_0_4"]
31
+
32
+ ONE_INSTRUCTION = CodeTemplate(
33
+ """
34
+ Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
35
+ )
36
+
37
+ INSTRUCTION_LIST = CodeTemplate(
38
+ """std::vector<Instruction>({
39
+ ${instruction_list}
40
+ }), // instructions list"""
41
+ )
42
+
43
+ ONE_CONSTANT = CodeTemplate(
44
+ """
45
+ c10::IValue(${constant}),"""
46
+ )
47
+
48
+ CONSTANT_LIST = CodeTemplate(
49
+ """std::vector<c10::IValue>({
50
+ ${constant_list}
51
+ }), // constants list"""
52
+ )
53
+
54
+ CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""
55
+
56
+ ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")
57
+
58
+ TYPE_LIST = CodeTemplate(
59
+ """std::vector<c10::TypePtr>({
60
+ ${type_list}
61
+ }), // types list"""
62
+ )
63
+
64
+ TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""
65
+
66
+ ONE_OPERATOTR_STRING = CodeTemplate(
67
+ """
68
+ OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
69
+ )
70
+
71
+ OPERATOR_STRING_LIST = CodeTemplate(
72
+ """
73
+ std::vector<OperatorString>({
74
+ ${operator_string_list}
75
+ }), // operators list"""
76
+ )
77
+
78
+ ONE_UPGRADER_FUNCTION = CodeTemplate(
79
+ """
80
+ mobile::Function::registerFunc(
81
+ "${upgrader_name}",
82
+ ${instruction_list},
83
+ ${constant_list},
84
+ ${type_list},
85
+ ${register_size}
86
+ )"""
87
+ )
88
+
89
+ ONE_UPGRADER_SRC = CodeTemplate(
90
+ """
91
+ ByteCodeFunctionWithOperator({
92
+ ${bytecode_function},
93
+ ${operator_string_list}
94
+ }),"""
95
+ )
96
+
97
+
98
+ ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
99
+ """Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
100
+ ) # noqa: E501
101
+
102
+ ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
103
+ """
104
+ {std::string("${operator_name}"),
105
+ std::vector<Upgrader>({
106
+ ${upgrader_list_in_version_map}
107
+ })},"""
108
+ )
109
+
110
+
111
+ OPERATOR_VERSION_MAP = CodeTemplate(
112
+ """
113
+ const std::unordered_map<std::string, std::vector<Upgrader>>
114
+ getOperatorVersionMapForMobile() {
115
+ static std::unordered_map<std::string, std::vector<Upgrader>>
116
+ operatorVersionMapForMobile({
117
+ ${operator_list_in_version_map}
118
+ });
119
+ return operatorVersionMapForMobile;
120
+ }
121
+ """
122
+ )
123
+
124
+
125
+ UPGRADER_CPP_SRC = CodeTemplate(
126
+ MOBILE_UPGRADERS_HEADER_DESCRIPTION
127
+ + """
128
+ #include <caffe2/serialize/versions.h>
129
+ #include <torch/csrc/jit/mobile/upgrader_mobile.h>
130
+
131
+ namespace c10 {
132
+ TypePtr parseType(const std::string& pythonStr);
133
+ } // namespace c10
134
+
135
+ namespace torch {
136
+ namespace jit {
137
+
138
+ // clang-format off
139
+
140
+ // From operator_versions_map
141
+ ${operator_version_map}
142
+
143
+ const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
144
+ auto generate_upgrader_bytecode_list = []() {
145
+ std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
146
+ ${upgrader_bytecode}
147
+ });
148
+ for (const auto& upgrader_function : upgrader_function_list) {
149
+ for (const auto& op : upgrader_function.operators) {
150
+ upgrader_function.function.append_operator(
151
+ op.name,
152
+ op.overload_name,
153
+ op.num_specified_args);
154
+ }
155
+ }
156
+ return upgrader_function_list;
157
+ };
158
+ static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
159
+ generate_upgrader_bytecode_list();
160
+ return upgraderBytecodeList;
161
+ }
162
+
163
+ // clang-format on
164
+
165
+ } // namespace jit
166
+ } // namespace torch
167
+ """
168
+ )
169
+
170
+ UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
171
+
172
+ UPGRADER_ELEMENT = CodeTemplate(
173
+ """\
174
+ Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
175
+ """
176
+ )
177
+
178
+ PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
179
+ """\
180
+ {
181
+ std::string(${operator_name}),
182
+ std::vector<Upgrader>({${upgrader_list}});
183
+ }
184
+ """
185
+ )
186
+
187
+
188
def construct_instruction(instruction_list_from_yaml: List[Any]) -> str:
    """Render a YAML instruction list as a C++ std::vector<Instruction> literal.

    Each entry is an (opcode, X, N) triple (accessed positionally).
    """
    rendered = [
        ONE_INSTRUCTION.substitute(
            operator_name=entry[0],
            X=entry[1],
            N=entry[2],
        )
        for entry in instruction_list_from_yaml
    ]
    # lstrip("\n") drops the leading newline each template substitution adds.
    return INSTRUCTION_LIST.substitute(instruction_list="".join(rendered).lstrip("\n"))
201
+
202
+
203
def construct_constants(constants_list_from_yaml: List[Any]) -> str:
    """Render a YAML constants list as a C++ std::vector<c10::IValue> literal.

    Supports str, bool, None, and int constants; anything else raises
    ValueError.  Note bool is tested before int because bool subclasses int.
    """
    rendered_parts = []
    for raw in constants_list_from_yaml:
        if isinstance(raw, str):
            # Add quotes if it's string
            cpp_value = f'"{raw}"'
        elif isinstance(raw, bool):
            cpp_value = "true" if raw else "false"
        elif raw is None:
            cpp_value = ""
        elif isinstance(raw, int):
            cpp_value = str(raw)
        else:
            raise ValueError(
                f"The type of {raw} is {type(raw)}. "
                "Please add change in construct_constants function in gen_mobile_upgraders.py."
            )
        rendered_parts.append(ONE_CONSTANT.substitute(constant=cpp_value))
    if not rendered_parts:
        return CONSTANTS_LIST_EMPTY
    return CONSTANT_LIST.substitute(
        constant_list="".join(rendered_parts).lstrip("\n")
    )
227
+
228
+
229
def construct_operators(operator_list_from_yaml: List[Any]) -> str:
    """Render the YAML operator triples into a C++ operator-string-list snippet.

    Each YAML entry is [operator_name, overload_name, num_of_args].
    """
    rendered = [
        # NB: ONE_OPERATOTR_STRING is the (misspelled) module-level template name.
        ONE_OPERATOTR_STRING.substitute(
            operator_name=entry[0],
            overload_name=entry[1],
            num_of_args=entry[2],
        )
        for entry in operator_list_from_yaml
    ]
    return OPERATOR_STRING_LIST.substitute(
        operator_string_list="".join(rendered).lstrip("\n")
    )
242
+
243
+
244
def construct_types(types_tr_list_from_yaml: List[Any]) -> str:
    """Render the YAML type strings into a C++ type-list snippet.

    Returns TYPE_LIST_EMPTY when the YAML table has no types.
    """
    rendered = [
        ONE_TYPE.substitute(type_str=type_entry)
        for type_entry in types_tr_list_from_yaml
    ]
    if not rendered:
        return TYPE_LIST_EMPTY
    return TYPE_LIST.substitute(type_list="".join(rendered).lstrip("\n"))
251
+
252
+
253
def construct_register_size(register_size_from_yaml: int) -> str:
    """Validate and stringify the register size read from the upgrader YAML.

    Raises:
        ValueError: if the YAML value is not an int. (The second message
        line was previously missing its ``f`` prefix, so the ``{type(...)}``
        placeholder was emitted literally instead of the actual type.)
    """
    if not isinstance(register_size_from_yaml, int):
        raise ValueError(
            f"Input register size is {register_size_from_yaml} and"
            f"it's type is {type(register_size_from_yaml)}. An int type is expected."
        )
    return str(register_size_from_yaml)
260
+
261
+
262
def construct_version_maps(
    upgrader_bytecode_function_to_index_map: Dict[str, Any]
) -> str:
    """Build the operatorVersionMap C++ source fragment.

    Pulls the operator -> upgrader-entry map from torch._C, iterates it in
    operator-name order, and renders one OPERATOR_VERSION_MAP entry per
    operator that is not in EXCLUDED_OP_SET.
    """
    version_map = torch._C._get_operator_version_map()
    sorted_version_map = dict(sorted(version_map.items(), key=lambda item: item[0]))  # type: ignore[no-any-return]

    per_operator_parts = []
    for op_name in sorted_version_map:
        # TODO: remove the skip after these two operators schemas are fixed
        if op_name in EXCLUDED_OP_SET:
            continue
        upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
        upgrader_entries = sorted_version_map[op_name]
        # Ranges and entries are parallel lists; index idx pairs them up.
        assert len(upgrader_ranges) == len(upgrader_entries)
        upgrader_parts = []
        for idx, entry in enumerate(upgrader_entries):
            name = entry.upgrader_name
            upgrader_parts.append(
                ONE_UPGRADER_IN_VERSION_MAP.substitute(
                    upgrader_min_version=upgrader_ranges[idx].min_version,
                    upgrader_max_version=upgrader_ranges[idx].max_version,
                    upgrader_name=name,
                    bytecode_func_index=upgrader_bytecode_function_to_index_map[name],
                )
            )
        per_operator_parts.append(
            ONE_OPERATOR_IN_VERSION_MAP.substitute(
                operator_name=op_name,
                upgrader_list_in_version_map="".join(upgrader_parts),
            )
        )
    return OPERATOR_VERSION_MAP.substitute(
        operator_list_in_version_map="".join(per_operator_parts).lstrip("\n")
    )
302
+
303
+
304
def get_upgrader_bytecode_function_to_index_map(
    upgrader_dict: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """Assign a stable, sequential index to every upgrader function name.

    Names in the (misspelled, module-level) EXCLUE_UPGRADER_SET are skipped
    and consume no index.
    """
    name_to_index: Dict[str, Any] = {}
    next_index = 0
    for bytecode_entry in upgrader_dict:
        for name in bytecode_entry:
            if name in EXCLUE_UPGRADER_SET:
                continue
            name_to_index[name] = next_index
            next_index += 1
    return name_to_index
316
+
317
+
318
def write_cpp(cpp_path: str, upgrader_dict: List[Dict[str, Any]]) -> None:
    """Generate ``upgrader_mobile.cpp`` under ``cpp_path``.

    For each upgrader's bytecode tables (instructions, constants, operators,
    types, register_size) the matching construct_* helper renders a C++
    fragment; the fragments are assembled with the module templates and the
    operator version map, then written as UTF-8.

    Fixes vs. previous version: the old code built ``body_parts`` and
    ``final_output = "".join(body_parts)`` but then wrote
    ``upgrader_file_content`` directly, leaving ``final_output`` (and the
    dead ``body_string`` local) unused. Since ``body_parts`` held exactly
    that one string, writing the assembled content directly is identical
    behavior with the dead code removed.
    """
    upgrader_bytecode_function_to_index_map = (
        get_upgrader_bytecode_function_to_index_map(upgrader_dict)
    )
    version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
    all_upgrader_src_string = []
    for upgrader_bytecode in upgrader_dict:
        for upgrader_name, bytecode in upgrader_bytecode.items():
            # TODO: remove the skip after these two operators schemas are fixed
            if upgrader_name in EXCLUE_UPGRADER_SET:
                continue
            instruction_list_str = ""
            constant_list_str = ""
            type_list_str = ""
            register_size_str = ""
            operator_list_str = ""
            for table_name, contents in bytecode.items():
                element = ByteCode[table_name]
                if element is ByteCode.instructions:
                    instruction_list_str = construct_instruction(contents)
                elif element is ByteCode.constants:
                    constant_list_str = construct_constants(contents)
                elif element is ByteCode.operators:
                    operator_list_str = construct_operators(contents)
                elif element is ByteCode.types:
                    type_list_str = construct_types(contents)
                elif element is ByteCode.register_size:
                    register_size_str = construct_register_size(contents)

            one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
                upgrader_name=upgrader_name,
                instruction_list=instruction_list_str,
                constant_list=constant_list_str,
                type_list=type_list_str,
                register_size=register_size_str,
            )
            one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
                bytecode_function=one_upgrader_function_string.lstrip("\n"),
                operator_string_list=operator_list_str.lstrip("\n"),
            )
            all_upgrader_src_string.append(one_upgrader_src_string)

    upgrader_file_content = UPGRADER_CPP_SRC.substitute(
        operator_version_map=version_map_src,
        upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
    )
    print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME)
    with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
        out_file.write(upgrader_file_content.encode("utf-8"))
371
+
372
+
373
def sort_upgrader(upgrader_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Return the upgraders sorted by each dict's first key (the upgrader name)."""
    return sorted(upgrader_list, key=lambda entry: next(iter(entry)))
378
+
379
+
380
def main() -> None:
    """Regenerate torch/csrc/jit/mobile/upgrader_mobile.cpp from upgrader bytecode."""
    sorted_upgrader_list = sort_upgrader(generate_upgraders_bytecode())
    for upgrader in sorted_upgrader_list:
        print("after sort upgrader : ", next(iter(upgrader)))

    # This script lives two directories below the pytorch repo root.
    pytorch_dir = Path(__file__).resolve().parents[2]
    upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
    write_cpp(str(upgrader_path), sorted_upgrader_list)
389
+
390
+
391
+ if __name__ == "__main__":
392
+ main()
env-llmeval/lib/python3.10/site-packages/torchgen/operator_versions/gen_mobile_upgraders_constant.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
2
+ * @generated
3
+ * This is an auto-generated file. Please do not modify it by hand.
4
+ * To re-generate, please run:
5
+ * cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
6
+ */
7
+ """
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/native/native_functions.yaml ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/native/tags.yaml ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This yaml file contains all the possible tags that can be defined in `tags` in `native_functions.yaml`
2
+
3
+ - tag: inplace_view
4
+ desc: |
5
+ This tag indicates if an operator *only* modifies the tensor metadata
6
+ - tag: pt2_compliant_tag
7
+ desc: |
8
+ This tag indicates if the operator is guaranteed to
9
+ work with the PT2 compilation APIs (torch.compile,
10
+ torch.export, etc). If you add this tag to an
11
+ operator, please use
12
+ `torch.testing._internal.optest.opcheck` to test that
13
+ the operator has been registered correctly and
14
+ works with torch.compile
15
+ - tag: view_copy
16
+ desc: |
17
+ This tag indicates operators that are *_copy* variants
18
+ of view/aliasing operators. If an operator has a view_copy tag,
19
+ then it should have the name {op}_copy, where {op} is a view operator.
20
+ - tag: dynamic_output_shape
21
+ desc: |
22
+ This tag indicates if an operator's output's shape depends on input Tensor
23
+ data.
24
+ - tag: data_dependent_output
25
+ desc: |
26
+ Operator has a non-Tensor output whose value is dependent on the data
27
+ of Tensor inputs. Among other things, this implies that this operator
28
+ cannot be run with meta tensor (since data is not available), nor
29
+ can it be symbolically traced.
30
+ - tag: generated
31
+ desc: |
32
+ This tag indicates that the operator doesn't have an explicit entry in
33
+ native_functions.yaml, and instead was generated automatically by the codegen.
34
+ - tag: nondeterministic_seeded
35
+ desc: |
36
+ This tag indicates if an operator is nondeterministically seeded
37
+ (i.e., is random) such that the operator intentionally produces
38
+ different results when run twice on the same inputs, but this randomness
39
+ is controlled by a Generator which, if reseeded would give you the
40
+ same result.
41
+ - tag: nondeterministic_bitwise
42
+ desc: |
43
+ This tag indicates if an operator doesn't guarantee bitwise equivalence
44
+ across different runs of an operator with identical inputs.
45
+
46
+ # NOTE [Core ATen Ops]
47
+ - tag: core
48
+ desc: |
49
+ Core aten ops is a subset of aten ops that remains after aten-to-aten decomposition and
50
+ functionalization pass. Core aten ops are fully functional and adhere to single static
51
+ assignment (SSA): this implies there will be no `inplace` or `_out` variants in this opset.
52
+ This opset is designed to serve as the functional IR to interface with compiler backends.
53
+ In contrast to primTorch, core aten opset doesn't decompose ops into explicit
54
+ type promotion and broadcasting ops.
55
+ Core aten ops is also effectively the opset produced by torchdynamo.export(aten_graph=True),
56
+ and thus can be used as an opset for export purpose.
57
+ - tag: pointwise
58
+ desc: |
59
+ Pointwise operators are operators where each element of the output is computed only by accessing
60
+ the corresponding element of all the broadcasted inputs. The output shape will be the broadcasted
61
+ shape of the inputs.
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/ATenOpList.cpp ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/core/ATenOpList.h>
2
+
3
+ #include <string>
4
+ #include <cstring>
5
+ #include <utility>
6
+ #include <unordered_set>
7
+ #include <ATen/core/operator_name.h>
8
+
9
+ // ${generated_comment}
10
+
11
+ namespace at {
12
+
13
+ namespace {
14
+ struct OpNameEquals final {
15
+ bool operator()(const std::pair<const char*, const char*>& lhs, const std::pair<const char*, const char*>& rhs) const {
16
+ return 0 == strcmp(lhs.first, rhs.first) && 0 == strcmp(lhs.second, rhs.second);
17
+ }
18
+ };
19
+
20
+ struct OpNameHash final {
21
+ size_t operator()(const std::pair<const char*, const char*>& p) const {
22
+ // use std::hash<std::string> because std::hash<const char*> would hash pointers and not pointed-to strings
23
+ return std::hash<std::string>()(p.first) ^ (~ std::hash<std::string>()(p.second));
24
+ }
25
+ };
26
+ }
27
+
28
+ bool is_custom_op(const c10::OperatorName& opName) {
29
+ static std::unordered_set<std::pair<const char*, const char*>, OpNameHash, OpNameEquals> ops {
30
+ ${aten_ops}
31
+ {"", ""}
32
+ };
33
+ return ops.count(std::make_pair(
34
+ opName.name.c_str(), opName.overload_name.c_str())) == 0;
35
+ }
36
+ }
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/CompositeViewCopyKernels.cpp ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
2
+ // ${generated_comment}
3
+
4
+ #include <ATen/InferSize.h>
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/native/Resize.h>
7
+
8
+ #ifndef AT_PER_OPERATOR_HEADERS
9
+ #include <ATen/Operators.h>
10
+ #else
11
+ #include <ATen/ops/clone.h>
12
+ $ops_headers
13
+ #endif
14
+
15
+ namespace at {
16
+ namespace native {
17
+
18
+ // This file contains a number of kernels for aten functions that are fully code-generated.
19
+ // TODO: rename this file to something more generic.
20
+
21
+ namespace {
22
+ at::Tensor clone_arg(const at::Tensor& t) {
23
+ return t.clone();
24
+ }
25
+
26
+ std::vector<at::Tensor> clone_arg(const at::TensorList& t_list) {
27
+ std::vector<at::Tensor> out(t_list.size());
28
+ for (const auto& i : c10::irange(t_list.size())) {
29
+ out[i] = t_list[i].clone();
30
+ }
31
+ return out;
32
+ }
33
+
34
+ // duped with gen_resize_out_helper from structured kernels
35
+ void copy_arg(const at::Tensor& dst, const at::Tensor& src) {
36
+ TORCH_CHECK(src.dtype() == dst.dtype(),
37
+ "Expected out tensor to have dtype ", src.dtype(), ", but got ", dst.dtype(), " instead");
38
+ TORCH_CHECK(src.device() == dst.device(),
39
+ "Expected out tensor to have device ", src.device(), ", but got ", dst.device(), " instead");
40
+ dst.copy_(src);
41
+ }
42
+
43
+ void copy_arg(const at::TensorList& dst, const at::TensorList& src) {
44
+ TORCH_INTERNAL_ASSERT(dst.size() == src.size());
45
+ for (const auto& i : c10::irange(dst.size())) {
46
+ copy_arg(dst[i], src[i]);
47
+ }
48
+ }
49
+
50
+ // TODO: this doesn't handle restriding empty tensors correctly; see
51
+ // gen_resize_out_helper for the correct algorithm
52
+
53
+ void resize_out_helper(const at::Tensor& dst, const at::Tensor& src) {
54
+ at::native::resize_output(dst, src.sizes());
55
+ }
56
+
57
+ void resize_out_helper(const at::TensorList& dst, const at::TensorList& src) {
58
+ TORCH_INTERNAL_ASSERT(dst.size() == src.size());
59
+ for (const auto& i : c10::irange(dst.size())) {
60
+ at::native::resize_output(dst[i], src[i].sizes());
61
+ }
62
+ }
63
+ }
64
+
65
+
66
+ ${CompositeViewCopyKernel_Definitions}
67
+
68
+ ${GeneratedCompositeFunctional_Definitions}
69
+
70
+ ${GeneratedCompositeOut_Definitions}
71
+
72
+ } // namespace native
73
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunction.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // ${generated_comment}
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace ${dispatch_namespace} {
19
+
20
+ ${dispatch_namespaced_declarations}
21
+
22
+ } // namespace ${dispatch_namespace}
23
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/core/TensorBody.h>
2
+
3
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
4
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
5
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
6
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
7
+ //
8
+ // Note [Avoiding Include Cycles In Static Dispatch]
9
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
10
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
11
+ //
12
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
13
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
14
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
15
+ // directly inlined into TensorBody.h.
16
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
17
+ // which include functions that have defaultable optional<Tensor> arguments.
18
+ // That requires knowing the full Tensor class definition.
19
+ //
20
+ // We break the cycle by doing the following:
21
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
22
+ // - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
23
+ // - CPUFunctions_inl.h includes everything else
24
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
25
+ // and then it includes CPUFunctions_inl.h.
26
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
27
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
28
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
29
+ ${inline_headers}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunctions_inl.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // ${generated_comment}
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
12
+ #error This change adds a dependency on all pytorch operators, meaning the \
13
+ file will need to be re-compiled every time an operator is changed or added. \
14
+ Consider including a specific operator from \
15
+ <ATen/ops/{my_operator}_${dispatch_namespace}_dispatch.h>. \
16
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
17
+ #endif
18
+
19
+ ${DispatchKeyFunctions_inl_includes}
20
+
21
+
22
+ ${dispatch_namespaced_declarations}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.cpp ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // ${generated_comment}
2
+ ${includes}
3
+ ${native_functions_include}
4
+
5
+ namespace {
6
+ ${helper_fns}
7
+ } // namespace
8
+
9
+ ${namespace_prologue}
10
+
11
+ ${native_function_definitions}
12
+
13
+ ${namespace_epilogue}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Function.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+ ${static_dispatch_ops_headers}
19
+
20
+ ${operator_includes}
21
+
22
+ namespace at {
23
+
24
+ ${function_definitions}
25
+
26
+ }
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/FunctionalInverses.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <ATen/Tensor.h>
6
+
7
+ namespace at {
8
+ namespace functionalization {
9
+
10
+ struct FunctionalInverses {
11
+
12
+ ${view_inverse_declarations}
13
+
14
+ };
15
+ }
16
+ }
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.cpp ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <array>
2
+
3
+ #include <ATen/Functions.h>
4
+ #include <ATen/Utils.h>
5
+ #include <c10/core/Allocator.h>
6
+
7
+ namespace at {
8
+
9
+ Tensor TensorMaker::make_tensor() {
10
+ AutoDispatchBelowADInplaceOrView guard{}; // TODO: Remove.
11
+ tracer::impl::NoTracerDispatchMode tracer_guard{};
12
+
13
+ check_size_nonnegative(sizes_);
14
+
15
+ TORCH_CHECK_VALUE(
16
+ !deleter_ || !ctx_,
17
+ "The deleter and context arguments are mutually exclusive.");
18
+
19
+ if (device_ == nullopt) {
20
+ device_ = globalContext().getDeviceFromPtr(data_, opts_.device().type());
21
+ }
22
+
23
+ if (opts_.device().has_index()) {
24
+ // clang-format off
25
+ TORCH_CHECK_VALUE(
26
+ opts_.device() == *device_,
27
+ "Specified device ", opts_.device(), " does not match device of data ", *device_);
28
+ // clang-format on
29
+ }
30
+
31
+ std::size_t size_bytes = computeStorageSize();
32
+
33
+ DataPtr data_ptr{};
34
+ if (deleter_) {
35
+ data_ptr = makeDataPtrFromDeleter();
36
+ } else {
37
+ data_ptr = makeDataPtrFromContext();
38
+ }
39
+
40
+ TORCH_CHECK(!resizeable_ || allocator_ != nullptr, "Must specify an allocator with allocator() if you want to use resizeable_storage()");
41
+ Storage storage{Storage::use_byte_size_t{}, size_bytes, std::move(data_ptr), /*allocator=*/allocator_, /*resizeable=*/resizeable_};
42
+
43
+ Tensor tensor = detail::make_tensor<TensorImpl>(
44
+ std::move(storage), opts_.computeDispatchKey(), opts_.dtype());
45
+
46
+ TensorImpl* tensor_impl = tensor.unsafeGetTensorImpl();
47
+ if (strides_) {
48
+ tensor_impl->set_sizes_and_strides(sizes_, *strides_);
49
+ } else {
50
+ tensor_impl->set_sizes_contiguous(sizes_);
51
+ }
52
+ if (storage_offset_) {
53
+ tensor_impl->set_storage_offset(*storage_offset_);
54
+ }
55
+
56
+ return tensor;
57
+ }
58
+
59
+ std::size_t TensorMaker::computeStorageSize() const noexcept {
60
+ std::size_t itemsize = opts_.dtype().itemsize();
61
+
62
+ if (strides_) {
63
+ auto storage_size = detail::computeStorageNbytes(sizes_, *strides_, itemsize);
64
+ if (storage_offset_) {
65
+ storage_size += storage_offset_.value();
66
+ }
67
+ return storage_size;
68
+ }
69
+
70
+ std::size_t size = 1;
71
+ for (std::int64_t s : sizes_) {
72
+ size *= static_cast<std::size_t>(s);
73
+ }
74
+ auto storage_size = size * itemsize;
75
+ if (storage_offset_) {
76
+ storage_size += storage_offset_.value();
77
+ }
78
+ return storage_size;
79
+ }
80
+
81
+ inline DataPtr TensorMaker::makeDataPtrFromDeleter() const {
82
+ return InefficientStdFunctionContext::makeDataPtr(data_, deleter_, *device_);
83
+ }
84
+
85
+ inline DataPtr TensorMaker::makeDataPtrFromContext() noexcept {
86
+ return DataPtr{data_, ctx_.release(), ctx_.get_deleter(), *device_};
87
+ }
88
+
89
+ IntArrayRef TensorMaker::makeTempSizes() const noexcept {
90
+ static std::int64_t zeros[5] = {0, 0, 0, 0, 0};
91
+ if (opts_.has_memory_format()) {
92
+ MemoryFormat format = *opts_.memory_format_opt();
93
+ if (format == MemoryFormat::ChannelsLast) {
94
+ return IntArrayRef(zeros, 4);
95
+ }
96
+ if (format == MemoryFormat::ChannelsLast3d) {
97
+ return IntArrayRef(zeros, 5);
98
+ }
99
+ }
100
+ return IntArrayRef(zeros, 1);
101
+ }
102
+
103
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.h ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #ifdef TORCH_ASSERT_NO_OPERATORS
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if your change would be better placed in \
9
+ another file, or if a more specific header might achieve the same goal. \
10
+ See NOTE: [Tensor vs. TensorBase]
11
+ #endif
12
+
13
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
14
+ #error This change adds a dependency on all pytorch operators, meaning the \
15
+ file will need to be re-compiled every time an operator is changed or added. \
16
+ Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
17
+ see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
18
+ #endif
19
+
20
+ // NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
21
+ //
22
+ // In ATen, certain generated headers files include the definitions of
23
+ // every single operator in PyTorch. Unfortunately this means every
24
+ // time an operator signature is updated or changed in
25
+ // native_functions.yaml, you (and every other PyTorch developer) need
26
+ // to recompile every source file that includes any of these headers.
27
+ //
28
+ // To break up these header dependencies, and improve incremental
29
+ // build times for all PyTorch developers. These headers are split
30
+ // into per-operator headers in the `ATen/ops` folder. This limits
31
+ // incremental builds to only changes to methods of `Tensor`, or files
32
+ // that use the specific operator being changed. With `at::sum` as an
33
+ // example, you should include
34
+ //
35
+ // <ATen/ops/sum.h> // instead of ATen/Functions.h
36
+ // <ATen/ops/sum_native.h> // instead of ATen/NativeFunctions.h
37
+ // <ATen/ops/sum_ops.h> // instead of ATen/Operators.h
38
+ // <ATen/ops/sum_cpu_dispatch.h> // instead of ATen/CPUFunctions.h
39
+ //
40
+ // However, even if you're careful to use this in your own code.
41
+ // `Functions.h` might be included indirectly through another header
42
+ // without you realising. To avoid this, you can add
43
+ //
44
+ // #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
45
+ //
46
+ // to the top of your source file. This way any time the non-specific
47
+ // headers are included, the compiler will error out.
48
+ //
49
+ // Also, be aware that `ops` are not available in all build
50
+ // configurations (namely fb-internal) so you must guard these
51
+ // includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
52
+ //
53
+ // #ifndef AT_PER_OPERATOR_HEADERS
54
+ // #include <ATen/Functions.h>
55
+ // #else
56
+ // #include <ATen/ops/sum.h>
57
+ // #endif
58
+
59
+ #include <ATen/Context.h>
60
+ #include <ATen/DeviceGuard.h>
61
+ #include <ATen/TensorUtils.h>
62
+ #include <ATen/TracerMode.h>
63
+ #include <ATen/core/Generator.h>
64
+ #include <ATen/core/Reduction.h>
65
+ #include <c10/core/SymInt.h>
66
+ #include <ATen/core/Tensor.h>
67
+ #include <c10/core/Scalar.h>
68
+ #include <c10/core/Storage.h>
69
+ #include <c10/core/TensorOptions.h>
70
+ #include <c10/util/Deprecated.h>
71
+ #include <c10/util/Optional.h>
72
+ #include <c10/util/OptionalArrayRef.h>
73
+
74
+ #include <ATen/ops/from_blob.h>
75
+ #include <ATen/ops/tensor.h>
76
+
77
+ ${Functions_includes}
78
+
79
+ namespace at {
80
+
81
+ ${Functions_declarations}
82
+
83
+ // Special C++ only overloads for std()-like functions (See gh-40287)
84
+ // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
85
+ // So, for example std(0) would select the std(unbiased=False) overload
86
+ TORCH_API inline Tensor var(const Tensor& self, int dim) {
87
+ return at::var(self, IntArrayRef{dim});
88
+ }
89
+ TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
90
+ return at::var_mean(self, IntArrayRef{dim});
91
+ }
92
+ TORCH_API inline Tensor std(const Tensor& self, int dim) {
93
+ return at::std(self, IntArrayRef{dim});
94
+ }
95
+ TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
96
+ return at::std_mean(self, IntArrayRef{dim});
97
+ }
98
+
99
+ inline int64_t numel(const Tensor& tensor) {
100
+ return tensor.numel();
101
+ }
102
+
103
+ inline int64_t size(const Tensor& tensor, int64_t dim) {
104
+ return tensor.size(dim);
105
+ }
106
+
107
+ inline int64_t stride(const Tensor& tensor, int64_t dim) {
108
+ return tensor.stride(dim);
109
+ }
110
+
111
+ inline bool is_complex(const Tensor& tensor) {
112
+ return tensor.is_complex();
113
+ }
114
+
115
+ inline bool is_floating_point(const Tensor& tensor) {
116
+ return tensor.is_floating_point();
117
+ }
118
+
119
+ inline bool is_signed(const Tensor& tensor) {
120
+ return tensor.is_signed();
121
+ }
122
+
123
+ inline bool is_inference(const Tensor& tensor) {
124
+ return tensor.is_inference();
125
+ }
126
+
127
+ inline bool _is_zerotensor(const Tensor& tensor) {
128
+ return tensor._is_zerotensor();
129
+ }
130
+
131
+ inline bool is_conj(const Tensor& tensor) {
132
+ return tensor.is_conj();
133
+ }
134
+
135
+ inline Tensor conj(const Tensor& tensor) {
136
+ return tensor.conj();
137
+ }
138
+
139
+ inline bool is_neg(const Tensor& tensor) {
140
+ return tensor.is_neg();
141
+ }
142
+
143
+ }
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyNonNativeIr.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ ${lazy_non_native_ir_inc}
4
+
5
+ // This file contains autogenerated LazyTensor Non Native IR nodes
6
+
7
+ ${namespace_prologue}
8
+
9
+ ${non_native_ir_nodes}
10
+
11
+ ${namespace_epilogue}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunctions.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #ifdef TORCH_ASSERT_NO_OPERATORS
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if your change would be better placed in \
9
+ another file, or if a more specific header might achieve the same goal. \
10
+ See NOTE: [Tensor vs. TensorBase]
11
+ #endif
12
+
13
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
14
+ #error This change adds a dependency on all pytorch operators, meaning the \
15
+ file will need to be re-compiled every time an operator is changed or added. \
16
+ Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
17
+ and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
18
+ #endif
19
+
20
+ #include <c10/core/Scalar.h>
21
+ #include <c10/core/Storage.h>
22
+ #include <c10/core/TensorOptions.h>
23
+ #include <c10/util/Deprecated.h>
24
+ #include <c10/util/Optional.h>
25
+ #include <c10/core/QScheme.h>
26
+ #include <ATen/core/Reduction.h>
27
+ #include <ATen/core/Tensor.h>
28
+ #include <tuple>
29
+ #include <vector>
30
+
31
+ ${NativeFunctions_includes}
32
+
33
+ ${NativeFunctions_declarations}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunction.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ ${meta_function_declarations}
21
+
22
+ } // namespace native
23
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <ATen/core/Tensor.h>
6
+ #include <ATen/core/IListRef.h>
7
+ #include <ATen/TensorMeta.h>
8
+ #include <ATen/TensorIterator.h>
9
+
10
+ ${NativeMetaFunctions_includes}
11
+
12
+ namespace at {
13
+
14
+ namespace meta {
15
+
16
+ ${NativeMetaFunctions_declarations}
17
+
18
+ } // namespace meta
19
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operator.h ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+ ${declarations}
17
+
18
+ }} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/Tensor.h>
2
+ #include <ATen/core/dispatch/Dispatcher.h>
3
+
4
+ // ${generated_comment}
5
+ // NOTE See [Sharded File] comment in VariableType
6
+
7
+ #ifndef AT_PER_OPERATOR_HEADERS
8
+ #include <ATen/Operators.h>
9
+ #else
10
+ ${operator_headers}
11
+ #endif
12
+
13
+ ${static_dispatch_extra_headers}
14
+
15
+ namespace at { namespace _ops {
16
+
17
+ ${definitions}
18
+
19
+ }} // namespace at::_ops