applied-ai-018 committed
Commit 71a0112 · verified · 1 Parent(s): 7a1062e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torchgen/api/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torchgen/api/cpp.py +467 -0
  13. env-llmeval/lib/python3.10/site-packages/torchgen/api/dispatcher.py +118 -0
  14. env-llmeval/lib/python3.10/site-packages/torchgen/api/functionalization.py +176 -0
  15. env-llmeval/lib/python3.10/site-packages/torchgen/api/meta.py +12 -0
  16. env-llmeval/lib/python3.10/site-packages/torchgen/api/native.py +153 -0
  17. env-llmeval/lib/python3.10/site-packages/torchgen/api/python.py +1481 -0
  18. env-llmeval/lib/python3.10/site-packages/torchgen/api/translate.py +430 -0
  19. env-llmeval/lib/python3.10/site-packages/torchgen/api/ufunc.py +209 -0
  20. env-llmeval/lib/python3.10/site-packages/torchgen/api/unboxing.py +248 -0
  21. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h +19 -0
  22. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h +19 -0
  23. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h +24 -0
  24. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h +17 -0
  25. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp +54 -0
  26. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel +4 -0
  27. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl +14 -0
  39. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py +31 -0
  40. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py +129 -0
  41. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py +613 -0
  42. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py +1377 -0
  43. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py +115 -0
  44. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py +2164 -0
  45. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/load_derivatives.py +1011 -0
  46. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp +35 -0
  47. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp +20 -0
  48. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h +51 -0
  49. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp +40 -0
  50. env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.cpp +65 -0
env-llmeval/lib/python3.10/site-packages/torchgen/api/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/autograd.cpython-310.pyc ADDED
Binary file (17 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/cpp.cpython-310.pyc ADDED
Binary file (9.17 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/dispatcher.cpython-310.pyc ADDED
Binary file (2.68 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/lazy.cpython-310.pyc ADDED
Binary file (11.4 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc ADDED
Binary file (417 Bytes).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc ADDED
Binary file (27.8 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/structured.cpython-310.pyc ADDED
Binary file (3.67 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/translate.cpython-310.pyc ADDED
Binary file (7.43 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/ufunc.cpython-310.pyc ADDED
Binary file (4.59 kB).

env-llmeval/lib/python3.10/site-packages/torchgen/api/__pycache__/unboxing.cpython-310.pyc ADDED
Binary file (4.33 kB).
env-llmeval/lib/python3.10/site-packages/torchgen/api/cpp.py ADDED
@@ -0,0 +1,467 @@
from typing import List, Optional, Sequence, Set, Union

from torchgen import local
from torchgen.api.types import (
    ArgName,
    ArrayCType,
    ArrayRefCType,
    BaseCType,
    BaseTypeToCppMapping,
    Binding,
    boolT,
    ConstRefCType,
    CType,
    dimnameListT,
    intArrayRefT,
    iTensorListRefT,
    ListCType,
    longT,
    MutRefCType,
    NamedCType,
    OptionalCType,
    optionalIntArrayRefT,
    optionalSymIntArrayRefT,
    scalarT,
    SpecialArgName,
    symIntArrayRefT,
    SymIntT,
    tensorListT,
    tensorOptionsT,
    tensorT,
    TupleCType,
    VectorCType,
    voidT,
)
from torchgen.model import (
    Argument,
    Arguments,
    BaseTy,
    BaseType,
    FunctionSchema,
    ListType,
    NativeFunction,
    OptionalType,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never

# This file describes the translation of JIT schema to the public C++
# API, which is what people use when they call functions like at::add.
#
# Prominent characteristics of the C++ API:
#
# - dtype, layout, device and pin_memory are collected into
#   a single C++ type TensorOptions (the native functions API
#   also has this, but tensor options is really most relevant
#   for the C++ API; it makes calling kwarg factory functions
#   pleasant)
#
# - defaulting lives here (in fact, the dispatcher is completely
#   oblivious of defaults!)
#
# BTW: policy on name collisions: we try not to have types with
# collisions, but functions are fair game to collide


def name(
    func: FunctionSchema,
    *,
    faithful_name_for_out_overloads: bool = False,
    symint_overload: bool = False,
) -> str:
    name = str(func.name.name)
    if symint_overload:
        name += "_symint"
    if func.is_out_fn():
        if faithful_name_for_out_overloads:
            name += "_outf"
        else:
            name += "_out"

    return name


# Translation of "value types" in JIT schema to C++ API type. Value
# types look the same no matter if they are argument types or return
# types. Returns None if the type in question is not a value type.
def valuetype_type(
    t: Type,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> Optional[NamedCType]:
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
            return None
        elif str(t) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(SymIntT))
            else:
                return NamedCType(binds, BaseCType(longT))
        if remove_non_owning_ref_types:
            if t.name == BaseTy.str:
                raise AssertionError(
                    "string ref->value conversion: not implemented yet"
                )
        # All other BaseType currently map directly to BaseCppTypes.
        return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
    elif isinstance(t, OptionalType):
        elem = valuetype_type(t.elem, binds=binds, symint=symint)
        if elem is None:
            return None
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        if str(t.elem) == "bool":
            assert t.size is not None
            return NamedCType(binds, ArrayCType(BaseCType(boolT), t.size))
        else:
            return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")


# Translation of types occurring in JIT arguments to a C++ argument type.
# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
# For example, we'll return std::vector<int> instead of IntArrayRef.
# See Note [translation from C++ reference to value types]
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> NamedCType:
    # If it's a value type, do the value type translation
    r = valuetype_type(
        t,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(
                    binds, MutRefCType(BaseCType(tensorT))
                )  # TODO: fix this discrepancy
            else:
                return NamedCType(
                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
                )
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(optionalSymIntArrayRefT))
            else:
                return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: remove these special cases, ArrayRef fallthrough works fine
        if str(t.elem) == "int":
            if remove_non_owning_ref_types:
                return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "SymInt":
            if remove_non_owning_ref_types:
                if symint:
                    return NamedCType(binds, VectorCType(BaseCType(SymIntT)))
                else:
                    return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                if symint:
                    return NamedCType(binds, BaseCType(symIntArrayRefT))
                else:
                    return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "Tensor":
            if local.use_ilistref_for_tensor_lists():
                return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
            else:
                return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ArrayRefCType(BaseCType(scalarT)))
        elif str(t.elem) == "Dimname":
            return NamedCType(binds, BaseCType(dimnameListT))
        elif str(t.elem) == "Tensor?":
            return NamedCType(
                binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
            )
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")


# Translate a JIT argument into its C++ type
def argument_type(a: Argument, *, binds: ArgName, symint: bool = False) -> NamedCType:
    return argumenttype_type(a.type, mutable=a.is_write, symint=symint, binds=binds)


# Translation of a (non-multi) return type from JIT to C++
# N.B: returntype_type returns a CType, not a NamedCType.
# This is mostly because of the mismatch between return types and return names.
# e.g. a function with a return type of 'void' has 0 return names,
# and a function with a return type of 'std::tuple' has >1 return name.
def returntype_type(t: Type, *, mutable: bool, symint: bool = False) -> CType:
    # placeholder is ignored
    # NB: symint is ALWAYS respected for return types. So symint argument
    # here is IGNORED
    r = valuetype_type(t, binds="__placeholder__", symint=True)
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        assert (
            not mutable
        ), "Native functions should never return a mutable tensor list. They should return void."
        elem = returntype_type(t.elem, mutable=False)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)
    elif isinstance(t, OptionalType):
        elem = returntype_type(t.elem, mutable=mutable)
        if str(t.elem) == "Tensor":
            return OptionalCType(elem)

    raise AssertionError(f"unrecognized return type {t}")


# Translation of a single return to its C++ type
def return_type(r: Return, *, symint: bool = False) -> CType:
    return returntype_type(r.type, mutable=r.is_write, symint=symint)


# Translation of a full (possibly multi) return from JIT to its C++ type
def returns_type(rs: Sequence[Return], *, symint: bool = False) -> CType:
    if len(rs) == 0:
        return BaseCType(voidT)
    elif len(rs) == 1:
        return return_type(rs[0], symint=symint)
    else:
        return TupleCType([return_type(r, symint=symint) for r in rs])


def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    returns: List[str] = []
    for i, r in enumerate(f.func.returns):
        # If we have an inplace function, the return argument is
        # implicitly named self.
        # TODO: Consider incorporating this into the data model
        if f.func.name.name.inplace:
            assert i == 0, "illegal inplace function with multiple returns"
            name = "self"
        # If we are out function, the name is the name of the
        # corresponding output function (r.name will get recorded
        # in field_name later.)
        elif f.func.is_out_fn():
            name = f.func.arguments.out[i].name
        # If the return argument is explicitly named...
        elif r.name:
            name_conflict = any(
                r.name == a.name for a in f.func.schema_order_arguments()
            )
            if name_conflict and not f.func.is_out_fn():
                name = f"{r.name}_return"
            else:
                name = r.name
        # If there is no explicit name and no fallback name was passed in, we just name the output result,
        # unless it's a multi-return, in which case it's result0,
        # result1, etc (zero-indexed)
        else:
            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
        returns.append(name)
    return returns


JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "c10::nullopt",  # UGH this one is type directed
    "Mean": "at::Reduction::Mean",
    "[]": "{}",
    "contiguous_format": "MemoryFormat::Contiguous",
    "long": "at::kLong",
}


# Convert a JIT default into C++ expression representing the default
def default_expr(d: str, t: Type, *, symint: bool) -> str:
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            s = ""
            i = 1
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            return "c10::nullopt"

        return default_expr(d, t.elem, symint=symint)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            return "{" + d[1:-1] + "}"
        elif symint and d.isdigit() and str(t.elem) == "SymInt":
            return f"c10::SymInt({d})"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    return JIT_TO_CPP_DEFAULT.get(d, d)


# Convert an argument into its C++ API form


def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument],
    *,
    cpp_no_default_args: Set[str],
    method: bool,
    faithful: bool,
    symint: bool = False,
    has_tensor_options: bool,
) -> List[Binding]:
    def sub_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Binding]:
        return argument(
            a,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            symint=symint,
            has_tensor_options=has_tensor_options,
        )

    if isinstance(a, Argument):
        binds: ArgName
        if a.name == "memory_format" and has_tensor_options:
            binds = SpecialArgName.possibly_redundant_memory_format
        else:
            binds = a.name
        default: Optional[str] = None
        if a.name not in cpp_no_default_args and a.default is not None:
            default = default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=binds, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, TensorOptionsArguments):
        if faithful:
            return (
                sub_argument(a.dtype)
                + sub_argument(a.layout)
                + sub_argument(a.device)
                + sub_argument(a.pin_memory)
            )
        else:
            default = None
            # Enforced by NativeFunction.__post_init__
            assert "options" not in cpp_no_default_args
            if all(x.default == "None" for x in a.all()):
                default = "{}"
            elif a.dtype.default == "long":
                default = "at::kLong"  # TODO: this is wrong
            return [
                Binding(
                    nctype=NamedCType("options", BaseCType(tensorOptionsT)),
                    name="options",
                    default=default,
                    argument=a,
                )
            ]
    elif isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        else:
            return sub_argument(a.argument)
    else:
        assert_never(a)


def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    symint: bool = False,
    method: bool,
    cpp_no_default_args: Set[str],
) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    if faithful:
        args.extend(arguments.non_out)
        args.extend(arguments.out)
    else:
        args.extend(arguments.out)
        args.extend(arguments.non_out)
    return [
        r.no_default() if faithful else r
        for a in args
        for r in argument(
            a,
            faithful=faithful,
            symint=symint,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        )
    ]
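A minimal usage sketch for torchgen.api.cpp (illustrative only, not part of this commit): it assumes torchgen is importable and that FunctionSchema.parse accepts the hand-written, native_functions.yaml-style schema string below; the expected values in comments follow from the code above, not from the diff.

# Sketch: how cpp.name and cpp.arguments behave for an out-variant schema.
from torchgen.api import cpp
from torchgen.model import FunctionSchema

schema = FunctionSchema.parse(
    "add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
)

# Out overloads get an "_out" suffix by default and "_outf" for the faithful form.
print(cpp.name(schema))                                        # expected: add_out
print(cpp.name(schema, faithful_name_for_out_overloads=True))  # expected: add_outf

# The user-facing (non-faithful) C++ signature puts the out argument first.
bindings = cpp.arguments(
    schema.arguments,
    faithful=False,
    method=False,
    cpp_no_default_args=set(),
)
print([b.name for b in bindings])  # expected: ['out', 'self', 'other', 'alpha']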
env-llmeval/lib/python3.10/site-packages/torchgen/api/dispatcher.py ADDED
@@ -0,0 +1,118 @@
import itertools
from typing import List, Sequence, Union

from torchgen.api import cpp

from torchgen.api.types import ArgName, Binding, CType, NamedCType
from torchgen.model import (
    Argument,
    FunctionSchema,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never, concatMap

# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched
# the C++ API, but with the establishment of the boxed API, we've
# made changes to the dispatcher API to so that the unboxed API
# better aligns with the boxed API. The dispatcher API hooks heavily
# into our template based boxing/unboxing machinery, so changes
# to this convention will usually need template updates too.
#
# Prominent characteristics of the dispatcher API:
#
# - dtype, layout, device and pin_memory are represented as separate
#   arguments.
#


def name(func: FunctionSchema) -> str:
    return cpp.name(func)


def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    # This is a faux amis. If it makes sense in the future to add
    # more special cases here, or invert things so cpp.argument_type
    # calls this, or just completely inline the function, please do
    # it.
    return cpp.argumenttype_type(
        t,
        mutable=mutable,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )


def argument_type(
    a: Argument,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    return argumenttype_type(
        a.type,
        mutable=a.is_write,
        binds=binds,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
        symint=symint,
    )


def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType:
    # At present, there is no difference. But there could be!
    return cpp.returns_type(rs, symint=symint)


def jit_arguments(func: FunctionSchema) -> List[Argument]:
    def to_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Argument]:
        if isinstance(a, Argument):
            return [a]
        elif isinstance(a, SelfArgument):
            return [a.argument]
        elif isinstance(a, TensorOptionsArguments):
            return [a.dtype, a.layout, a.device, a.pin_memory]
        else:
            assert_never(a)

    return list(
        concatMap(
            to_argument,
            itertools.chain(
                func.arguments.positional, func.arguments.kwarg_only, func.arguments.out
            ),
        )
    )


def argument(
    a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True
) -> Binding:
    return Binding(
        nctype=argument_type(
            a,
            binds=a.name,
            remove_non_owning_ref_types=remove_non_owning_ref_types,
            symint=symint,
        ),
        name=a.name,
        argument=a,
    )


def arguments(func: FunctionSchema, *, symint: bool = True) -> List[Binding]:
    return [argument(a, symint=symint) for a in jit_arguments(func)]
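A small sketch of the dispatcher convention described above (illustrative only, not part of this commit): the schema string is hand-written here, and the expected output assumes FunctionSchema.parse groups dtype/layout/device/pin_memory into TensorOptionsArguments as usual, which jit_arguments then flattens back into separate arguments.

# Sketch: the dispatcher API keeps tensor options scattered, not packed.
from torchgen.api import dispatcher
from torchgen.model import FunctionSchema

schema = FunctionSchema.parse(
    "ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, "
    "Device? device=None, bool? pin_memory=None) -> Tensor"
)

print([b.name for b in dispatcher.arguments(schema)])
# expected: ['size', 'dtype', 'layout', 'device', 'pin_memory']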
env-llmeval/lib/python3.10/site-packages/torchgen/api/functionalization.py ADDED
@@ -0,0 +1,176 @@
from typing import List, Optional

from torchgen.api import dispatcher
from torchgen.api.types import (
    BaseCType,
    Binding,
    boolT,
    ConstRefCType,
    CType,
    longT,
    NamedCType,
    tensorT,
)
from torchgen.model import (
    Argument,
    BaseTy,
    BaseType,
    FunctionSchema,
    NativeFunctionsViewGroup,
)


# This file describes the translation of JIT schema to API's used
# when creating view lambdas that are used by the functionalization pass.
# There are two types of lambdas: forward lambdas and reverse lambdas.
# These API's mostly follow the dispatcher API, with a few quirks:
# - The lambda capture has to convert reference types to value types
# - While the forward lambda just directly calls into the at::_ops API
#   (following the dispatcher convention), the logic here for the reverse lambda
#   is responsible for generating both the call-site, and the declarations
#   (which are implemented manually in the at::functionalization::impl namespace).

# The lambdas generated for each view op in the functionalization pass are of the form
# [capture_arguments](outer_arguments) -> returns_type {
#     return name(inner_arguments);
# }

# Define some specific lambda input arguments.
base_binding = Binding(
    name="base",
    nctype=NamedCType(name="base", type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
mutated_view_binding = Binding(
    name="mutated_view",
    nctype=NamedCType(name="mutated_view", type=ConstRefCType(BaseCType(tensorT))),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
mutated_view_idx_binding = Binding(
    name="mutated_view_idx",
    nctype=NamedCType(name="mutated_view_idx", type=BaseCType(longT)),
    argument=Argument(
        name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
    ),
    default=None,
)
reapply_views_binding = Binding(
    name="reapply_views",
    nctype=NamedCType(name="reapply_views", type=BaseCType(boolT)),
    argument=Argument(
        name="reapply_views", type=BaseType(BaseTy.bool), default=None, annotation=None
    ),
    default=None,
)


# The lambda capture itself doesn't have a name.
# The name returned here corresponds to the name of the inner function called by the lambda.
def name(
    g: NativeFunctionsViewGroup,
    *,
    is_reverse: bool,
    include_namespace: bool,
    reapply_views: Optional[bool] = None,
) -> str:
    if reapply_views is None:
        # reapply_views is only important for the fwd lambda,
        # since we always plumb the runtime "reapply_views" argument into the reverse function.
        assert is_reverse
    if is_reverse:
        # for the reverse: the name of the inverse function always involves "view_copy",
        # and we plumb the "reapply_views" flag into that function.
        # (We could avoid doing that, but that would require writing out twice as many view inverse functions).
        assert g.view_copy is not None
        api_name = g.view_copy.func.name.unambiguous_name()
        # in the reverse case, we codegen both the call-sites (which need the full namespace) and the declarations (which don't)
        if include_namespace:
            return f"at::functionalization::FunctionalInverses::{api_name}_inverse"
        else:
            return f"{api_name}_inverse"
    # in the forward case, we just directly call into the at::_ops API (so we always need the namespace)
    assert include_namespace
    assert g.view_copy is not None
    api_name = (
        g.view.func.name.unambiguous_name()
        if reapply_views
        else g.view_copy.func.name.unambiguous_name()
    )
    return f"at::_ops::{api_name}::call"


def capture_arguments(func: FunctionSchema, *, is_reverse: bool) -> List[Binding]:
    # capture arguments include all arguments except `self`.
    # Importantly, they don't include any C++ reference types (or else we'll get a dangling reference in the capture),
    # So any reference types (IntArrayRef) need to be converted to value types (vector<int64_t>)
    args = func.arguments.flat_all
    assert args[0].type == BaseType(BaseTy.Tensor)
    non_self_args = args[1:]
    non_self_value_bindings = [
        dispatcher.argument(a, remove_non_owning_ref_types=True) for a in non_self_args
    ]
    all_bindings = [reapply_views_binding] + non_self_value_bindings
    return all_bindings


def returns_type(func: FunctionSchema) -> CType:
    # Assertion: all view ops return tensor-like outputs
    assert len(func.returns) >= 1
    for ret in func.returns:
        assert ret.type.is_tensor_like()
    # However, the return type of the lambda is always an individual tensor.
    # For multi-tensor outputs, each tensor needs to be tracked individually.
    return BaseCType(tensorT)


def outer_arguments(*, is_reverse: bool) -> List[Binding]:
    if is_reverse:
        return [base_binding, mutated_view_binding, mutated_view_idx_binding]
    else:
        return [base_binding, mutated_view_idx_binding]


def inner_call_index(func: FunctionSchema) -> Optional[Binding]:
    # For view ops that return multiple tensors (like `split`), we generate a separate lambda for each output.
    # When we replay a view op that returns multiple tensors, we need to index into the output appropriately
    if len(func.returns) > 1 or (
        len(func.returns) == 1 and func.returns[0].type.is_list_like()
    ):
        return mutated_view_idx_binding
    return None


def inner_arguments(func: FunctionSchema, is_reverse: bool) -> List[Binding]:
    args = func.arguments.flat_all
    assert args[0].type == BaseType(BaseTy.Tensor)
    non_self_args = args[1:]
    # The forward lambda calls the at::_ops API, while the reverse lambda calls the view inverse API.
    # Both of these follow the dispatcher API.
    non_self_bindings = [dispatcher.argument(a) for a in non_self_args]
    if not is_reverse:
        # the forward lambda swaps out the original tensor argument with the lambd arg "base"
        return [base_binding] + non_self_bindings
    else:
        # the reverse lambda does the same, but with an additional "mutated_view" arg
        # additionally, we have a calling convention: for view ops that return multiple tensor outputs
        # their corresponding view_inverse function takes in an additional index argument.
        index_binding = inner_call_index(func)
        if index_binding is not None:
            return [
                base_binding,
                mutated_view_binding,
                reapply_views_binding,
                index_binding,
            ] + non_self_bindings
        else:
            return [
                base_binding,
                mutated_view_binding,
                reapply_views_binding,
            ] + non_self_bindings
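A sketch of the multi-output rule above (illustrative only, not part of this commit): the transpose and split schema strings are hand-written copies of native_functions.yaml-style schemas, and the expected values assume FunctionSchema.parse handles them as it does in the yaml.

# Sketch: single-output view ops get no index argument; list-returning ones do.
from torchgen.api import functionalization
from torchgen.model import FunctionSchema

transpose = FunctionSchema.parse(
    "transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)"
)
split = FunctionSchema.parse(
    "split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]"
)

print(functionalization.inner_call_index(transpose))          # expected: None
print(functionalization.inner_call_index(split) is not None)  # expected: True

# Captures drop `self` and prepend the reapply_views flag.
print([b.name for b in functionalization.capture_arguments(transpose, is_reverse=False)])
# expected: ['reapply_views', 'dim0', 'dim1']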
env-llmeval/lib/python3.10/site-packages/torchgen/api/meta.py ADDED
@@ -0,0 +1,12 @@
from torchgen.model import NativeFunctionsGroup

# Follows dispatcher calling convention, but:
# - Mutable arguments not allowed. Meta functions are always
#   written in functional form. Look at FunctionSchema.signature()
# - No tensor returns; instead we return a TensorMeta describing
#   the tensor in question


def name(g: NativeFunctionsGroup) -> str:
    # use the overload name from the functional version
    return str(g.functional.func.name).replace(".", "_")
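A one-line illustration of the naming rule above (the operator name "add.Tensor" is just an example; no real NativeFunctionsGroup is constructed):

# Meta kernel names replace '.' in the functional overload name with '_'.
print("add.Tensor".replace(".", "_"))  # add_Tensor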
env-llmeval/lib/python3.10/site-packages/torchgen/api/native.py ADDED
@@ -0,0 +1,153 @@
from typing import List, Optional, Sequence, Union

from torchgen import local
from torchgen.api import cpp

from torchgen.api.types import (
    ArgName,
    BaseCType,
    Binding,
    boolT,
    ConstRefCType,
    CType,
    deviceT,
    layoutT,
    ListCType,
    MutRefCType,
    NamedCType,
    OptionalCType,
    scalarT,
    scalarTypeT,
    tensorT,
)
from torchgen.model import (
    Argument,
    FunctionSchema,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never

# This file describes the translation of JIT schema to the native functions API.
# This looks a lot like the C++ API (which makes historical sense, because the
# idea was you wrote native functions to implement functions in the C++ API),
# but over time we have evolved the C++ API without actually changing our
# native:: kernels. The intention is to make native API and dispatcher API
# line up as closely as possible, since this results in the least overhead
# (no translation is needed from dispatcher API to native API).
#
# NB: this is symint aware, you will get the non-SymInt variant for some
# dispatch entries and SymInt for others.


def name(func: FunctionSchema) -> str:
    name = str(func.name.name)
    # TODO: delete this!
    if func.is_out_fn():
        name += "_out"
    if func.name.overload_name:
        name += f"_{func.name.overload_name}"
    return name


def argumenttype_type(
    t: Type, *, mutable: bool, binds: ArgName, symint: bool
) -> NamedCType:
    if str(t) == "Tensor?":
        tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
        if mutable and not local.use_const_ref_for_mutable_tensors():
            return NamedCType(binds, MutRefCType(tensor_type))
        else:
            return NamedCType(binds, ConstRefCType(tensor_type))
    elif str(t) == "Tensor?[]":
        return NamedCType(
            binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
        )
    elif str(t) == "Scalar":
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif str(t) == "Scalar?":
        return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint)


def returns_type(rs: Sequence[Return], *, symint: bool) -> CType:
    return cpp.returns_type(rs, symint=symint)


def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType:
    return argumenttype_type(a.type, mutable=a.is_write, binds=binds, symint=symint)


def argument(
    a: Union[Argument, SelfArgument, TensorOptionsArguments],
    *,
    is_out: bool,
    symint: bool,
) -> List[Binding]:
    # Ideally, we NEVER default native functions. However, there are a number
    # of functions that call native:: directly and rely on the defaulting
    # existing. So for BC, we generate defaults for non-out variants (but not
    # for out variants, where it is impossible to generate an appropriate
    # default)
    should_default = not is_out
    if isinstance(a, Argument):
        default: Optional[str] = None
        if should_default and a.default is not None:
            default = cpp.default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=a.name, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, SelfArgument):
        # Erase SelfArgument from the distinction
        return argument(a.argument, is_out=is_out, symint=symint)
    elif isinstance(a, TensorOptionsArguments):
        default = None
        if should_default:
            default = "{}"
        # TODO: Not sure why the arguments assigned here are for
        # TensorOptionsArguments and not the constituent pieces. It seems
        # to matter
        return [
            Binding(
                nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))),
                name="dtype",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))),
                name="layout",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))),
                name="device",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))),
                name="pin_memory",
                default=default,
                argument=a,
            ),
        ]
    else:
        assert_never(a)


def arguments(func: FunctionSchema, *, symint: bool) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    args.extend(func.arguments.non_out)
    args.extend(func.arguments.out)
    return [
        r for arg in args for r in argument(arg, symint=symint, is_out=func.is_out_fn())
    ]
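A short sketch of the native:: naming rule above (illustrative only, not part of this commit; schema strings are hand-written and the expected suffixes follow from the code as written):

# Sketch: native kernel names append the overload name, and "_out" for out variants.
from torchgen.api import native
from torchgen.model import FunctionSchema

functional = FunctionSchema.parse(
    "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
)
out = FunctionSchema.parse(
    "add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
)

print(native.name(functional))  # expected: add_Tensor
print(native.name(out))         # expected: add_out_out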
env-llmeval/lib/python3.10/site-packages/torchgen/api/python.py ADDED
@@ -0,0 +1,1481 @@
1
+ from dataclasses import dataclass
2
+ from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
3
+
4
+ from torchgen.api import cpp
5
+
6
+ from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
7
+ from torchgen.gen import pythonify_default
8
+ from torchgen.model import (
9
+ Argument,
10
+ BaseTy,
11
+ BaseType,
12
+ FunctionSchema,
13
+ ListType,
14
+ NativeFunction,
15
+ OptionalType,
16
+ Return,
17
+ Type,
18
+ Variant,
19
+ )
20
+
21
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
22
+ #
23
+ # Data Models
24
+ #
25
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
26
+ #
27
+ # [Notes] python binding codegen
28
+ #
29
+ # The Python binding codegen produces code that takes the input list of
30
+ # PyObjects, finds the matching ATen C++ function using PythonArgParser,
31
+ # converts the PyObjects into C++ types and calls the ATen C++ function:
32
+ #
33
+ # +--------+ parsing +------------------------+ binding +-----------------------+
34
+ # | PyObjs | ---------> | PythonArgParser Output | ---------> | Cpp Function Dispatch |
35
+ # +--------+ +------------------------+ +-----------------------+
36
+ #
37
+ # The following examples demonstrate the data models the Python binding
38
+ # codegen needs to deal with and the tasks it needs to accomplish. It
39
+ # helps understand the purpose of the new data types we introduced below.
40
+ #
41
+ # - Function Schema (source of truth)
42
+ #
43
+ # aten::empty.names(int[] size, *, Dimname[]? names,
44
+ # ScalarType? dtype=None, Layout? layout=None,
45
+ # Device? device=None, bool? pin_memory=None,
46
+ # MemoryFormat? memory_format=None) -> Tensor
47
+ #
48
+ # - Python Signature
49
+ #
50
+ # It's used to generate input schema string for PythonArgParser.
51
+ # Note: TensorOptions fields are reordered and the additional
52
+ # 'requires_grad' field is added:
53
+ #
54
+ # empty(IntArrayRef size, *, DimnameList? names,
55
+ # MemoryFormat? memory_format=None, ScalarType dtype=None,
56
+ # Layout layout=torch.strided, Device device=None,
57
+ # bool pin_memory=False, bool requires_grad=False)
58
+ #
59
+ # - C++ Signature
60
+ #
61
+ # It's used to generate C++ lambda formals & dispatch call.
62
+ # Note: the scattered TensorOptions fields are packed into 'options'.
63
+ #
64
+ # auto dispatch_empty =
65
+ # [](IntArrayRef size, c10::optional<DimnameList> names,
66
+ # const TensorOptions & options,
67
+ # c10::optional<MemoryFormat> memory_format) -> Tensor {
68
+ # pybind11::gil_scoped_release no_gil;
69
+ # return torch::empty(size, names, options, memory_format);
70
+ # };
71
+ #
72
+ # - Binding between Python Arguments and C++ Arguments
73
+ #
74
+ # Given a set of Python Arguments in scope, we need produce the
75
+ # binding expressions that translate the Python API into C++ API:
76
+ #
77
+ # Python Args Cpp Args Binding Exprs
78
+ # -----------------------------------------------------------------
79
+ # 0: size size '_r.intlist(0)'
80
+ # 1: names names 'names' [special init]
81
+ # 2: memory_format -------+
82
+ # 3: dtype -----+-|--> options 'options' [special packing]
83
+ # 4: layout / |
84
+ # 5: device / +--> memory_format '_r.memoryformatOptional(2)'
85
+ # 6: pin_memory /
86
+ # 7: requires_grad -+
87
+ #
88
+ # So the full dispatch expression would look like:
89
+ #
90
+ # dispatch_empty(_r.intlist(0), names, options,
91
+ # _r.memoryformatOptional(2))
92
+ #
93
+ # Where does 'names' come from? It involves special local init:
94
+ #
95
+ # auto __names = _r.toDimnameListOptional(1);
96
+ # c10::optional<DimnameList> names =
97
+ # __names ? c10::make_optional(DimnameList(__names.value()))
98
+ # : c10::nullopt;
99
+ #
100
+ # Where does 'options' come from? It involves special local init
101
+ # for TensorOptions. Note that Python side has the additional
102
+ # 'requires_grad' field:
103
+ #
104
+ # const auto options = TensorOptions()
105
+ # .dtype(_r.scalartype(3))
106
+ # .device(_r.device(5))
107
+ # .layout(_r.layoutOptional(4))
108
+ # .requires_grad(_r.toBool(7))
109
+ # .pinned_memory(_r.toBool(6));
110
+ #
111
+ # In some other cases one Python Argument can map to multiple C++
112
+ # Arguments. For example:
113
+ #
114
+ # aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False)
115
+ # -> (Tensor values, Tensor indices)
116
+ #
117
+ # Python Args Cpp Args Binding Exprs
118
+ # ---------------------------------------------------------------------
119
+ # +----> max 'out[0]'
120
+ # /-----> max_values 'out[1]
121
+ # 0: input / self '_r.tensor(0)'
122
+ # 1: dim / dim '_r.dimname(1)'
123
+ # 2: keepdim / keepdim '_r.toBool(2)'
124
+ # 3: out -----+ [local init] out '_r.tensorlist_n<2>(3)'
125
+ #
126
+ # As demonstrated above, the binding can involve reordering,
127
+ # packing, unpacking and special local inits.
128
+ #
129
+ #
130
+ # Let's look at a concrete example:
131
+ #
132
+ # static PythonArgParser parser({
133
+ # "abs(Tensor input, *, Tensor out=None)",
134
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
135
+ # ^
136
+ # +--- Python Schema, represented by PythonSignature and PythonArgument
137
+ #
138
+ # }, /*traceable=*/true);
139
+ #
140
+ # ParsedArgs<2> parsed_args;
141
+ # auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
142
+ #
143
+ # ...
144
+ #
145
+ # if (_r.isNone(1)) {
146
+ # ~~~~~~~~~~~~ <--- Scattered PythonArgParser output (arg name = 'out')
147
+ # represented by PythonArgParserOutputExpr
148
+ #
149
+ # // aten::abs(Tensor self) -> Tensor
150
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
151
+ # ^
152
+ # +--- NativeFunction schema, base version
153
+ #
154
+ # auto dispatch_abs = [](const Tensor & self) -> Tensor {
155
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
156
+ # ^
157
+ # +--- dispatch_lambda_args / dispatch_lambda_return_str
158
+ # generated from NativeFunction / CppSignature
159
+ # (deprecated PythonSignature is special)
160
+ # arguments are represented by DispatchLambdaArgument
161
+ #
162
+ # pybind11::gil_scoped_release no_gil;
163
+ # return self.abs();
164
+ # ~~~~~~~~~~~ <--- cpp_dispatch_target / cpp_dispatch_exprs
165
+ # generated from NativeFunction / CppSignature
166
+ # };
167
+ # return wrap(dispatch_abs(_r.tensor(0)));
168
+ # ~~~~~~~~~~~~~
169
+ # ^
170
+ # +--- dispatch_lambda_exprs
171
+ # binding PythonArgParserOutputExpr (python args)
172
+ # and DispatchLambdaArgument (c++ args)
173
+ #
174
+ # } else {
175
+ # // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
176
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
177
+ # ^
178
+ # +--- NativeFunction schema, out-variant
179
+ #
180
+ # auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor {
181
+ # pybind11::gil_scoped_release no_gil;
182
+ # return at::abs_out(out, self);
183
+ # };
184
+ # return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0)));
185
+ # }
186
+ #
187
+ #
188
+ # [Notes] python interface codegen
189
+ # The python dataclasses below are used used to generate both python binding code
190
+ # and pyi type hint signatures.
191
+ # In theory these two should look very similar, but there are number of differences
192
+ # in how pyi signatures vs. python_arg_parser signatures are generated.
193
+ # These differences have been encapsulated in signature_str() vs. signature_str_pyi()
194
+ # to display the full signatures, and argument_str() vs argument_str_pyi() to display arguments.
195
+ # For examples, only pyi signatures include return types.
196
+
197
+
198
+ @dataclass(frozen=True)
199
+ class PythonReturns:
200
+ returns: Tuple[Return, ...]
201
+
202
+
203
+ @dataclass(frozen=True)
204
+ class PythonArgument:
205
+ name: str
206
+ type: Type
207
+ default: Optional[str]
208
+
209
+ # Used to generate the default init expr for some PythonArgParser outputs, e.g.:
210
+ #
211
+ # _r.layoutWithDefault(3, layout_from_backend(self.options().backend())))
212
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
213
+ # ^
214
+ # +--- default_init str
215
+ default_init: Optional[str]
216
+
217
+ # Compute argument formal for python argument parsing.
218
+ # Needs to be consistent with torch/csrc/utils/python_arg_parser.h.
219
+ def argument_str(self, *, method: bool = False, symint: bool = True) -> str:
220
+ type_str = (
221
+ argument_type_str(self.type, symint=symint)
222
+ .replace("const ", "")
223
+ .replace(" &", "")
224
+ )
225
+
226
+ name = self.name
227
+ # s/self/input/ outside method bindings
228
+ # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
229
+ # for the parse string
230
+ if name == "self" and type_str in ["Tensor", "Number"] and not method:
231
+ name = "input"
232
+
233
+ # add default
234
+ if self.default is not None:
235
+ default = {
236
+ "nullptr": "None",
237
+ "c10::nullopt": "None",
238
+ "{}": "None",
239
+ }.get(self.default, self.default)
240
+ return f"{type_str} {name}={default}"
241
+ else:
242
+ return f"{type_str} {name}"
243
+
244
+ def argument_str_pyi(
245
+ self, *, method: bool = False, deprecated: bool = False
246
+ ) -> str:
247
+ type_str = argument_type_str_pyi(self.type)
248
+
249
+ name = self.name
250
+ # s/self/input/ outside method bindings
251
+ # [old codegen] TODO: remove this? doesn't rename in codegen, it's just
252
+ # for the parse string
253
+ if name == "self" and type_str == "Tensor" and not method and not deprecated:
254
+ name = "input"
255
+
256
+ if name == "from": # from is a Python keyword...
257
+ name += "_"
258
+
259
+ # pyi merges the _out and functional variants into the same signature, with an optional out arg
260
+ if name == "out" and type_str == "Tensor" and not deprecated:
261
+ type_str = "Optional[" + type_str + "]"
262
+
263
+ # pyi deprecated signatures don't get defaults for their out arg
264
+ treat_as_no_default = (
265
+ deprecated
266
+ and isinstance(self, PythonOutArgument)
267
+ and self.default == "None"
268
+ )
269
+
270
+ # add default
271
+ if self.default is not None and not treat_as_no_default:
272
+ if (
273
+ isinstance(self.type, ListType)
274
+ and self.type.elem == BaseType(BaseTy.int)
275
+ and self.default.startswith("{")
276
+ and self.default.endswith("}")
277
+ ):
278
+ default = "(" + self.default[1:-1] + ")"
279
+ else:
280
+ default = {
281
+ "nullptr": "None",
282
+ "c10::nullopt": "None",
283
+ "{}": "None",
284
+ "MemoryFormat::Contiguous": "contiguous_format",
285
+ "QScheme::PER_TENSOR_AFFINE": "per_tensor_affine",
286
+ }.get(self.default, self.default)
287
+ return f"{name}: {type_str} = {default}"
288
+ else:
289
+ return f"{name}: {type_str}"
290
+
291
+
292
+ @dataclass(frozen=True)
293
+ class PythonOutArgument(PythonArgument):
294
+ # In Python signature multiple output fields are packed into one 'out' argument.
295
+ # When binding to C++, it's first binded to a local 'out' variable:
296
+ # 'auto out = _r.tensorlist_n<2>(2);',
297
+ # then binded to scattered C++ output arguments as 'out[0]', 'out[1]', and etc.
298
+ # TODO: maybe don't need keep scattered out fields for python signature?
299
+ outputs: Tuple[PythonArgument, ...]
300
+
301
+ @staticmethod
302
+ def from_outputs(
303
+ outputs: Tuple[PythonArgument, ...]
304
+ ) -> Optional["PythonOutArgument"]:
305
+ if not outputs:
306
+ return None
307
+
308
+ size = len(outputs)
309
+ if size == 1:
310
+ return PythonOutArgument(
311
+ name=outputs[0].name,
312
+ type=outputs[0].type,
313
+ default="None",
314
+ default_init=None,
315
+ outputs=outputs,
316
+ )
317
+ elif size > 1:
318
+ if any(not a.type.is_tensor_like() for a in outputs):
319
+ raise RuntimeError(f"Unsupported output type: {outputs}")
320
+ return PythonOutArgument(
321
+ name="out",
322
+ # TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None?
323
+ type=ListType(BaseType(BaseTy.Tensor), size),
324
+ default="None",
325
+ default_init=None,
326
+ outputs=outputs,
327
+ )
328
+ raise AssertionError(r"Unexpected PythonOutArgument size")
329
+
330
+
331
+ @dataclass(frozen=True)
332
+ class PythonSignature:
333
+ # Base operator name, without inplace/outplace suffix.
334
+ name: str
335
+
336
+ # Positional arguments.
337
+ # TODO: create a dedicated SelfArgument type for 'self'?
338
+ input_args: Tuple[PythonArgument, ...]
339
+
340
+ # Keyword arguments excluding the 'out' argument and scattered kwargs belonging
341
+ # to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
342
+ input_kwargs: Tuple[PythonArgument, ...]
343
+
344
+ output_args: Optional[PythonOutArgument]
345
+
346
+ # Return types, which are only used by pyi
347
+ returns: PythonReturns
348
+
349
+ # These are scattered kwargs arguments belonging to TensorOptions.
350
+ # When binding to C++, they are packed into a TensorOptions object 'options'.
351
+ # It's possible that the C++ signature doesn't take TensorOptions object (e.g.
352
+ # for out variant), in which case they will be used as scattered fields without
353
+ # being packed into 'options'.
354
+ # TODO: maybe create a PythonTensorOptionsArgument?
355
+ tensor_options_args: Tuple[PythonArgument, ...]
356
+
357
+ # method or function signature?
358
+ method: bool
359
+
360
+ @property
361
+ def deprecated(self) -> bool:
362
+ return False
363
+
364
+ def arguments(
365
+ self, *, skip_outputs: bool = False, skip_tensor_options: bool = False
366
+ ) -> Tuple[Union[PythonArgument, PythonOutArgument], ...]:
367
+ result: List[Union[PythonArgument, PythonOutArgument]] = []
368
+ result.extend(self.input_args)
369
+ result.extend(self.input_kwargs)
370
+ if self.output_args is not None and not skip_outputs:
371
+ result.append(self.output_args)
372
+ if not skip_tensor_options:
373
+ result.extend(self.tensor_options_args)
374
+ return tuple(result)
375
+
376
+ def arguments_count(self) -> int:
377
+ return len(self.arguments())
378
+
379
+ def output_idx(self) -> int:
380
+ return len(self.input_args) + len(self.input_kwargs)
381
+
382
+ # [old codegen] Compute the Python function signature for argument parsing,
383
+ # as specified in torch/csrc/utils/python_arg_parser.h. WARNING:
384
+ # this is NOT the same type signature as specified by PEP 484
385
+ # as understood by mypy; our format was independently developed
386
+ # and has some quirks to make it more suitable specifically
387
+ # for error parsing.
388
+ #
389
+ # For a translation to mypy-valid type signatures, see
390
+ # signature_str_pyi().
391
+ def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
392
+ args = self.arguments(skip_outputs=skip_outputs)
393
+ schema_formals: List[str] = [
394
+ a.argument_str(method=self.method, symint=symint) for a in args
395
+ ]
396
+ positional_argc = len(self.input_args)
397
+ if len(schema_formals) > positional_argc:
398
+ schema_formals.insert(positional_argc, "*")
399
+
400
+ return f'{self.name}({", ".join(schema_formals)})'
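+ # For example (a rough sketch, not verbatim generated output): for
+ # 'aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1)' with
+ # method=False this produces something like
+ #   'add(Tensor input, Tensor other, *, Scalar alpha=1)'
+ # (the exact argument rendering is determined by argument_str above).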
401
+
402
+ def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
403
+ args = self.arguments(skip_outputs=skip_outputs)
404
+ schema_formals: List[str] = [
405
+ a.argument_str_pyi(method=self.method) for a in args
406
+ ]
407
+ positional_argc = len(self.input_args)
408
+ if len(schema_formals) > positional_argc:
409
+ schema_formals.insert(positional_argc, "*")
410
+
411
+ # only pyi signatures include returns
412
+ returns_str = returns_str_pyi(self)
413
+ # pyi also includes self (with no typing/defaults) for methods
414
+ if self.method:
415
+ schema_formals.insert(0, "self")
416
+ return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
417
+
418
+ def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
419
+ # only pyi uses vararg signatures
420
+ args = self.arguments(skip_outputs=skip_outputs)
421
+ schema_formals: List[str] = [
422
+ a.argument_str_pyi(method=self.method) for a in args
423
+ ]
424
+ # vararg only applies to pyi signatures. vararg variants are not generated for all signatures
425
+ num_args = self.arguments_count()
426
+ num_positionalargs = len(self.input_args)
427
+
428
+ have_vararg_version = False
429
+ if num_args > 0:
430
+ vararg_type = args[0].type
431
+ if (
432
+ isinstance(vararg_type, ListType)
433
+ and str(vararg_type.elem) in ["int", "SymInt"]
434
+ and num_positionalargs == 1
435
+ ):
436
+ have_vararg_version = True
437
+
438
+ if not have_vararg_version:
439
+ return None
440
+ # Below are the major changes in vararg vs. regular pyi signatures
441
+ # vararg signatures also omit the asterisk
442
+ schema_formals[0] = "*" + args[0].name + ": _int"
443
+
444
+ returns_str = returns_str_pyi(self)
445
+ # pyi also includes self (with no typing/defaults) for methods
446
+ if self.method:
447
+ schema_formals.insert(0, "self")
448
+ return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
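+ # For example (a rough sketch): for 'aten::view(Tensor(a) self, SymInt[] size)'
+ # the regular pyi overload is roughly
+ #   def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
+ # while the vararg variant generated here is roughly
+ #   def view(self, *size: _int) -> Tensor: ...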
449
+
450
+
451
+ # The deprecated python signature involves some special logic, so create a
452
+ # dedicated data model to store these extra properties.
453
+ @dataclass(frozen=True)
454
+ class PythonSignatureDeprecated(PythonSignature):
455
+ # Schema for the deprecated function
456
+ deprecated_schema: FunctionSchema
457
+
458
+ # The deprecated signature might miss some arguments that the corresponding
459
+ # C++ signature expects. We need to store the constant default values to pass in.
+ # For example:
+ # [deprecated signature]: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2)
462
+ # [func schema]: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
463
+ # [func call]: self.addmm(mat1, mat2, beta, 1)
464
+ # We store ['self', 'mat1', 'mat2', 'beta', '1'] in this case.
465
+ deprecated_args_exprs: Tuple[str, ...]
466
+
467
+ @property
468
+ def deprecated(self) -> bool:
469
+ return True
470
+
471
+ def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
472
+ return (
473
+ PythonSignature.signature_str(
474
+ self, skip_outputs=skip_outputs, symint=symint
475
+ )
476
+ + "|deprecated"
477
+ )
478
+
479
+ def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
480
+ args = self.arguments(skip_outputs=skip_outputs)
481
+ schema_formals: List[str] = [
482
+ a.argument_str_pyi(method=self.method, deprecated=True) for a in args
483
+ ]
484
+ positional_argc = len(self.input_args)
485
+ if len(schema_formals) > positional_argc:
486
+ schema_formals.insert(positional_argc, "*")
487
+
488
+ returns_str = returns_str_pyi(self)
489
+ return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
490
+
491
+ def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
492
+ # the codegen doesn't include vararg variants for deprecated signatures
493
+ return None
494
+
495
+
496
+ # This struct is used to hold the PythonSignature and its corresponding
497
+ # NativeFunction BEFORE grouping base and out-variant functions.
498
+ # Why not store NativeFunction in PythonSignature or construct PythonSignature
499
+ # from NativeFunction? Because they are not 1-1 mapped.
500
+ # One native function could have both deprecated and non-deprecated python
501
+ # signatures - NativeFunction doesn't contain information to construct the
502
+ # deprecated python signature.
503
+ # One python signature is used to handle both the base and the out-variant
504
+ # function - see 'PythonSignatureGroup'.
505
+ @dataclass(frozen=True)
506
+ class PythonSignatureNativeFunctionPair:
507
+ signature: PythonSignature
508
+ function: NativeFunction
509
+
510
+
511
+ # We merge pairs of functions with signatures that are equivalent mod
512
+ # output arguments, and use a single entry in the python_arg_parser sig
513
+ # list for both (output arguments become optional).
514
+ @dataclass(frozen=True)
515
+ class PythonSignatureGroup:
516
+ # The signature used for Python argument parsing. The outplace signature
517
+ # is preferred if it exists, because it can be used to parse inputs for both
518
+ # the out-place variant and the base version (with output omitted).
519
+ signature: PythonSignature
520
+
521
+ # The regular ATen declaration (e.g. conv2d)
522
+ base: NativeFunction
523
+
524
+ # The out variant (e.g. conv2d_out)
525
+ outplace: Optional[NativeFunction]
526
+
527
+ @classmethod
528
+ def from_pairs(
529
+ cls,
530
+ functional: PythonSignatureNativeFunctionPair,
531
+ out: Optional[PythonSignatureNativeFunctionPair],
532
+ ) -> "PythonSignatureGroup":
533
+ if out is None:
534
+ return PythonSignatureGroup(
535
+ signature=functional.signature,
536
+ base=functional.function,
537
+ outplace=None,
538
+ )
539
+
540
+ # prefer the signature with optional out=... arguments because it's the
541
+ # superset that can be used to parse input for both base and outplace.
542
+ signature_kwargs = out.signature.__dict__.copy()
543
+
544
+ # Out overloads in C++ don't have TensorOptions arguments,
545
+ # so take these from the functional variant
546
+ signature_kwargs[
547
+ "tensor_options_args"
548
+ ] = functional.signature.tensor_options_args
549
+
550
+ return PythonSignatureGroup(
551
+ signature=type(out.signature)(**signature_kwargs),
552
+ base=functional.function,
553
+ outplace=out.function,
554
+ )
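+ # For example (illustrative): 'aten::add.Tensor' and 'aten::add.out' form one group;
+ # the group's signature is built from the out variant (so it carries the optional
+ # 'out' argument), while tensor_options_args are taken from the functional variant.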
555
+
556
+
557
+ # C++ function dispatch is wrapped in a lambda function. The lambda function
558
+ # has almost the same signature as the C++ function, only with some small
559
+ # variations - see details below.
560
+ # This data model is used to represent arguments of the lambda function
561
+ # signature.
562
+ @dataclass(frozen=True)
563
+ class DispatchLambdaArgument:
564
+ name: str
565
+ type_str: str
566
+ is_out_arg: bool
567
+
568
+
569
+ # To pass PyObject arguments to the C++ function (via the lambda wrapper),
+ # we first need to convert the PyObjects into simple C++ objects. This work
571
+ # is done by PythonArgParser.
572
+ # This data model is used to represent the output of PythonArgParser.
573
+ # It has 1-1 mapping with PythonArgument in PythonSignature.
574
+ @dataclass(frozen=True)
575
+ class PythonArgParserOutputExpr:
576
+ # argument name
577
+ name: str
578
+
579
+ # RHS expression to reference PythonArgParser output.
580
+ expr: str
581
+
582
+ # In some special cases we need to create a different expr, e.g.:
583
+ # '_r.isNone(1)' instead of '_r.tensor(1)'.
584
+ index: int
585
+
586
+ # The python argument it maps to.
587
+ argument: PythonArgument
588
+
589
+ @property
590
+ def is_none_expr(self) -> str:
591
+ return f"_r.isNone({self.index})"
592
+
593
+
594
+ # To pass PythonArgParser output to the lambda wrapper, we need to bind
+ # PythonArgParserOutputExpr to DispatchLambdaArgument.
+ # They are not always 1-1 mapped, e.g. scattered TensorOptions fields
+ # need to be packed into a TensorOptions object, which is the argument
598
+ # that the lambda function wrapper takes.
599
+ @dataclass(frozen=True)
600
+ class DispatchLambdaArgumentExprs:
601
+ # The exprs that provide the binding for lambda arguments, e.g.:
602
+ #
603
+ # 'self' -> '_r.tensor(0)'
604
+ # 'min' -> 'out[0]' / 'min_indices' -> 'out[1]'
605
+ # 'options' -> 'options'
606
+ #
607
+ # It has 1-1 mapping with DispatchLambdaArgument.
608
+ exprs: Sequence[str]
609
+
610
+ # Special local inits, which might introduce new variables that
611
+ # the 'exprs' above reference, e.g.:
612
+ #
613
+ # 'auto out = _r.tensorlist_n<2>(2);'
614
+ #
615
+ inits: Sequence[str]
616
+
617
+
618
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
619
+ #
620
+ # Helper Functions
621
+ #
622
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
623
+
624
+
625
+ def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature:
626
+ return CppSignatureGroup.from_native_function(f, method=method).signature
627
+
628
+
629
+ def has_tensor_options(f: NativeFunction) -> bool:
630
+ return f.func.arguments.tensor_options is not None
631
+
632
+
633
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
634
+ #
635
+ # Python Signature
636
+ #
637
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
638
+
639
+
640
+ # 'simple_type' was introduced by the old codegen; it is slightly different
+ # from the python schema type, e.g. it doesn't have the '?' suffix for
+ # optional Tensor/TensorList and doesn't have the '[size]' suffix for list types.
643
+ def argument_type_str(
644
+ t: Type, *, simple_type: bool = False, symint: bool = True
645
+ ) -> str:
646
+ if isinstance(t, BaseType):
647
+ if t.name == BaseTy.Tensor:
648
+ return "Tensor"
649
+ elif t.name == BaseTy.int:
650
+ return "int64_t"
651
+ elif t.name == BaseTy.float:
652
+ return "double"
653
+ elif t.name == BaseTy.str:
654
+ return "c10::string_view"
655
+ elif t.name in [
656
+ BaseTy.bool,
657
+ BaseTy.QScheme,
658
+ BaseTy.Scalar,
659
+ BaseTy.ScalarType,
660
+ BaseTy.Generator,
661
+ BaseTy.Storage,
662
+ BaseTy.Layout,
663
+ BaseTy.Device,
664
+ BaseTy.DeviceIndex,
665
+ BaseTy.MemoryFormat,
666
+ BaseTy.Dimname,
667
+ BaseTy.Stream,
668
+ BaseTy.ConstQuantizerPtr,
669
+ BaseTy.SymInt,
670
+ ]:
671
+ # These python schema type names line up with their function schema names
672
+ return t.name.name
673
+
674
+ elif isinstance(t, OptionalType):
675
+ if str(t.elem) == "Tensor":
676
+ # Is it desired to keep '?' for simple_type with new style dispatcher?
677
+ return "Tensor?"
678
+ elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
679
+ return f"{elem}?"
680
+ elif isinstance(t, ListType):
681
+ size = t.size if not simple_type else None
682
+ if str(t.elem) == "bool":
683
+ assert t.size is not None
684
+ return f"::std::array<bool,{t.size}>"
685
+ elif str(t.elem) == "int":
686
+ return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
687
+ elif str(t.elem) == "SymInt":
688
+ if symint:
689
+ return (
690
+ f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef"
691
+ )
692
+ else:
693
+ return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
694
+ elif str(t.elem) == "Tensor":
695
+ return f"TensorList[{size}]" if size is not None else "TensorList"
696
+ elif str(t.elem) == "Scalar":
697
+ return f"ScalarList[{size}]" if size is not None else "ScalarList"
698
+ elif str(t.elem) == "Tensor?":
699
+ if simple_type:
700
+ return "c10::List<c10::optional<Tensor>>"
701
+ else:
702
+ return "const c10::List<c10::optional<Tensor>> &"
703
+ elif str(t.elem) == "Dimname":
704
+ return f"DimnameList[{size}]" if size is not None else "DimnameList"
705
+ elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
706
+ return f"ArrayRef<{elem}>"
707
+
708
+ raise RuntimeError(f"unrecognized type {repr(t)}")
709
+
710
+
711
+ def argument_type_size(t: Type) -> Optional[int]:
712
+ l = t.is_list_like()
713
+ if l is not None and str(l.elem) != "bool":
714
+ return l.size
715
+ else:
716
+ return None
717
+
718
+
719
+ def argument(a: Argument) -> PythonArgument:
720
+ return PythonArgument(
721
+ name=a.name,
722
+ type=a.type,
723
+ # TODO: directly translate a.default to python default
724
+ default=str(
725
+ pythonify_default(cpp.default_expr(a.default, a.type, symint=False))
726
+ )
727
+ if a.default is not None
728
+ else None,
729
+ default_init=None,
730
+ )
731
+
732
+
733
+ # Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen
734
+ def signature(
735
+ f: NativeFunction, *, method: bool = False, pyi: bool = False
736
+ ) -> PythonSignature:
737
+ return signature_from_schema(
738
+ f.func, category_override=f.category_override, method=method, pyi=pyi
739
+ )
740
+
741
+
742
+ def signature_from_schema(
743
+ func: FunctionSchema,
744
+ *,
745
+ category_override: Optional[str],
746
+ method: bool = False,
747
+ pyi: bool = False,
748
+ ) -> PythonSignature:
749
+ args: List[Argument] = []
750
+ args.extend(func.arguments.pre_self_positional)
751
+ # Skip SelfArgument if this is a method.
752
+ if not method and func.arguments.self_arg is not None:
753
+ args.append(func.arguments.self_arg.argument)
754
+ args.extend(func.arguments.post_self_positional)
755
+ args.extend(func.arguments.pre_tensor_options_kwarg_only)
756
+ # Skip TensorOptionsArguments. Python side TensorOptions
757
+ # arguments are created based on different rules - see below.
758
+ args.extend(func.arguments.post_tensor_options_kwarg_only)
759
+ args.extend(func.arguments.out)
760
+
761
+ input_arg_set = {a.name for a in func.arguments.flat_positional}
762
+ kwarg_only_set = {a.name for a in func.arguments.flat_kwarg_only}
763
+ out_arg_set = {a.name for a in func.arguments.out}
764
+
765
+ input_args = tuple(map(argument, filter(lambda a: a.name in input_arg_set, args)))
766
+ input_kwargs = tuple(
767
+ map(argument, filter(lambda a: a.name in kwarg_only_set, args))
768
+ )
769
+ outputs = tuple(map(argument, filter(lambda a: a.name in out_arg_set, args)))
770
+
771
+ # Reintroduce the scattered fields of TensorOptions for Python.
772
+ # Compared to the cpp counterpart, the python arguments have a new property
+ # (default_init) and a new argument 'requires_grad', which require some
+ # special handling.
775
+ # [old codegen] TODO: because these aren't guaranteed to be 100% faithful
776
+ # to the original versions in the yaml, this recreation is a potential
777
+ # source of drift between eager and JIT. Pull this logic out to a shared place.
778
+
779
+ has_tensor_input_arg = any(
780
+ a.type.is_tensor_like() for a in func.arguments.flat_non_out
781
+ )
782
+ if any(a.name == "requires_grad" for a in func.schema_order_arguments()):
783
+ raise ValueError(
784
+ "argument named requires_grad is reserved, should not explicitly add it in the schema"
785
+ )
786
+
787
+ # [old codegen] this probably won't work if one of the returns is not a tensor,
788
+ # but it will produce a compile-time error that is obvious.
789
+ has_tensor_return = any(r.type.is_tensor_like() for r in func.returns)
790
+
791
+ name: str = cpp.name(func)
792
+ is_factory_function = category_override == "factory" or (
793
+ has_tensor_return and not has_tensor_input_arg
794
+ )
795
+ is_like_or_new_function = (
796
+ category_override in ("new", "like")
797
+ or name.startswith("new_")
798
+ or name.endswith("_like")
799
+ )
800
+
801
+ tensor_options_args: List[PythonArgument] = []
802
+ if is_factory_function or is_like_or_new_function:
803
+
804
+ def topt_default_init(name: str) -> Optional[str]:
805
+ topt_args = func.arguments.tensor_options
806
+ if topt_args is None:
807
+ return None
808
+ a = getattr(topt_args, name)
809
+ if a.default is None or a.default == "None":
810
+ return None
811
+ return cpp.default_expr(a.default, a.type, symint=False)
812
+
813
+ tensor_options_args.append(
814
+ PythonArgument(
815
+ name="dtype",
816
+ type=OptionalType(BaseType(BaseTy.ScalarType)),
817
+ default="None",
818
+ default_init=(
819
+ None if is_like_or_new_function else topt_default_init("dtype")
820
+ ),
821
+ )
822
+ )
823
+ tensor_options_args.append(
824
+ PythonArgument(
825
+ name="layout",
826
+ type=OptionalType(BaseType(BaseTy.Layout)),
827
+ default="None",
828
+ default_init=(
829
+ None if is_like_or_new_function else topt_default_init("layout")
830
+ ),
831
+ )
832
+ )
833
+ tensor_options_args.append(
834
+ PythonArgument(
835
+ name="device",
836
+ type=OptionalType(BaseType(BaseTy.Device)),
837
+ default="None",
838
+ default_init=(
839
+ None
840
+ if is_like_or_new_function
841
+ else (
842
+ topt_default_init("device")
843
+ or "torch::tensors::get_default_device()"
844
+ )
845
+ ),
846
+ )
847
+ )
848
+ tensor_options_args.append(
849
+ PythonArgument(
850
+ name="pin_memory",
851
+ type=OptionalType(BaseType(BaseTy.bool)),
852
+ default="False",
853
+ default_init=None,
854
+ )
855
+ )
856
+ tensor_options_args.append(
857
+ PythonArgument(
858
+ name="requires_grad",
859
+ type=OptionalType(BaseType(BaseTy.bool)),
860
+ default="False",
861
+ default_init=None,
862
+ )
863
+ )
864
+
865
+ returns = PythonReturns(returns=func.returns)
866
+
867
+ return PythonSignature(
868
+ name=str(func.name.name),
869
+ input_args=input_args,
870
+ input_kwargs=input_kwargs,
871
+ output_args=PythonOutArgument.from_outputs(outputs),
872
+ tensor_options_args=tuple(tensor_options_args),
873
+ returns=returns,
874
+ method=method,
875
+ )
876
+
877
+
878
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
879
+ #
880
+ # Python Interface
881
+ #
882
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
883
+
884
+
885
+ def namedtuple_fieldnames(returns: Tuple[Return, ...]) -> List[str]:
886
+ if len(returns) <= 1 or all(r.name is None for r in returns):
887
+ return []
888
+ else:
889
+ if any(r.name is None for r in returns):
890
+ # When building on Windows, `PyStructSequence_UnnamedField` could not be
891
+ # resolved by the linker for some reason, which causes an error when building:
892
+ #
893
+ # python_nn_functions.cpp.obj : error LNK2001: unresolved external symbol
894
+ # PyStructSequence_UnnamedField
895
+ #
896
+ # Thus, at this point in time, we do not support unnamed
897
+ # fields in namedtuple; you must either name all fields,
898
+ # or none of them.
899
+ raise ValueError("Unnamed field is not supported by codegen")
900
+
901
+ return [str(r.name) for r in returns]
902
+
903
+
904
+ def argument_type_str_pyi(t: Type) -> str:
905
+ add_optional = False
906
+ if isinstance(t, OptionalType):
907
+ t = t.elem
908
+ add_optional = True
909
+
910
+ if isinstance(t, BaseType):
911
+ if t.name in [BaseTy.int, BaseTy.DeviceIndex]:
912
+ ret = "_int"
913
+ if t.name == BaseTy.SymInt:
914
+ ret = "Union[_int, SymInt]"
915
+ elif t.name == BaseTy.float:
916
+ ret = "_float"
917
+ elif t.name == BaseTy.str:
918
+ ret = "str"
919
+ elif t.name == BaseTy.Scalar:
920
+ ret = "Union[Number, _complex]"
921
+ elif t.name == BaseTy.ScalarType:
922
+ ret = "_dtype"
923
+ elif t.name == BaseTy.bool:
924
+ ret = "_bool"
925
+ elif t.name == BaseTy.QScheme:
926
+ ret = "_qscheme"
927
+ elif t.name == BaseTy.Layout:
928
+ ret = "_layout"
929
+ elif t.name == BaseTy.Device:
930
+ ret = "Optional[DeviceLikeType]"
931
+ elif t.name == BaseTy.MemoryFormat:
932
+ ret = "memory_format"
933
+ elif t.name == BaseTy.Dimname:
934
+ ret = "Union[str, ellipsis, None]"
935
+ elif t.name == BaseTy.Storage:
936
+ ret = "Union[Storage, UntypedStorage]"
937
+ elif t.name in [BaseTy.Tensor, BaseTy.Generator, BaseTy.Stream]:
938
+ # These python schema type names line up with their function schema names
939
+ ret = t.name.name
940
+
941
+ elif isinstance(t, ListType):
942
+ if str(t.elem) == "int":
943
+ ret = "Union[_int, _size]" if t.size is not None else "_size"
944
+ elif t.is_tensor_like():
945
+ # TODO: this doesn't seem right...
946
+ # Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
947
+ # It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
948
+ if isinstance(t.elem, OptionalType):
949
+ add_optional = True
950
+ ret = (
951
+ "Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
952
+ if t.size is not None
953
+ else "Union[Tuple[Tensor, ...], List[Tensor]]"
954
+ )
955
+ elif str(t.elem) == "float":
956
+ ret = "Sequence[_float]"
957
+ elif str(t.elem) == "SymInt" and t.size is not None:
958
+ elem = argument_type_str_pyi(t.elem)
959
+ ret = f"Union[{elem}, Sequence[{elem}]]"
960
+ else:
961
+ elem = argument_type_str_pyi(t.elem)
962
+ ret = f"Sequence[{elem}]"
963
+
964
+ else:
965
+ raise RuntimeError(f"unrecognized type {repr(t)}")
966
+
967
+ if add_optional:
968
+ ret = "Optional[" + ret + "]"
969
+
970
+ return ret
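+ # For example (illustrative of the rules above): 'Tensor?' maps to 'Optional[Tensor]',
+ # 'int[2]' to 'Union[_int, _size]', 'float[]' to 'Sequence[_float]', and 'Scalar'
+ # to 'Union[Number, _complex]'.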
971
+
972
+
973
+ def return_type_str_pyi(t: Type) -> str:
974
+ # Where arguments are open to accepting Union, return types should return
975
+ # concrete types
976
+
977
+ if isinstance(t, OptionalType):
978
+ inner = return_type_str_pyi(t.elem)
979
+ return f"Optional[{inner}]"
980
+
981
+ if isinstance(t, BaseType):
982
+ if t.name == BaseTy.Device:
983
+ return "_device"
984
+ elif t.name == BaseTy.Dimname:
985
+ ret = "Optional[str]"
986
+ else:
987
+ return argument_type_str_pyi(t)
988
+
989
+ if isinstance(t, ListType):
990
+ inner = return_type_str_pyi(t.elem)
991
+ return f"List[{inner}]"
992
+
993
+ return argument_type_str_pyi(t)
994
+
995
+
996
+ def returns_named_tuple_pyi(signature: PythonSignature) -> Optional[Tuple[str, str]]:
997
+ python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
998
+ namedtuple_name = signature.name
999
+ field_names = namedtuple_fieldnames(signature.returns.returns)
1000
+ if field_names:
1001
+ namedtuple_def_lines = [f"class {namedtuple_name}(NamedTuple):"]
1002
+ namedtuple_def_lines.extend(
1003
+ f" {name}: {typ}" for name, typ in zip(field_names, python_returns)
1004
+ )
1005
+ namedtuple_def_lines.append("") # add an extra newline
1006
+ namedtuple_def = "\n".join(namedtuple_def_lines)
1007
+ # Example:
1008
+ # namedtuple_def = (
1009
+ # "class max(NamedTuple):\n"
1010
+ # " values: Tensor\n"
1011
+ # " indices: Tensor\n"
1012
+ # )
1013
+ return namedtuple_name, namedtuple_def
1014
+ return None
1015
+
1016
+
1017
+ def returns_str_pyi(signature: PythonSignature) -> str:
1018
+ field_names = namedtuple_fieldnames(signature.returns.returns)
1019
+ if field_names:
1020
+ return f"torch.return_types.{signature.name}"
1021
+
1022
+ python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
1023
+ if len(python_returns) > 1:
1024
+ return "Tuple[" + ", ".join(python_returns) + "]"
1025
+ if len(python_returns) == 1:
1026
+ return python_returns[0]
1027
+ return "None"
1028
+
1029
+
1030
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1031
+ #
1032
+ # C++ Function Dispatch
1033
+ #
1034
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1035
+ # This section provides APIs to generate the code that does C++ function
1036
+ # dispatch. The C++ function call is wrapped by a lambda function.
1037
+ # For example:
1038
+ #
1039
+ # // aten::selu_(Tensor(a!) self) -> Tensor(a!)
1040
+ # auto dispatch_selu_ = [](Tensor self) -> Tensor {
1041
+ # pybind11::gil_scoped_release no_gil;
1042
+ # return at::selu_(self);
1043
+ # };
1044
+ #
1045
+ # The lambda function's signature follows the C++ signature in common
1046
+ # cases, e.g.:
1047
+ #
1048
+ # // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
1049
+ # [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
1050
+ #
1051
+ # For out variant the 'out' argument's type is changed from 'Tensor &'
1052
+ # to 'Tensor'. This is because the lambda is called with the
+ # PythonArgParser output '_r.tensor(3)', which is a stack-allocated object
+ # and needs to be passed by value. Also see comments in 'dispatch_lambda_return_str()'.
1055
+ #
1056
+ # // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
1057
+ # [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
1058
+ #
1059
+ # For the multi-output case it can keep using reference types because the
1060
+ # PythonArgParser output has been unpacked to local variables, e.g.:
1061
+ #
1062
+ # // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *,
1063
+ # // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
1064
+ # [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple<Tensor,Tensor>
1065
+ #
1066
+ # For deprecated python signatures, the lambda should follow the deprecated python arg order.
1067
+ # TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary?
1068
+
1069
+
1070
+ def dispatch_lambda_args(
1071
+ ps: PythonSignature, f: NativeFunction, symint: bool = True
1072
+ ) -> Tuple[DispatchLambdaArgument, ...]:
1073
+ if isinstance(ps, PythonSignatureDeprecated):
1074
+ schema = ps.deprecated_schema
1075
+ else:
1076
+ schema = f.func
1077
+
1078
+ # Start with cpp arguments - the dispatch lambda signature always includes 'self'
1079
+ cpp_args = cpp.arguments(
1080
+ arguments=schema.arguments,
1081
+ faithful=False,
1082
+ symint=symint,
1083
+ method=False,
1084
+ cpp_no_default_args=f.cpp_no_default_args,
1085
+ )
1086
+ out_args: Set[str] = {a.name for a in schema.arguments.out}
1087
+
1088
+ # Convert from cpp argument to lambda argument
1089
+ def dispatch_lambda_arg(cpp_arg: Binding) -> DispatchLambdaArgument:
1090
+ type_str = cpp_arg.type
1091
+ is_out_arg = cpp_arg.name in out_args
1092
+ if ps.method and cpp_arg.name == "self":
1093
+ # For method's 'self', we can use 'const Tensor &' and simply ignore mutability!
1094
+ type_str = "const at::Tensor &"
1095
+ else:
1096
+ # For other cases we need to prevent dangling refs to temps (unless it's
1097
+ # unpacked scattered output)
1098
+ # The reason is explained in the comments above and in 'dispatch_lambda_return_str()'.
1099
+ # TODO: avoid this special handling?
1100
+ ensure_temp_safe = len(out_args) <= 1 or not is_out_arg
1101
+ if ensure_temp_safe:
1102
+ type_str = {
1103
+ "at::Tensor &": "at::Tensor",
1104
+ }.get(type_str, type_str)
1105
+ return DispatchLambdaArgument(
1106
+ name=cpp_arg.name,
1107
+ type_str=type_str,
1108
+ is_out_arg=is_out_arg,
1109
+ )
1110
+
1111
+ return tuple(map(dispatch_lambda_arg, cpp_args))
1112
+
1113
+
1114
+ # [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean
1115
+ # it's enough to just extend the list here. Before you do this, make sure
1116
+ # to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h.
1117
+ SUPPORTED_RETURN_TYPES = {
1118
+ "at::Tensor",
1119
+ "::std::tuple<at::Tensor,at::Tensor>",
1120
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor>",
1121
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
1122
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
1123
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
1124
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,int64_t>",
1125
+ "::std::tuple<at::Tensor,at::Tensor,double,int64_t>",
1126
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t>",
1127
+ "::std::tuple<at::Tensor,at::Tensor,double,at::Tensor,int64_t>",
1128
+ "::std::tuple<double,int64_t>",
1129
+ "::std::tuple<at::Tensor,::std::vector<at::Tensor>>",
1130
+ "::std::vector<at::Tensor>",
1131
+ # Needed for flash attention forw/backward
1132
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor>",
1133
+ "at::Scalar",
1134
+ "bool",
1135
+ "int64_t",
1136
+ "void*",
1137
+ "void",
1138
+ "at::QScheme",
1139
+ "double",
1140
+ "at::IntArrayRef",
1141
+ "at::ScalarType",
1142
+ "at::Stream",
1143
+ }
1144
+
1145
+
1146
+ def dispatch_lambda_return_str(f: NativeFunction) -> str:
1147
+ # [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &')
1148
+ # because the dispatch lambdas take mutable arguments *by value*, not
1149
+ # by reference. If you then return a reference to such an argument, you
1150
+ # will now have a pointer to a dangling stack entry. Not good.
1151
+ #
1152
+ # You want:
1153
+ #
1154
+ # auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); };
1155
+ # ^^^^^^
1156
+ #
1157
+ # *not*
1158
+ #
1159
+ # auto dispatch_selu_ = [](Tensor self) -> Tensor& { ...; return at::selu_(self); };
1160
+ # ^^^^^^^
1161
+ #
1162
+ # (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing
1163
+ # codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a
1164
+ # mutable reference to temporary. Maybe we could assign it to a
1165
+ # variable itself.)
1166
+ returns_without_annotation = tuple(
1167
+ Return(r.name, r.type, None) for r in f.func.returns
1168
+ )
1169
+ return_str = cpp.returns_type(returns_without_annotation, symint=True).cpp_type()
1170
+ if return_str not in SUPPORTED_RETURN_TYPES:
1171
+ raise RuntimeError(f"{f.func.name} returns unsupported type {return_str}")
1172
+ return return_str
1173
+
1174
+
1175
+ def cpp_dispatch_target(f: NativeFunction) -> str:
1176
+ symint = f.func.has_symint()
1177
+ name = cpp.name(f.func, symint_overload=symint)
1178
+ if Variant.method in f.variants:
1179
+ return f"self.{name}"
1180
+ if Variant.function in f.variants:
1181
+ if has_tensor_options(f) or f.func.name.name.base.endswith("_like"):
1182
+ namespace = "torch"
1183
+ else:
1184
+ namespace = "at"
1185
+ return f"{namespace}::{name}"
1186
+ raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}")
1187
+
1188
+
1189
+ def cpp_dispatch_exprs(
1190
+ f: NativeFunction,
1191
+ *,
1192
+ python_signature: Optional[PythonSignature] = None,
1193
+ ) -> Tuple[str, ...]:
1194
+ cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()
1195
+
1196
+ exprs: Tuple[str, ...] = tuple()
1197
+ if not isinstance(python_signature, PythonSignatureDeprecated):
1198
+ # By default the exprs are consistent with the C++ signature.
1199
+ exprs = tuple(a.name for a in cpp_args)
1200
+ else:
1201
+ # For deprecated python signatures we may need to fill in some constants.
1202
+ exprs = tuple(
1203
+ filter(
1204
+ lambda n: n != "out" or f.func.is_out_fn(),
1205
+ python_signature.deprecated_args_exprs,
1206
+ )
1207
+ )
1208
+
1209
+ if Variant.method in f.variants:
1210
+ exprs = tuple(filter("self".__ne__, exprs))
1211
+
1212
+ return exprs
1213
+
1214
+
1215
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1216
+ #
1217
+ # Python / C++ Args Binding
1218
+ #
1219
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1220
+
1221
+
1222
+ # We explicitly enumerate the PythonArgParser unpacking methods for all
1223
+ # supported types. This might be more verbose than necessary, partially
1224
+ # because of the irregularity of unpacking method naming, partially
1225
+ # because we want to mimic the old codegen behavior - to reject
1226
+ # unexpected and/or unsupported cases which the old codegen rejects.
1227
+ # For certain cases it is intentionally more restrictive than necessary,
1228
+ # e.g.: it doesn't accept doublelist with definite size.
1229
+ def arg_parser_unpack_method(
1230
+ t: Type, default: Optional[str], default_init: Optional[str], *, symint: bool = True
1231
+ ) -> str:
1232
+ has_default_init = default_init is not None
1233
+ if has_default_init and str(t) not in (
1234
+ "ScalarType?",
1235
+ "ScalarType",
1236
+ "Device",
1237
+ "Device?",
1238
+ "Layout",
1239
+ "Layout?",
1240
+ "bool",
1241
+ "bool?",
1242
+ ):
1243
+ raise RuntimeError(f"type '{t}' does not supported unpacking with default")
1244
+
1245
+ if isinstance(t, BaseType):
1246
+ if t.name in [
1247
+ BaseTy.Tensor,
1248
+ BaseTy.Stream,
1249
+ BaseTy.Storage,
1250
+ BaseTy.Scalar,
1251
+ BaseTy.Dimname,
1252
+ ]:
1253
+ # These unpack methods line up with their schema names
1254
+ return t.name.name.lower()
1255
+ elif t.name == BaseTy.ScalarType:
1256
+ return "scalartypeWithDefault" if has_default_init else "scalartype"
1257
+ elif t.name == BaseTy.Device:
1258
+ return "deviceWithDefault" if has_default_init else "device"
1259
+ elif t.name == BaseTy.DeviceIndex:
1260
+ return "toInt64"
1261
+ elif t.name == BaseTy.int:
1262
+ return "toInt64"
1263
+ elif t.name == BaseTy.SymInt:
1264
+ return "toSymInt" if symint else "toInt64"
1265
+ elif t.name == BaseTy.bool:
1266
+ return "toBoolWithDefault" if has_default_init else "toBool"
1267
+ elif t.name == BaseTy.float:
1268
+ return "toDouble"
1269
+ elif t.name == BaseTy.str:
1270
+ return "stringView"
1271
+ elif t.name == BaseTy.Layout:
1272
+ return "layoutWithDefault" if has_default_init else "layout"
1273
+ elif t.name == BaseTy.MemoryFormat:
1274
+ return "memoryformat"
1275
+
1276
+ elif isinstance(t, OptionalType):
1277
+ if str(t.elem) == "Tensor":
1278
+ return "optionalTensor"
1279
+ elif str(t.elem) == "Generator":
1280
+ return "generator"
1281
+ elif str(t.elem) == "Dimname[]":
1282
+ return "toDimnameListOptional"
1283
+ elif not has_default_init and default in (None, "None", "c10::nullopt"):
1284
+ # If default is None: append 'Optional' to elem's unpacking method
1285
+ return (
1286
+ arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional"
1287
+ )
1288
+ else:
1289
+ # Otherwise, load as underlying type with default
1290
+ return arg_parser_unpack_method(
1291
+ t.elem, default, default_init, symint=symint
1292
+ )
1293
+
1294
+ elif isinstance(t, ListType):
1295
+ if str(t.elem) == "Tensor":
1296
+ # accept and use definite size
1297
+ return f"tensorlist_n<{t.size}>" if t.size is not None else "tensorlist"
1298
+ elif str(t.elem) == "Tensor?":
1299
+ return "list_of_optional_tensors"
1300
+ elif str(t.elem) == "Dimname":
1301
+ # accept definite size
1302
+ return "dimnamelist"
1303
+ elif str(t.elem) == "int":
1304
+ # accept definite size
1305
+ return "intlist"
1306
+ elif str(t.elem) == "float":
1307
+ return "doublelist"
1308
+ elif str(t.elem) == "SymInt":
1309
+ # accept definite size
1310
+ return "symintlist" if symint else "intlist"
1311
+ elif str(t.elem) == "Scalar":
1312
+ return "scalarlist"
1313
+ raise RuntimeError(f"type '{t}' is not supported by PythonArgParser")
1314
+
1315
+
1316
+ # Return RHS expression for python argument using PythonArgParser output.
1317
+ # e.g. for arg name 'foo', arg type 'bool', arg_index = 2, returns '_r.toBool(2)'
1318
+ def arg_parser_output_expr(
1319
+ arg_index: int, a: PythonArgument, *, symint: bool = True
1320
+ ) -> PythonArgParserOutputExpr:
1321
+ has_default = a.default_init is not None
1322
+ unpack_method = arg_parser_unpack_method(
1323
+ t=a.type, default=a.default, default_init=a.default_init, symint=symint
1324
+ )
1325
+ default = f", {a.default_init}" if has_default else ""
1326
+ expr = f"_r.{unpack_method}({arg_index}{default})"
1327
+
1328
+ return PythonArgParserOutputExpr(
1329
+ name=a.name,
1330
+ expr=expr,
1331
+ index=arg_index,
1332
+ argument=a,
1333
+ )
1334
+
1335
+
1336
+ # Returns a map with key = arg_name and value = PythonArgParserOutputExpr.
1337
+ def arg_parser_output_exprs(
1338
+ ps: PythonSignature, f: NativeFunction, *, symint: bool = True
1339
+ ) -> Dict[str, PythonArgParserOutputExpr]:
1340
+ return {
1341
+ e.name: e
1342
+ for i, a in enumerate(ps.arguments())
1343
+ for e in (arg_parser_output_expr(i, a, symint=symint),)
1344
+ }
1345
+
1346
+
1347
+ # argument name to type for scattered tensor options fields
1348
+ TENSOR_OPTIONS_FIELDS = {
1349
+ "dtype": "ScalarType?",
1350
+ "device": "Device?",
1351
+ "layout": "Layout?",
1352
+ "pin_memory": "bool?",
1353
+ "requires_grad": "bool?",
1354
+ }
1355
+
1356
+
1357
+ # bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
1358
+ def dispatch_lambda_exprs(
1359
+ ps: PythonSignature, f: NativeFunction, *, symint: bool = True
1360
+ ) -> DispatchLambdaArgumentExprs:
1361
+ # This method binds 'arg_parser_outputs' to 'lambda_args' by producing
+ # 'inits' and 'lambda_args_exprs' for each lambda argument using the arg parser
+ # outputs.
1364
+ arg_parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
1365
+ lambda_args = dispatch_lambda_args(ps, f, symint=symint)
1366
+ inits: List[str] = []
1367
+ lambda_args_exprs: Dict[str, str] = {}
1368
+
1369
+ has_toptions = has_tensor_options(f)
1370
+
1371
+ # 1. special inits/unpacking to provide binding exprs for lambda arguments.
1372
+ for a in ps.arguments(skip_tensor_options=True):
1373
+ name = a.name
1374
+ arg_parser_expr = arg_parser_outputs[a.name].expr
1375
+
1376
+ if has_toptions and name == "self":
1377
+ # TODO: why does this need to be a special case?
1378
+ inits.extend(
1379
+ [
1380
+ f"auto self = {arg_parser_expr};",
1381
+ ]
1382
+ )
1383
+ lambda_args_exprs[name] = name
1384
+ elif (
1385
+ isinstance(a, PythonOutArgument)
1386
+ and len(a.outputs) > 1
1387
+ and f.func.is_out_fn()
1388
+ ):
1389
+ inits.extend(
1390
+ [
1391
+ f"auto out = {arg_parser_expr};",
1392
+ ]
1393
+ )
1394
+ for i, out_arg in enumerate(a.outputs):
1395
+ lambda_args_exprs[out_arg.name] = f"out[{i}]"
1396
+ elif str(a.type) == "Dimname[]?":
1397
+ # [old codegen]
1398
+ # TODO: make this part of something more general, or get rid of it.
1399
+ # optional<ArrayRef<T>> are special. The PythonArgParser returns an
1400
+ # optional<vector<T>>, which cannot be implicitly converted to
1401
+ # optional<ArrayRef<T>>. One needs to unwrap the optional and rewrap.
1402
+ inits.extend(
1403
+ [
1404
+ f"auto __{name} = {arg_parser_expr};",
1405
+ f"c10::optional<DimnameList> {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;", # noqa: B950
1406
+ ]
1407
+ )
1408
+ lambda_args_exprs[name] = name
1409
+ else:
1410
+ # default case - directly using PythonArgParser output expr
1411
+ lambda_args_exprs[name] = arg_parser_expr
1412
+
1413
+ # method's self is passed directly to python binding, rather than parsed
1414
+ if ps.method:
1415
+ lambda_args_exprs["self"] = "self"
1416
+
1417
+ # 2. special packing/checking for TensorOptions.
1418
+ tensor_options_args_names = [a.name for a in ps.tensor_options_args]
1419
+ if has_toptions:
1420
+ if f.func.is_out_fn():
1421
+ raise RuntimeError(f"{f.func}: tensor options with output arg")
1422
+ for a in ps.tensor_options_args:
1423
+ if a.name not in TENSOR_OPTIONS_FIELDS:
1424
+ raise RuntimeError(
1425
+ f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments"
1426
+ )
1427
+ if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
1428
+ raise RuntimeError(
1429
+ f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'"
1430
+ )
1431
+ if not all(
1432
+ a in tensor_options_args_names for a in TENSOR_OPTIONS_FIELDS.keys()
1433
+ ):
1434
+ raise RuntimeError(
1435
+ f"{f.func}: incomplete tensor options args: {tensor_options_args_names}"
1436
+ )
1437
+
1438
+ inits.append(
1439
+ f"""\
1440
+ const auto options = TensorOptions()
1441
+ .dtype({arg_parser_outputs['dtype'].expr})
1442
+ .device({arg_parser_outputs['device'].expr})
1443
+ .layout({arg_parser_outputs['layout'].expr})
1444
+ .requires_grad({arg_parser_outputs['requires_grad'].expr})
1445
+ .pinned_memory({arg_parser_outputs['pin_memory'].expr});
1446
+ torch::utils::maybe_initialize_cuda(options);
1447
+ """
1448
+ )
1449
+ lambda_args_exprs["options"] = "options"
1450
+
1451
+ # 3. special case - access scattered TensorOptions fields without packing
1452
+ # TODO: maybe move to the generator side as it's not related to binding.
1453
+ if not has_toptions and tensor_options_args_names:
1454
+ if "dtype" in tensor_options_args_names:
1455
+ # we're an output-arg variant, check these args against output tensor
1456
+ if not f.func.is_out_fn():
1457
+ raise RuntimeError(
1458
+ f"{f.func}: dtype in tensor_options_args without output arg"
1459
+ )
1460
+ if not all(a in tensor_options_args_names for a in ("layout", "device")):
1461
+ raise RuntimeError(
1462
+ f"{f.func}: incomplete tensor options for output check"
1463
+ )
1464
+
1465
+ inits.append(
1466
+ f"""\
1467
+ check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr},
1468
+ {arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr},
1469
+ {arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr});
1470
+ """
1471
+ )
1472
+ # we'll set requires_grad on outgoing tensor
1473
+ if "requires_grad" not in tensor_options_args_names:
1474
+ raise RuntimeError(
1475
+ f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]'
1476
+ )
1477
+
1478
+ return DispatchLambdaArgumentExprs(
1479
+ exprs=tuple(lambda_args_exprs[a.name] for a in lambda_args),
1480
+ inits=inits,
1481
+ )
env-llmeval/lib/python3.10/site-packages/torchgen/api/translate.py ADDED
@@ -0,0 +1,430 @@
1
+ from typing import Dict, List, NoReturn, Sequence, Union
2
+
3
+ from torchgen.api.types import (
4
+ ArrayRefCType,
5
+ BaseCType,
6
+ Binding,
7
+ boolT,
8
+ ConstRefCType,
9
+ deviceT,
10
+ Expr,
11
+ intArrayRefT,
12
+ iOptTensorListRefT,
13
+ layoutT,
14
+ ListCType,
15
+ longT,
16
+ memoryFormatT,
17
+ MutRefCType,
18
+ NamedCType,
19
+ opmath_t,
20
+ OptionalCType,
21
+ optionalIntArrayRefT,
22
+ optionalScalarRefT,
23
+ optionalSymIntArrayRefT,
24
+ optionalTensorRefT,
25
+ scalar_t,
26
+ scalarT,
27
+ scalarTypeT,
28
+ SpecialArgName,
29
+ symIntArrayRefT,
30
+ SymIntT,
31
+ tensorOptionsT,
32
+ tensorT,
33
+ VectorCType,
34
+ )
35
+
36
+ # This file implements a small program synthesis engine that implements
37
+ # conversions between one API to another.
38
+ #
39
+ # The key data type in this file in NamedCType, short for Named C++ semantic type. A NamedCType
40
+ # represents a C++ type, plus semantic information about what it represents.
41
+ # For example, consider the argument "bool pin_memory"; its normal C++ type is
42
+ # "bool", but its C++ semantic type also keeps track that this represents a
43
+ # "pin_memory"; you can't just use a random other boolean in a context where you
44
+ # need a "pin_memory"!
45
+ #
46
+ # The translator takes a list of needed NamedCTypes, and then figures out how
47
+ # to construct expressions with these NamedCTypes from the given bindings. Many
48
+ # of these expressions are trivial (I need a Tensor other; there's a Tensor
49
+ # other scope); others are more nontrivial and may require packing/unpacking.
50
+ # Some examples of non-trivial action:
51
+ #
52
+ # - Need the "dtype" binding? Well, maybe "dtype" isn't available
53
+ # in the context, instead, "options" is, and you need to extract
54
+ # it from there. (Gather)
55
+ #
56
+ # - Need the "context" binding? Well, maybe "context" isn't available
57
+ # in the context, and you need to construct it from "dtype", "device",
58
+ # etc. (Scatter)
59
+ #
60
+ # - Need the "memory_format" binding? Well, actually, it's available
61
+ # from both "memory_format" and "options", so you had better make sure
62
+ # they are consistent. (Join)
63
+
64
+ options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))
65
+
66
+ out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT)))
67
+
68
+ longVec_ctype = VectorCType(BaseCType(longT))
69
+ longSymVec_ctype = VectorCType(BaseCType(SymIntT))
70
+ optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
71
+ optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
72
+ optionalTensor_ctype = OptionalCType(BaseCType(tensorT))
73
+
74
+
75
+ class UnsatError(RuntimeError):
76
+ pass
77
+
78
+
79
+ # Given a set of in-scope bindings and a set of target bindings, synthesize
80
+ # a list of expressions that uses only the in-scope bindings (bindings) that
81
+ # have all of the types of goals. You may want to use this function if
82
+ # you're generating code for a function like:
83
+ #
84
+ # void f({args}) {
85
+ # g({exprs}); // g is a different API
86
+ # }
87
+ #
88
+ # and you need to generate "exprs".
89
+ #
90
+ # Typically, a list of Bindings is convenient to get (you usually call something
91
+ # like arguments() to get them); but technically you only need less information:
92
+ # for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for
93
+ # 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
94
+ # something more complicated, e.g., tracking the set of bindings in a context,
95
+ # you may find using these smaller types more convenient.
96
+ def translate(
97
+ bindings: Sequence[Union[Expr, Binding]],
98
+ goals: Sequence[Union[NamedCType, Binding]],
99
+ *,
100
+ method: bool = False,
101
+ allow_expensive_conversions: bool = False,
102
+ ) -> List[Expr]:
103
+ binding_exprs: List[Expr] = []
104
+ for b in bindings:
105
+ if isinstance(b, Binding):
106
+ binding_exprs.append(
107
+ Expr(
108
+ expr=b.name,
109
+ type=b.nctype,
110
+ )
111
+ )
112
+ else:
113
+ binding_exprs.append(b)
114
+
115
+ goal_ctypes: List[NamedCType] = []
116
+ for g in goals:
117
+ if isinstance(g, Binding):
118
+ goal_ctypes.append(g.nctype)
119
+ else:
120
+ goal_ctypes.append(g)
121
+
122
+ # Add all the bindings to the context
123
+ ctx: Dict[NamedCType, str] = {}
124
+ for b in binding_exprs:
125
+ ctx[b.type] = b.expr
126
+
127
+ # While we're at it, do some simple forward inference, looking through
128
+ # constructors.
129
+ #
130
+ # NB: When should you do forward inference versus backward inference?
131
+ # The general idea:
132
+ #
133
+ # - Backward inference WHEN the goal gets smaller
134
+ # - Forward inference WHEN the hypothesis gets smaller
135
+ #
136
+ # This helps ensure termination: backward inference starts with a goal
137
+ # and tries to make it simpler and simpler until it's trivial; if the
138
+ # goal can grow in size, we blow up to a really huge goal size.
139
+ # Similarly, with forward inference we take hypotheses and decompose
140
+ # them into simpler hypotheses; if hypotheses could expand in size,
141
+ # we also have potential nontermination. (In the code below, forward
142
+ # inference is only ever carried out at a single step, but you could
143
+ # imagine repeated application of forward inference being profitable.)
144
+ #
145
+ # A good starting point in the literature for exploring more about proof
146
+ # search are these lecture notes
147
+ # https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
148
+ #
149
+ # TODO: My kingdom for a pattern matcher
150
+ # https://www.python.org/dev/peps/pep-0634/
151
+ #
152
+ # TODO: This could get us in recomputation trouble if b.expr is nontrivial.
153
+ # Fix this by implementing some sort of sharing so that if multiple
154
+ # goals share the same expression, we only compute it once. This seems
155
+ # to matter in practice as compiler is often unwilling to CSE nontrivial
156
+ # expressions like scalar.to<scalar_t>()
157
+ t = b.type
158
+ if (
159
+ isinstance(t, ConstRefCType)
160
+ and isinstance(t.elem, OptionalCType)
161
+ and isinstance(t.elem.elem, BaseCType)
162
+ and str(t.elem.elem.type) == "at::Tensor"
163
+ ):
164
+ ctx[
165
+ NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))
166
+ ] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"
167
+
168
+ if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
169
+ ctx[
170
+ NamedCType(t.name, BaseCType(optionalTensorRefT))
171
+ ] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"
172
+
173
+ if t.type == ConstRefCType(BaseCType(scalarT)):
174
+ ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"
175
+
176
+ if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
177
+ ctx[
178
+ NamedCType(t.name, BaseCType(optionalScalarRefT))
179
+ ] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"
180
+
181
+ if t.type == BaseCType(scalar_t):
182
+ ctx[
183
+ NamedCType(t.name, BaseCType(opmath_t))
184
+ ] = f"static_cast<opmath_t>({b.expr})"
185
+
186
+ # [Note: IOptTensorListRef]
187
+ if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
188
+ ctx[
189
+ NamedCType(t.name, BaseCType(iOptTensorListRefT))
190
+ ] = f"at::IOptTensorListRef({b.expr})"
191
+
192
+ # Add implicit bindings if the generated code is inside a Tensor method
193
+ if method:
194
+ ctx[
195
+ NamedCType("self", MutRefCType(BaseCType(tensorT)))
196
+ ] = "const_cast<Tensor&>(*this)"
197
+ ctx[
198
+ NamedCType("self", ConstRefCType(BaseCType(tensorT)))
199
+ ] = "const_cast<Tensor&>(*this)"
200
+ # This is better! Byte-for-byte compat
201
+ # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"
202
+
203
+ def unsat(goal: NamedCType) -> NoReturn:
204
+ ctx_desc = "\n".join(
205
+ f" {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
206
+ )
207
+ raise UnsatError(
208
+ f"""
209
+ Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
210
+ When I failed, the following bindings were available in the context:
211
+
212
+ {ctx_desc}
213
+
214
+ This probably means there is a missing rule in the rules of torchgen.api.translate.
215
+ Check this module for more information.
216
+ """
217
+ )
218
+
219
+ # A shitty backtracking search implementation. It's shitty because it
220
+ # does backtracking via stack (bad idea!) and for the most part tries to
221
+ # avoid backtracking. In particular, if
222
+ # direct=True, we won't try to do any fancy synthesis, just trivial
223
+ # conversions (e.g., "T a" is OK for "const T& a"). So all of the
224
+ # existing rules in this function simply try to solve immediately,
225
+ # and bail if things don't work out.
226
+ def solve(goal: NamedCType, *, direct: bool) -> str:
227
+ def direct_solve(goal: NamedCType) -> str:
228
+ return solve(goal, direct=True)
229
+
230
+ if goal in ctx:
231
+ # Trivial
232
+ return ctx[goal]
233
+
234
+ # const & is satisfied with mutable &
235
+ if isinstance(goal.type, ConstRefCType):
236
+ try:
237
+ # WARNING: not strictly decreasing; be careful not
238
+ # to add a direct conversion that goes satisfies
239
+ # mutable& with const&
240
+ return solve(
241
+ NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
242
+ )
243
+ except UnsatError:
244
+ pass
245
+
246
+ # mutable & is satisfied with value
247
+ if isinstance(goal.type, MutRefCType):
248
+ try:
249
+ return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
250
+ except UnsatError:
251
+ pass
252
+
253
+ # TODO: These are referentially equal, shouldn't have to do this;
254
+ # ensuring we don't use type synonym IntArrayRef in codegen would
255
+ # help
256
+ if goal.type == ArrayRefCType(BaseCType(longT)):
257
+ return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct)
258
+
259
+ if direct:
260
+ unsat(goal)
261
+
262
+ # For now, all of these rules are mutually exclusive.
263
+ if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
264
+ memory_format = direct_solve(
265
+ NamedCType(
266
+ SpecialArgName.possibly_redundant_memory_format,
267
+ OptionalCType(BaseCType(memoryFormatT)),
268
+ )
269
+ )
270
+ # No need to join "memory_format" and "options" if the target API takes "options" directly.
271
+ # Otherwise it will cause the redundant memory_format error.
272
+ if options_ctype in goal_ctypes:
273
+ return memory_format
274
+ try:
275
+ options = direct_solve(options_ctype)
276
+ return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
277
+ except UnsatError:
278
+ return memory_format
279
+ elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
280
+ dtype = direct_solve(
281
+ NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
282
+ )
283
+ pin_memory = direct_solve(
284
+ NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
285
+ )
286
+ device = direct_solve(
287
+ NamedCType("device", OptionalCType(BaseCType(deviceT)))
288
+ )
289
+ layout = direct_solve(
290
+ NamedCType("layout", OptionalCType(BaseCType(layoutT)))
291
+ )
292
+ return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"
293
+
294
+ elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
295
+ try:
296
+ options = direct_solve(options_ctype)
297
+ return f"optTypeMetaToScalarType({options}.dtype_opt())"
298
+ except UnsatError:
299
+ out_tensor = direct_solve(out_tensor_ctype)
300
+ return f"{out_tensor}.scalar_type()"
301
+
302
+ elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
303
+ try:
304
+ options = direct_solve(options_ctype)
305
+ return f"{options}.layout_opt()"
306
+ except UnsatError:
307
+ out_tensor = direct_solve(out_tensor_ctype)
308
+ return f"{out_tensor}.layout()"
309
+
310
+ elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
311
+ try:
312
+ options = direct_solve(options_ctype)
313
+ return f"{options}.device_opt()"
314
+ except UnsatError:
315
+ out_tensor = direct_solve(out_tensor_ctype)
316
+ return f"{out_tensor}.device()"
317
+
318
+ elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
319
+ try:
320
+ options = direct_solve(options_ctype)
321
+ return f"{options}.pinned_memory_opt()"
322
+ except UnsatError:
323
+ # If we're calling a factory op from its out= variant,
324
+ # We don't actually care about the value of pin_memory.
325
+ out_tensor = direct_solve(out_tensor_ctype)
326
+ return "c10::nullopt"
327
+
328
+ # We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
329
+ elif goal.type == BaseCType(intArrayRefT):
330
+ try:
331
+ return direct_solve(NamedCType(goal.name, longVec_ctype))
332
+ except UnsatError:
333
+ # We can also go SymIntArrayRef -> IntArrayRef
334
+ symIntArrayRef_type = direct_solve(
335
+ NamedCType(goal.name, BaseCType(symIntArrayRefT))
336
+ )
337
+ return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})"
338
+ elif goal.type == BaseCType(symIntArrayRefT):
339
+ try:
340
+ r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT)))
341
+ return f"c10::fromIntArrayRefSlow({r})"
342
+ except UnsatError:
343
+ return direct_solve(NamedCType(goal.name, longSymVec_ctype))
344
+ elif goal.type == BaseCType(SymIntT):
345
+ return direct_solve(NamedCType(goal.name, BaseCType(longT)))
346
+ elif goal.type == OptionalCType(BaseCType(SymIntT)):
347
+ argname = direct_solve(
348
+ NamedCType(goal.name, OptionalCType(BaseCType(longT)))
349
+ )
350
+ return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt"
351
+ elif goal.type == BaseCType(longT):
352
+ symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
353
+ return f"{symInt_type}.guard_int(__FILE__, __LINE__)"
354
+ elif goal.type == OptionalCType(BaseCType(longT)):
355
+ argname = direct_solve(
356
+ NamedCType(goal.name, OptionalCType(BaseCType(SymIntT)))
357
+ )
358
+ return f"{argname}.has_value() ? c10::make_optional({argname}->guard_int(__FILE__, __LINE__)) : c10::nullopt"
359
+ elif goal.type == BaseCType(optionalIntArrayRefT):
360
+ try:
361
+ return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
362
+ except UnsatError:
363
+ argname = direct_solve(
364
+ NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT))
365
+ )
366
+ return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt"
367
+ elif goal.type == BaseCType(optionalSymIntArrayRefT):
368
+ # TODO: You might also want to solve this from longSymVec_ctype or
369
+ # an optional version of it
370
+ argname = direct_solve(
371
+ NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
372
+ )
373
+ return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt"
374
+ elif goal.type == BaseCType(optionalScalarRefT):
375
+ return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
376
+ elif goal.type == BaseCType(optionalTensorRefT):
377
+ return direct_solve(NamedCType(goal.name, optionalTensor_ctype))
378
+
379
+ # Note [translation from C++ reference to value types]
380
+ # The below cases are all for when we have an argument with a reference type,
381
+ # and a corresponding goal with a value type.
382
+ # These are needed when we populate the inputs to a lambda capture and we need
383
+ # to guarantee the lifetime of each captured argument.
384
+ # We guard it with an explicit kwarg because converting to a value type is expensive
385
+ # (e.g., O(n) to convert from IntArrayRef to vector<int>),
386
+ # so the caller of translate() should be explicit that they need it.
387
+ if allow_expensive_conversions:
388
+ if goal.type == VectorCType(BaseCType(longT)):
389
+ intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
390
+ argname = direct_solve(intArrayRef_ctype)
391
+ return f"{argname}.vec()"
392
+ if goal.type == VectorCType(BaseCType(SymIntT)):
393
+ symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
394
+ argname = direct_solve(symIntArrayRef_ctype)
395
+ return f"{argname}.vec()"
396
+ elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
397
+ optionalIntArrayRef_ctype = NamedCType(
398
+ goal.name, BaseCType(optionalIntArrayRefT)
399
+ )
400
+ argname = direct_solve(optionalIntArrayRef_ctype)
401
+ return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt"
402
+ elif goal.type == OptionalCType(BaseCType(scalarT)):
403
+ optionalScalarRef_ctype = NamedCType(
404
+ goal.name, BaseCType(optionalScalarRefT)
405
+ )
406
+ argname = direct_solve(optionalScalarRef_ctype)
407
+ return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
408
+ elif goal.type == OptionalCType(BaseCType(tensorT)):
409
+ optionalTensorRef_ctype = NamedCType(
410
+ goal.name, BaseCType(optionalTensorRefT)
411
+ )
412
+ argname = direct_solve(optionalTensorRef_ctype)
413
+ return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
414
+ # Technically, we also need to handle cases of C++ containers holding reference types.
415
+ # But there currently aren't any ops that require lambda capture codegen
416
+ # with arguments like std::vector<IntArrayRef>.
417
+ # If that changes, we'll have to add the translation here.
418
+
419
+ # We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
420
+ # We could probably generalize this to non-tensor types too.
421
+ if goal.type == MutRefCType(BaseCType(tensorT)):
422
+ const_ref_tensor_ctype = NamedCType(
423
+ goal.name, ConstRefCType(BaseCType(tensorT))
424
+ )
425
+ argname = direct_solve(const_ref_tensor_ctype)
426
+ return f"const_cast<Tensor&>({argname})"
427
+
428
+ unsat(goal)
429
+
430
+ return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
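The rule set above is easiest to see on a small example. Below is a minimal, self-contained sketch of the same goal-driven lookup, using plain tuples instead of torchgen's NamedCType/Expr classes; the encoding and the single conversion rule shown here are invented for illustration, not torchgen's real API.

from typing import Dict, Tuple

Goal = Tuple[str, str]  # (argument name, C++ type), e.g. ("dtype", "optional<ScalarType>")

def solve(goal: Goal, ctx: Dict[Goal, str]) -> str:
    """Return a C++ expression producing `goal` from the bindings in `ctx`."""
    if goal in ctx:
        # Direct hit: a binding of exactly this name and type is already in scope.
        return ctx[goal]
    _, ctype = goal
    if ctype == "optional<ScalarType>":
        # Derive dtype from a TensorOptions binding, mirroring the dtype rule above.
        options = ctx[("options", "TensorOptions")]
        return f"optTypeMetaToScalarType({options}.dtype_opt())"
    raise KeyError(f"unsat: {goal}")

ctx = {("options", "TensorOptions"): "options"}
print(solve(("dtype", "optional<ScalarType>"), ctx))
# prints: optTypeMetaToScalarType(options.dtype_opt())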
env-llmeval/lib/python3.10/site-packages/torchgen/api/ufunc.py ADDED
@@ -0,0 +1,209 @@
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional
3
+
4
+ import torchgen.api.types as api_types
5
+
6
+ from torchgen.api import cpp, structured
7
+ from torchgen.api.types import (
8
+ ArgName,
9
+ BaseCppType,
10
+ BaseCType,
11
+ Binding,
12
+ ConstRefCType,
13
+ CType,
14
+ NamedCType,
15
+ scalarT,
16
+ )
17
+ from torchgen.model import (
18
+ Argument,
19
+ BaseTy,
20
+ BaseType,
21
+ DispatchKey,
22
+ FunctionSchema,
23
+ NativeFunctionsGroup,
24
+ Type,
25
+ )
26
+
27
+
28
+ def schema_kernel_name(func: FunctionSchema, dispatch_key: DispatchKey) -> str:
29
+ assert func.is_out_fn(), "ufunc.kernel_name should only be invoked on out schemas"
30
+ return f"ufunc_{func.name.name}_{dispatch_key}"
31
+
32
+
33
+ def kernel_name(g: NativeFunctionsGroup, dispatch_key: DispatchKey) -> str:
34
+ return schema_kernel_name(g.out.func, dispatch_key)
35
+
36
+
37
+ # Tensors are omitted (as they are stored in TensorIterator); everything else is
38
+ # passed along (technically, we can pass tensors along too, it just wastes
39
+ # argument registers)
40
+ #
41
+ # NB: used for CPU only
42
+ def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]:
43
+ # Dispatch stubs are always plain ints
44
+ r = cpp.valuetype_type(t, binds=binds, symint=False)
45
+ if r is not None:
46
+ return r
47
+
48
+ if t == BaseType(BaseTy.Scalar):
49
+ return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
50
+ elif t == BaseType(BaseTy.Tensor):
51
+ return None
52
+ else:
53
+ raise AssertionError(f"unrecognized type {repr(t)}")
54
+
55
+
56
+ def opmath_type(scalar_t: BaseCppType) -> BaseCppType:
57
+ if scalar_t == api_types.scalar_t:
58
+ return api_types.opmath_t
59
+ raise NotImplementedError
60
+
61
+
62
+ # NB: Tensors in constructor are stored in opmath_t, not scalar_t
63
+ # because Tensor in constructor = it's a scalar tensor partially applied =
64
+ # it can be higher precision and we want to compute in that higher precision
65
+ #
66
+ # NB: CUDA only
67
+ def ufunctor_ctor_type(t: Type, *, binds: ArgName, scalar_t: BaseCppType) -> NamedCType:
68
+ r = cpp.valuetype_type(t, binds=binds, symint=False)
69
+ if r is not None:
70
+ return r
71
+
72
+ if t == BaseType(BaseTy.Scalar):
73
+ return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
74
+ elif t == BaseType(BaseTy.Tensor):
75
+ return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
76
+ else:
77
+ raise AssertionError(f"unrecognized type {repr(t)}")
78
+
79
+
80
+ # Only Tensors ever get passed directly to operator()
81
+ #
82
+ # NB: CUDA only
83
+ # (Actually, this works for CPU too)
84
+ def ufunctor_apply_type(
85
+ t: Type, *, binds: ArgName, scalar_t: BaseCppType
86
+ ) -> NamedCType:
87
+ if t == BaseType(BaseTy.Tensor):
88
+ return NamedCType(binds, BaseCType(scalar_t))
89
+ else:
90
+ raise AssertionError(f"unrecognized type {repr(t)}")
91
+
92
+
93
+ # The actual ufunc template function the user writes. Everything here
94
+ # is done in the computation type. compute_t is opmath_t in CUDA and scalar_t
95
+ # in CPU
96
+ def ufunc_type(t: Type, *, binds: ArgName, compute_t: CType) -> NamedCType:
97
+ r = cpp.valuetype_type(t, binds=binds, symint=False)
98
+ if r is not None:
99
+ return r
100
+
101
+ if t == BaseType(BaseTy.Scalar):
102
+ return NamedCType(binds, compute_t)
103
+ elif t == BaseType(BaseTy.Tensor):
104
+ return NamedCType(binds, compute_t)
105
+ else:
106
+ raise AssertionError(f"unrecognized type {repr(t)}")
107
+
108
+
109
+ def ufunctor_ctor_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
110
+ return Binding(
111
+ nctype=ufunctor_ctor_type(a.type, binds=a.name, scalar_t=scalar_t),
112
+ name=a.name,
113
+ default=None,
114
+ argument=a,
115
+ )
116
+
117
+
118
+ def ufunctor_apply_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
119
+ return Binding(
120
+ nctype=ufunctor_apply_type(a.type, binds=a.name, scalar_t=scalar_t),
121
+ name=a.name,
122
+ default=None,
123
+ argument=a,
124
+ )
125
+
126
+
127
+ def ufunc_argument(a: Argument, compute_t: CType) -> Binding:
128
+ return Binding(
129
+ nctype=ufunc_type(a.type, binds=a.name, compute_t=compute_t),
130
+ name=a.name,
131
+ default=None,
132
+ argument=a,
133
+ )
134
+
135
+
136
+ @dataclass(frozen=True)
137
+ class UfunctorBindings:
138
+ ctor: List[Binding]
139
+ apply: List[Binding]
140
+
141
+
142
+ # ufunctors are a CUDA-only concept representing functors that take some of
143
+ # their arguments in a host-side constructor, and the rest in the device-side
144
+ # apply. E.g.,
145
+ #
146
+ # template <typename scalar_t>
147
+ # struct CUDAFunctorOnSelf_add {
148
+ # using opmath_t = at::opmath_type<scalar_t>;
149
+ # opmath_t other_;
150
+ # opmath_t alpha_;
151
+ # CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {}
152
+ # __device__ scalar_t operator()(scalar_t self) {
153
+ # return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
154
+ # }
155
+ # };
156
+ #
157
+ # The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers
158
+ # to the operator() definition
159
+ def ufunctor_arguments(
160
+ g: NativeFunctionsGroup, *, scalar_tensor_idx: Optional[int], scalar_t: BaseCppType
161
+ ) -> UfunctorBindings:
162
+ ctor = []
163
+ apply = []
164
+ for a in g.functional.func.arguments.flat_non_out:
165
+ if a.type.is_tensor_like():
166
+ if scalar_tensor_idx == 0:
167
+ # put it in the ctor anyway
168
+ ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
169
+ scalar_tensor_idx = None
170
+ else:
171
+ if scalar_tensor_idx is not None:
172
+ scalar_tensor_idx -= 1
173
+ apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t))
174
+ else:
175
+ ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
176
+ assert scalar_tensor_idx is None
177
+ return UfunctorBindings(ctor=ctor, apply=apply)
178
+
179
+
180
+ # ufuncs are the inner loop template functions that you wrote in ufunc/add.h
181
+ # which do the actual computation in question. E.g.,
182
+ #
183
+ # template <typename T>
184
+ # C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
185
+ # return self + alpha * other;
186
+ # }
187
+ #
188
+ # In this file, we refer to T as compute_t, which is bound by the caller
189
+ def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> List[Binding]:
190
+ return [
191
+ ufunc_argument(a, compute_t=compute_t)
192
+ for a in g.functional.func.arguments.flat_non_out
193
+ ]
194
+
195
+
196
+ # Stubs are the DispatchStub trampolines that CPU kernels use to get to their
197
+ # vectorized versions. E.g.,
198
+ #
199
+ # using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
200
+ # DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
201
+ def stub_arguments(g: NativeFunctionsGroup) -> List[Binding]:
202
+ # stubs drop all tensor arguments (they are implicit in the TensorIterator
203
+ # argument) and keep everything else
204
+ return [
205
+ r
206
+ for a in g.out.func.arguments.flat_non_out
207
+ if not a.type.is_tensor_like()
208
+ for r in structured.argument(a)
209
+ ]
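To make the ctor/apply split performed by ufunctor_arguments concrete, here is a standalone sketch that mirrors its control flow on plain (name, is_tensor) pairs instead of torchgen Argument objects; the add example and the scalar_tensor_idx value are illustrative only.

from typing import List, Optional, Tuple

def split_ufunctor_args(
    args: List[Tuple[str, bool]],  # (argument name, is it tensor-like?)
    scalar_tensor_idx: Optional[int],
) -> Tuple[List[str], List[str]]:
    """The scalar tensor and all non-tensor arguments go to the host-side ctor;
    the remaining tensors go to the device-side operator()."""
    ctor, apply = [], []
    for name, is_tensor in args:
        if is_tensor:
            if scalar_tensor_idx == 0:
                ctor.append(name)          # the partially-applied scalar tensor
                scalar_tensor_idx = None
            else:
                if scalar_tensor_idx is not None:
                    scalar_tensor_idx -= 1
                apply.append(name)
        else:
            ctor.append(name)
    return ctor, apply

# add(Tensor self, Tensor other, Scalar alpha), with `other` as the scalar tensor:
print(split_ufunctor_args([("self", True), ("other", True), ("alpha", False)], 1))
# -> (['other', 'alpha'], ['self']), matching CUDAFunctorOnSelf_add above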
env-llmeval/lib/python3.10/site-packages/torchgen/api/unboxing.py ADDED
@@ -0,0 +1,248 @@
1
+ from typing import List, Tuple
2
+
3
+ from torchgen.api import cpp
4
+ from torchgen.api.types import Binding, CppSignatureGroup, CType
5
+ from torchgen.model import (
6
+ Argument,
7
+ BaseTy,
8
+ BaseType,
9
+ ListType,
10
+ NativeFunction,
11
+ OptionalType,
12
+ Type,
13
+ )
14
+
15
+ # This file generates the code for unboxing wrappers, i.e., the glue logic to unbox a boxed operator and convert the
16
+ # ivalues from the stack into the correct arguments for the unboxed kernel, based on the corresponding JIT schema. This codegen is
17
+ # an alternative way to generate unboxing wrappers similar to the existing C++ metaprogramming approach but gets the
18
+ # job done statically. These generated unboxing wrappers will be useful under the scenario where we need to register
19
+ # a fixed set of operators known at compile time and thus can save some time in the runtime initialization phase.
20
+ #
21
+ # Here's an example on how the codegen works:
22
+ #
23
+ # - Function Schema (source of truth)
24
+ #
25
+ # aten::empty.names(int[] size, *, Dimname[]? names,
26
+ # ScalarType? dtype=None, Layout? layout=None,
27
+ # Device? device=None, bool? pin_memory=None,
28
+ # MemoryFormat? memory_format=None) -> Tensor
29
+ # - Argument Conversion
30
+ # Generates C++ code to convert an ivalue (from stack) to its underlying C++ type.
31
+ # - int[] size
32
+ # ```cpp
33
+ # const c10::List<c10::IValue> size_list_in = (std::move(peek(stack, 0, 7))).toList();
34
+ #
35
+ # std::vector<int64_t> size_vec;
36
+ # for (c10::IValue size_elem: size_list_in) {
37
+ # int64_t size_base = size_elem.to<int64_t>();
38
+ # size_vec.push_back(size_base);
39
+ # }
40
+ # at::ArrayRef<int64_t> size_list_out(size_vec);
41
+ # ~~~~~~~~~~~~~ <-- The converted argument from ivalues in the stack.
42
+ # Will be passed to unboxed kernel.
43
+ # ```
44
+ # - Dimname[]? names
45
+ # ```cpp
46
+ # c10::optional<c10::IValue> names_opt = (std::move(peek(stack, 1, 7))).toOptional<c10::IValue>();
47
+ # c10::optional<at::ArrayRef<at::Dimname>> names_opt_out;
48
+ # if (names_opt.has_value()) {
49
+ # ~~~~~~~~~~~ <-- Unwrapping optional shell
50
+ # const c10::IValue names_opt_in = names_opt.value();
51
+ # const c10::List<c10::IValue> names_list_in = names_opt_in.toList();
52
+ #
53
+ # std::vector<at::Dimname> names_vec;
54
+ # for (c10::IValue names_elem: names_list_in) {
55
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~ <-- Unrolling list, then convert elements one by one.
56
+ # at::Dimname names_base = names_elem.to<at::Dimname>();
57
+ # names_vec.push_back(names_base);
58
+ # }
59
+ # at::ArrayRef<at::Dimname> names_list_out(names_vec);
60
+ #
61
+ # names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>(names_list_out);
62
+ # } else {
63
+ # names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>();
64
+ # }
65
+ # ```
66
+ # - ScalarType? dtype (similarly for the rest of the arguments)
67
+ # ```cpp
68
+ # c10::optional<c10::IValue> dtype_opt = (std::move(peek(stack, 2, 7))).toOptional<c10::IValue>();
69
+ # c10::optional<at::ScalarType> dtype_opt_out;
70
+ # if (dtype_opt.has_value()) {
71
+ # const c10::IValue dtype_opt_in = dtype_opt.value();
72
+ # at::ScalarType dtype_base = dtype_opt_in.to<at::ScalarType>();
73
+ # ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it
74
+ # directly using ".to<T>()" API.
75
+ # dtype_opt_out = c10::optional<at::ScalarType>(dtype_base);
76
+ # } else {
77
+ # dtype_opt_out = c10::optional<at::ScalarType>();
78
+ # }
79
+ # ```
80
+ #
81
+ # - Unboxed Kernel Call
82
+ # ```cpp
83
+ # auto result_ = torch::empty(
84
+ # size_list_out,
85
+ # names_opt_out,
86
+ # options,
87
+ # memory_format_opt_out
88
+ # );
89
+ # ```
90
+ #
91
+ # - Push Result Back to Stack
92
+ # ```cpp
93
+ # drop(stack, 7);
94
+ # pack(stack, std::move(result_));
95
+ # ```
96
+ connector = "\n\t"
97
+
98
+
99
+ # Return unboxing function name for a NativeFunction
100
+ def name(f: NativeFunction) -> str:
101
+ return f.func.name.unambiguous_name()
102
+
103
+
104
+ # Convert all the arguments in a NativeFunction to C++ code
105
+ def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]:
106
+ # we need the 'self' argument so method needs to be False
107
+ args = (
108
+ CppSignatureGroup.from_native_function(f, method=False)
109
+ .most_faithful_signature()
110
+ .arguments()
111
+ )
112
+ code_list = [
113
+ f"c10::IValue {args[i].name} = std::move(peek(stack, {i}, {len(args)}));"
114
+ for i in range(len(args))
115
+ ] + [""]
116
+ binding_list = []
117
+ for arg in args:
118
+ # expecting only Argument
119
+ if not isinstance(arg.argument, Argument):
120
+ raise Exception(
121
+ f"Unexpected argument type, expecting `Argument` but got {arg}"
122
+ )
123
+ argument: Argument = arg.argument
124
+ unboxed_name, _, code, decl = argumenttype_ivalue_convert(
125
+ argument.type,
126
+ argument.name,
127
+ mutable=argument.is_write,
128
+ )
129
+ code_list.extend(decl)
130
+ code_list.extend(code)
131
+ binding_list.append(arg.with_name(unboxed_name))
132
+ return binding_list, code_list
133
+
134
+
135
+ # Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
136
+ # (1) the C++ code necessary to unbox the argument
137
+ # (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
138
+ def argumenttype_ivalue_convert(
139
+ t: Type, arg_name: str, *, mutable: bool = False
140
+ ) -> Tuple[str, CType, List[str], List[str]]:
141
+ # Unboxing is for mobile, which doesn't care about SymInts
142
+ ctype = cpp.argumenttype_type(
143
+ t=t, mutable=mutable, binds=arg_name, symint=False
144
+ ).type
145
+
146
+ if isinstance(t, BaseType):
147
+ out_name = f"{arg_name}_base"
148
+ code, decl = _gen_code_base_type(
149
+ arg_name=arg_name, out_name=out_name, ctype=ctype
150
+ )
151
+ elif isinstance(t, OptionalType):
152
+ out_name = f"{arg_name}_opt_out"
153
+ code, decl = _gen_code_optional_type(
154
+ arg_name=arg_name,
155
+ out_name=out_name,
156
+ t=t,
157
+ ctype=ctype,
158
+ )
159
+ elif isinstance(t, ListType):
160
+ out_name = f"{arg_name}_list_out"
161
+ code, decl = _gen_code_list_type(
162
+ arg_name=arg_name,
163
+ out_name=out_name,
164
+ t=t,
165
+ ctype=ctype,
166
+ )
167
+ else:
168
+ raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
169
+ return out_name, ctype, code, decl
170
+
171
+
172
+ def _gen_code_base_type(
173
+ arg_name: str, out_name: str, ctype: CType
174
+ ) -> Tuple[List[str], List[str]]:
175
+ return [
176
+ f"{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
177
+ ], []
178
+
179
+
180
+ def _gen_code_optional_type(
181
+ arg_name: str, out_name: str, t: OptionalType, ctype: CType
182
+ ) -> Tuple[List[str], List[str]]:
183
+ in_name = f"{arg_name}_opt_in"
184
+ res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name)
185
+ return (
186
+ f"""
187
+ c10::optional<c10::IValue> {arg_name}_opt = {arg_name}.toOptional<c10::IValue>();
188
+ {ctype.cpp_type(strip_ref=True)} {out_name};
189
+ if ({arg_name}_opt.has_value()) {{
190
+ const c10::IValue {in_name} = {arg_name}_opt.value();
191
+ {connector.join(res_code)}
192
+ {out_name} = {ctype.cpp_type(strip_ref=True)}({res_name});
193
+ }} else {{
194
+ {out_name} = {ctype.cpp_type(strip_ref=True)}();
195
+ }}
196
+ """.split(
197
+ "\n"
198
+ ),
199
+ decl,
200
+ )
201
+
202
+
203
+ def _gen_code_list_type(
204
+ arg_name: str, out_name: str, t: ListType, ctype: CType
205
+ ) -> Tuple[List[str], List[str]]:
206
+ in_name = f"{arg_name}_list_in"
207
+ elem_name = f"{arg_name}_elem"
208
+ code = [f"const c10::List<c10::IValue> {in_name} = {arg_name}.toList();"]
209
+ res_name, res_ctype, res_code, decl = argumenttype_ivalue_convert(t.elem, elem_name)
210
+ # handle list type with size, e.g., bool[4]
211
+ if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool and t.size:
212
+ code.extend(
213
+ f"""
214
+ {ctype.cpp_type(strip_ref=True)} {out_name} = as_array<{res_ctype.cpp_type(strip_ref=True)}, {t.size}>({in_name});
215
+ """.split(
216
+ "\n"
217
+ )
218
+ )
219
+ # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
220
+ elif isinstance(t.elem, OptionalType):
221
+ code.extend(
222
+ f"""
223
+ {ctype.cpp_type(strip_ref=True)} {out_name};
224
+ for (c10::IValue {elem_name}: {in_name}) {{
225
+ {connector.join(res_code)}
226
+ {out_name}.push_back({res_name});
227
+ }}
228
+ """.split(
229
+ "\n"
230
+ )
231
+ )
232
+ else:
233
+ # use ArrayRef as default.
234
+ vec_name = arg_name + "_vec"
235
+ # need to bring vector instantiation out of scope so that ArrayRef has valid data
236
+ decl.append(f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};")
237
+ code.extend(
238
+ f"""
239
+ for (c10::IValue {elem_name}: {in_name}) {{
240
+ {connector.join(res_code)}
241
+ {vec_name}.push_back({res_name});
242
+ }}
243
+ {ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
244
+ """.split(
245
+ "\n"
246
+ )
247
+ )
248
+ return code, decl
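A hedged usage sketch: assuming the vendored torchgen package is importable and its model constructors behave as used below (ListType, BaseType, and BaseTy are assumptions about its API surface), this exercises the list-conversion path for an int[] argument named `size`.

from torchgen.api.unboxing import argumenttype_ivalue_convert
from torchgen.model import BaseTy, BaseType, ListType

t = ListType(elem=BaseType(BaseTy.int), size=None)  # JIT schema type: int[]
out_name, ctype, code, decl = argumenttype_ivalue_convert(t, "size", mutable=False)
print(out_name)          # name of the converted C++ variable, e.g. size_list_out
print("\n".join(decl))   # vector declaration hoisted out of the conversion scope
print("\n".join(code))   # the generated C++ loop that unboxes each list element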
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ // an external backend might generate files within its code tree
4
+ // and check all the source files within the tree with clang-format.
5
+ // so, disable it since the backend might have a different config.
6
+ // clang-format off
7
+
8
+ // ${generated_comment}
9
+
10
+ #include <ATen/Tensor.h>
11
+
12
+ ${namespace_prologue}
13
+
14
+ struct ${class_name} {
15
+
16
+ ${dispatch_declarations}
17
+
18
+ };
19
+ ${namespace_epilogue}
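The ${...} placeholders in this and the following templates are expanded by torchgen's CodeTemplate/FileManager machinery when backend files are generated. A small hedged sketch of that expansion; the class name and the declaration string below are invented for illustration.

from torchgen.code_template import CodeTemplate

tmpl = CodeTemplate("""\
struct ${class_name} {
${dispatch_declarations}
};
""")
# List-valued substitutions are emitted one entry per line, which is how
# blocks such as ${dispatch_declarations} are expanded in the real templates.
print(tmpl.substitute(
    class_name="XLANativeFunctions",  # hypothetical backend class name
    dispatch_declarations=["static at::Tensor add(const at::Tensor & self);"],
))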
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ // This file contains autogenerated LazyTensor IR nodes
4
+ ${lazy_ir_sysinc}
5
+ ${lazy_ir_inc}
6
+
7
+ ${namespace_prologue}
8
+ using at::operator<<;
9
+
10
+ // kNullValue is used to contribute a static hash value any time
11
+ // a node has an Optional<Value> input that is nullopt. It is important
12
+ // to differentiate between HASH(nullopt, something) and HASH(something, nullopt),
13
+ // and using kNullValue in the hash function in the order of arguments
14
+ // serves this purpose.
15
+ static const torch::lazy::Value kNullValue = torch::lazy::Value();
16
+
17
+ ${ir_declarations}
18
+
19
+ ${namespace_epilogue}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #ifdef TORCH_ASSERT_NO_OPERATORS
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if your change would be better placed in \
9
+ another file, or if a more specific header might achieve the same goal. \
10
+ See NOTE: [Tensor vs. TensorBase]
11
+ #endif
12
+
13
+ // Forward declarations of any types needed in the operator signatures.
14
+ // We can't directly include these classes because it will cause circular include dependencies.
15
+ // This file is included by TensorBody.h, which defines the Tensor class.
16
+ #include <ATen/core/ATen_fwd.h>
17
+
18
+ ${MethodOperators_includes}
19
+
20
+ namespace at {
21
+ namespace _ops {
22
+ ${MethodOperators_declarations}
23
+ } // namespace _ops
24
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunction.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ ${extra_includes}
16
+
17
+ ${native_function_declarations}
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchKey.cpp ADDED
@@ -0,0 +1,54 @@
1
+ // required for old g++ to compile PRId64 macros, see
2
+ // https://github.com/pytorch/pytorch/issues/3571
3
+ // for context
4
+ #ifndef __STDC_FORMAT_MACROS
5
+ #define __STDC_FORMAT_MACROS
6
+ #endif
7
+
8
+ // an external backend might generate files within its code tree
9
+ // and check all the source files within the tree with clang-format.
10
+ // so, disable it since the backend might have a different config.
11
+ // clang-format off
12
+
13
+ // NOTE: This condition is true for all PyTorch internal libraries; it
14
+ // just excludes external projects such as torch_xla which
15
+ // re-use some of the PyTorch codegen machinery.
16
+ #if defined(CAFFE2_BUILD_MAIN_LIB) || \
17
+ defined(TORCH_CUDA_BUILD_MAIN_LIB) || \
18
+ defined(TORCH_HIP_BUILD_MAIN_LIB) || \
19
+ defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
20
+ defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
21
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
22
+ #endif
23
+
24
+ // ${generated_comment}
25
+
26
+ #include <c10/core/TensorImpl.h>
27
+ #include <c10/core/Allocator.h>
28
+ #include <ATen/DeviceGuard.h>
29
+ #include <ATen/NamedTensorUtils.h>
30
+ #include <ATen/Utils.h>
31
+ #include <ATen/WrapDimUtils.h>
32
+ #include <ATen/Dispatch.h>
33
+ #include <c10/util/ExclusivelyOwned.h>
34
+ #include <c10/util/Half.h>
35
+ #include <c10/core/UndefinedTensorImpl.h>
36
+ #include <c10/util/Optional.h>
37
+ #include <ATen/Tensor.h>
38
+ #include <ATen/native/Resize.h>
39
+
40
+ #include <cstddef>
41
+ #include <functional>
42
+ #include <memory>
43
+ #include <utility>
44
+
45
+ #include <ATen/Config.h>
46
+ #include <ATen/core/op_registration/adaption.h>
47
+ #include <torch/library.h>
48
+ $extra_cuda_headers
49
+ $external_backend_headers
50
+ $dispatch_headers
51
+ $ops_headers
52
+
53
+ // See template file RegisterDispatchDefinitions.ini
54
+ $dispatch_definitions
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel ADDED
@@ -0,0 +1,4 @@
1
+ load("//:tools/bazel.bzl", "rules")
2
+ load(":build.bzl", "define_targets")
3
+
4
+ define_targets(rules = rules)
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc ADDED
Binary file (3.24 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc ADDED
Binary file (20.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc ADDED
Binary file (13.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc ADDED
Binary file (3.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc ADDED
Binary file (46.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl ADDED
@@ -0,0 +1,14 @@
1
+ def define_targets(rules):
2
+ rules.py_library(
3
+ name = "autograd",
4
+ srcs = rules.glob(["*.py"]),
5
+ data = rules.glob([
6
+ "*.yaml",
7
+ "templates/*",
8
+ ]),
9
+ visibility = ["//:__subpackages__"],
10
+ deps = [
11
+ rules.requirement("PyYAML"),
12
+ "//torchgen",
13
+ ],
14
+ )
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py ADDED
@@ -0,0 +1,31 @@
1
+ import functools
2
+ from typing import Callable
3
+
4
+ from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
5
+ from torchgen.context import native_function_manager
6
+ from torchgen.utils import T
7
+
8
+
9
+ # Like tools.api.context.with_native_function, but for
10
+ # NativeFunctionWithDifferentiabilityInfo.
11
+ def with_native_function_with_differentiability_info(
12
+ func: Callable[[NFWDI], T]
13
+ ) -> Callable[[NFWDI], T]:
14
+ @functools.wraps(func)
15
+ def wrapper(f: NFWDI) -> T:
16
+ with native_function_manager(f.func):
17
+ return func(f)
18
+
19
+ return wrapper
20
+
21
+
22
+ # Like the above but with an additional dispatch key string argument
23
+ def with_native_function_with_differentiability_info_and_key(
24
+ func: Callable[[NFWDI, str], T]
25
+ ) -> Callable[[NFWDI, str], T]:
26
+ @functools.wraps(func)
27
+ def wrapper(f: NFWDI, key: str) -> T:
28
+ with native_function_manager(f.func):
29
+ return func(f, key)
30
+
31
+ return wrapper
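A minimal usage sketch of the first decorator, assuming the vendored package is importable as torchgen.packaged.autograd; emit_something and its body are hypothetical, the point is only that the wrapped callable runs inside native_function_manager for fn.func.

from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
from torchgen.packaged.autograd.context import (
    with_native_function_with_differentiability_info,
)

@with_native_function_with_differentiability_info
def emit_something(fn: NFWDI) -> str:
    # By the time this body runs, native_function_manager(fn.func) is active,
    # so error reporting and local settings are scoped to this operator.
    return f"// generated for {fn.func.func.name}"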
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_annotated_fn_args.py ADDED
@@ -0,0 +1,129 @@
1
+ """
2
+ For procedural tests needed for __torch_function__, we use this function
3
+ to export method names and signatures as needed by the tests in
4
+ test/test_overrides.py.
5
+
6
+ python -m tools.autograd.gen_annotated_fn_args \
7
+ aten/src/ATen/native/native_functions.yaml \
8
+ aten/src/ATen/native/tags.yaml \
9
+ $OUTPUT_DIR \
10
+ tools/autograd
11
+
12
+ Where $OUTPUT_DIR is where you would like the files to be
13
+ generated. In the full build system, OUTPUT_DIR is
14
+ torch/testing/_internal/generated
15
+ """
16
+
17
+ import argparse
18
+ import os
19
+ import textwrap
20
+ from collections import defaultdict
21
+
22
+ from typing import Any, Dict, List, Sequence
23
+
24
+ import torchgen.api.python as python
25
+ from torchgen.context import with_native_function
26
+
27
+ from torchgen.gen import parse_native_yaml
28
+ from torchgen.model import Argument, BaseOperatorName, NativeFunction
29
+ from torchgen.utils import FileManager
30
+
31
+ from .gen_python_functions import (
32
+ is_py_fft_function,
33
+ is_py_linalg_function,
34
+ is_py_nn_function,
35
+ is_py_special_function,
36
+ is_py_torch_function,
37
+ is_py_variable_method,
38
+ should_generate_py_binding,
39
+ )
40
+
41
+
42
+ def gen_annotated(
43
+ native_yaml_path: str, tags_yaml_path: str, out: str, autograd_dir: str
44
+ ) -> None:
45
+ native_functions = parse_native_yaml(
46
+ native_yaml_path, tags_yaml_path
47
+ ).native_functions
48
+ mappings = (
49
+ (is_py_torch_function, "torch._C._VariableFunctions"),
50
+ (is_py_nn_function, "torch._C._nn"),
51
+ (is_py_linalg_function, "torch._C._linalg"),
52
+ (is_py_special_function, "torch._C._special"),
53
+ (is_py_fft_function, "torch._C._fft"),
54
+ (is_py_variable_method, "torch.Tensor"),
55
+ )
56
+ annotated_args: List[str] = []
57
+ for pred, namespace in mappings:
58
+ groups: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
59
+ for f in native_functions:
60
+ if not should_generate_py_binding(f) or not pred(f):
61
+ continue
62
+ groups[f.func.name.name].append(f)
63
+ for group in groups.values():
64
+ for f in group:
65
+ annotated_args.append(f"{namespace}.{gen_annotated_args(f)}")
66
+
67
+ template_path = os.path.join(autograd_dir, "templates")
68
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
69
+ fm.write_with_template(
70
+ "annotated_fn_args.py",
71
+ "annotated_fn_args.py.in",
72
+ lambda: {
73
+ "annotated_args": textwrap.indent("\n".join(annotated_args), " "),
74
+ },
75
+ )
76
+
77
+
78
+ @with_native_function
79
+ def gen_annotated_args(f: NativeFunction) -> str:
80
+ def _get_kwargs_func_exclusion_list() -> List[str]:
81
+ # functions that currently don't work with kwargs in test_overrides.py
82
+ return [
83
+ "diagonal",
84
+ "round_",
85
+ "round",
86
+ "scatter_",
87
+ ]
88
+
89
+ def _add_out_arg(
90
+ out_args: List[Dict[str, Any]], args: Sequence[Argument], *, is_kwarg_only: bool
91
+ ) -> None:
92
+ for arg in args:
93
+ if arg.default is not None:
94
+ continue
95
+ out_arg: Dict[str, Any] = {}
96
+ out_arg["is_kwarg_only"] = str(is_kwarg_only)
97
+ out_arg["name"] = arg.name
98
+ out_arg["simple_type"] = python.argument_type_str(
99
+ arg.type, simple_type=True
100
+ )
101
+ size_t = python.argument_type_size(arg.type)
102
+ if size_t:
103
+ out_arg["size"] = size_t
104
+ out_args.append(out_arg)
105
+
106
+ out_args: List[Dict[str, Any]] = []
107
+ _add_out_arg(out_args, f.func.arguments.flat_positional, is_kwarg_only=False)
108
+ if f"{f.func.name.name}" not in _get_kwargs_func_exclusion_list():
109
+ _add_out_arg(out_args, f.func.arguments.flat_kwarg_only, is_kwarg_only=True)
110
+
111
+ return f"{f.func.name.name}: {repr(out_args)},"
112
+
113
+
114
+ def main() -> None:
115
+ parser = argparse.ArgumentParser(description="Generate annotated_fn_args script")
116
+ parser.add_argument(
117
+ "native_functions", metavar="NATIVE", help="path to native_functions.yaml"
118
+ )
119
+ parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml")
120
+ parser.add_argument("out", metavar="OUT", help="path to output directory")
121
+ parser.add_argument(
122
+ "autograd", metavar="AUTOGRAD", help="path to template directory"
123
+ )
124
+ args = parser.parse_args()
125
+ gen_annotated(args.native_functions, args.tags, args.out, args.autograd)
126
+
127
+
128
+ if __name__ == "__main__":
129
+ main()
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_inplace_or_view_type.py ADDED
@@ -0,0 +1,613 @@
1
+ # Generates ADInplaceOrViewType.h/cpp
2
+ #
3
+ # NOTE: If any changes are being made to the ADInplaceOrView codegen please also check
4
+ # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
5
+ # The fallback is expected to mimic this codegen, so we should keep the two in sync.
6
+
7
+ from typing import Dict, List, Optional, Sequence, Tuple
8
+
9
+ from torchgen.api import cpp
10
+ from torchgen.api.autograd import (
11
+ dispatch_strategy,
12
+ gen_differentiable_outputs,
13
+ NativeFunctionWithDifferentiabilityInfo,
14
+ )
15
+ from torchgen.api.types import (
16
+ BaseCType,
17
+ Binding,
18
+ boolT,
19
+ ConstRefCType,
20
+ CType,
21
+ DispatcherSignature,
22
+ intArrayRefT,
23
+ longT,
24
+ OptionalCType,
25
+ symIntArrayRefT,
26
+ SymIntT,
27
+ # See Note [Nested Arg Types]
28
+ tensorT,
29
+ )
30
+ from torchgen.code_template import CodeTemplate
31
+ from torchgen.context import with_native_function
32
+ from torchgen.model import (
33
+ NativeFunction,
34
+ SchemaKind,
35
+ SelfArgument,
36
+ TensorOptionsArguments,
37
+ Type,
38
+ )
39
+ from torchgen.utils import FileManager
40
+
41
+ from .context import with_native_function_with_differentiability_info
42
+ from .gen_trace_type import (
43
+ get_return_value,
44
+ MANUAL_AUTOGRAD,
45
+ tie_return_values,
46
+ type_wrapper_name,
47
+ )
48
+
49
+ # See NOTE [ Autograd View Variables ] in variable.h for details.
50
+ # If you update list VIEW_FUNCTIONS or RETURNS_VIEWS_OF_INPUT,
51
+ # you **MUST** also update the public list of view ops accordingly in
52
+ # docs/source/tensor_view.rst. Note that not all ATen functions are exposed to the public,
53
+ # e.g. alias & sparse_coo_tensor_with_dims_and_tensors.
54
+ #
55
+ # A map: function name => name of the argument that all outputs are view of
56
+
57
+ VIEW_FUNCTIONS_WITH_METADATA_CHANGE = [
58
+ "view_as_complex",
59
+ "view_as_real",
60
+ "_conj",
61
+ "_neg_view",
62
+ "_nested_view_from_buffer",
63
+ ]
64
+
65
+ VIEW_FUNCTIONS = {
66
+ "numpy_T": "self",
67
+ "alias": "self",
68
+ "as_strided": "self",
69
+ "diagonal": "self",
70
+ "expand": "self",
71
+ "permute": "self",
72
+ "select": "self",
73
+ "slice": "self",
74
+ "split": "self",
75
+ "split_with_sizes": "self",
76
+ "squeeze": "self",
77
+ "t": "self",
78
+ "transpose": "self",
79
+ "unfold": "self",
80
+ "unsqueeze": "self",
81
+ "flatten": "self",
82
+ "view": "self",
83
+ "unbind": "self",
84
+ "_indices": "self",
85
+ "_values": "self",
86
+ "indices": "self",
87
+ "values": "self",
88
+ "crow_indices": "self",
89
+ "col_indices": "self",
90
+ "ccol_indices": "self",
91
+ "row_indices": "self",
92
+ # sparse_coo ctor output should really be views of both indices and values,
93
+ # but we only support making a view of a single variable, and indices is
94
+ # discrete anyways.
95
+ # FIXME: clone indices on construction.
96
+ "sparse_coo_tensor_with_dims_and_tensors": "values",
97
+ "_reshape_alias": "self",
98
+ "_test_autograd_multiple_dispatch_view": "self",
99
+ }
100
+
101
+ for key in VIEW_FUNCTIONS_WITH_METADATA_CHANGE:
102
+ VIEW_FUNCTIONS[key] = "self"
103
+
104
+ # note: some VIEW_FUNCTIONS are just compositions of the view functions above
105
+ # this list contains both the root view functions and any that are purely composed
106
+ # of viewing functions, and is used by the JIT to determine when an operator
107
+ # may return a view of its inputs; however they may sometimes return a copy.
108
+ # (e.g. `contiguous`)
109
+ RETURNS_VIEWS_OF_INPUT = set(VIEW_FUNCTIONS.keys()).union(
110
+ {
111
+ "chunk",
112
+ "detach",
113
+ "contiguous",
114
+ "reshape",
115
+ "reshape_as",
116
+ "expand_as",
117
+ "view_as",
118
+ "real",
119
+ "imag",
120
+ "narrow",
121
+ "movedim",
122
+ "tensor_split",
123
+ "swapdims",
124
+ "swapaxes",
125
+ "mT",
126
+ "mH",
127
+ "adjoint",
128
+ "matrix_H",
129
+ }
130
+ )
131
+
132
+ # These are the functions we consider views for the purposes of validating
133
+ # StorageImpl and TensorImpl in gen_variable_type.
134
+ # `_unsafe_view` is not included in VIEW_FUNCTIONS above because it is not a
135
+ # view for the purposes of the ADInplaceOrView kernel, so we do not want to call as_view.
136
+ # See NOTE [Unsafe View] for more info.
137
+ ALL_VIEW_FUNCTIONS = {
138
+ **VIEW_FUNCTIONS,
139
+ "_unsafe_view": "self",
140
+ }
141
+
142
+ ARRAYREF_TO_VEC = CodeTemplate(
143
+ """\
144
+ auto ${vec} = ${arg}.vec();
145
+ """
146
+ )
147
+
148
+ OPTIONAL_TO_VAL = CodeTemplate(
149
+ """\
150
+ auto ${val} = ${arg}.value_or(${default});
151
+ """
152
+ )
153
+
154
+ CALL_DISPATCH = CodeTemplate(
155
+ """\
156
+ at::_ops::${unambiguous_name}::call(${unpacked_args})"""
157
+ )
158
+
159
+ SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE = CodeTemplate(
160
+ """\
161
+ std::function<at::Tensor(const at::Tensor&)> func=nullptr;
162
+ if (${is_view_with_metadata_change} || !self.unsafeGetTensorImpl()->support_as_strided() ||
163
+ c10::AutogradState::get_tls_state().get_view_replay_enabled()) {
164
+ ${replay_view_func}
165
+ }
166
+ """
167
+ )
168
+
169
+ REPLAY_VIEW_LAMBDA_FUNC = CodeTemplate(
170
+ """\
171
+ func = [=](const at::Tensor& ${input_base}) {
172
+ return ${replay_view_call};
173
+ };
174
+ """
175
+ )
176
+
177
+ METHOD_DEFINITION = CodeTemplate(
178
+ """\
179
+ ${return_type} ${type_wrapper_name}(${formals}) {
180
+ ${type_definition_body}
181
+ }
182
+ """
183
+ )
184
+
185
+ WRAPPER_REGISTRATION = CodeTemplate(
186
+ """\
187
+ m.impl("${unqual_operator_name_with_overload}",
188
+ TORCH_FN(${class_type}::${type_wrapper_name})
189
+ );
190
+ """
191
+ )
192
+
193
+ AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION = CodeTemplate(
194
+ """\
195
+ m.impl("${unqual_operator_name_with_overload}", torch::autograd::autogradNotImplementedFallback());
196
+ """
197
+ )
198
+
199
+ INPLACE_REDISPATCH = CodeTemplate(
200
+ """\
201
+ {
202
+ at::AutoDispatchBelowADInplaceOrView guard;
203
+ at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
204
+ }
205
+ """
206
+ )
207
+
208
+ ASSIGN_RETURN_VALUE = CodeTemplate(
209
+ """\
210
+ ${return_values} = ${rhs_value};
211
+ """
212
+ )
213
+
214
+ VIEW_REDISPATCH = CodeTemplate(
215
+ """\
216
+ ${assign_return_values} ([&]() {
217
+ at::AutoDispatchBelowADInplaceOrView guard;
218
+ return at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
219
+ })();
220
+ """
221
+ )
222
+
223
+ TMP_VAR = "_tmp"
224
+
225
+
226
+ # FIXME: Ideally these functions should be methods on Type class, but we have a
227
+ # comment in codegen/model.py saying these concepts are not well defined.
228
+ # Thus we put a version that is commonly used by autograd codegen here.
229
+ def is_tensor_type(t: Type) -> bool:
230
+ # TODO: Should handle optional here?
231
+ return t.is_tensor_like() and t.is_list_like() is None
232
+
233
+
234
+ def is_tensor_list_type(t: Type) -> bool:
235
+ # TODO: Should handle optional here?
236
+ return t.is_tensor_like() and t.is_list_like() is not None
237
+
238
+
239
+ UNPACK_TENSOR = CodeTemplate(
240
+ """\
241
+ auto${ref} ${arg_name}_ = unpack${suffix}(${arg_name}, "${arg_name}", ${arg_pos});"""
242
+ )
243
+
244
+
245
+ def unpacked_name(arg_name: str) -> str:
246
+ return arg_name + "_"
247
+
248
+
249
+ @with_native_function
250
+ def unpack_args(f: NativeFunction) -> Tuple[List[str], List[Binding]]:
251
+ body: List[str] = []
252
+ unpacked_bindings: List[Binding] = []
253
+
254
+ bindings = [
255
+ r
256
+ for a in f.func.schema_order_arguments()
257
+ for r in cpp.argument(
258
+ a,
259
+ method=False,
260
+ symint=True,
261
+ cpp_no_default_args=set(),
262
+ faithful=False,
263
+ has_tensor_options=False,
264
+ )
265
+ ]
266
+
267
+ for i, binding in enumerate(bindings):
268
+ assert not isinstance(binding.argument, SelfArgument)
269
+ if isinstance(binding.argument, TensorOptionsArguments):
270
+ raise RuntimeError("VariableKernel shouldn't take TensorOptions")
271
+
272
+ is_nullable = binding.argument.type.is_nullable()
273
+ if not binding.argument.type.is_tensor_like() or is_nullable:
274
+ unpacked_bindings.append(binding)
275
+ continue
276
+
277
+ is_tensor_list = is_tensor_list_type(binding.argument.type)
278
+ ref = (not is_nullable) and not is_tensor_list
279
+ suffix = "_opt" if is_nullable and not is_tensor_list else ""
280
+ body.append(
281
+ UNPACK_TENSOR.substitute(
282
+ arg_name=binding.name,
283
+ arg_pos=i,
284
+ suffix=suffix,
285
+ ref="&" if ref else "",
286
+ )
287
+ )
288
+ unpacked_bindings.append(
289
+ Binding(
290
+ name=unpacked_name(binding.name),
291
+ nctype=binding.nctype,
292
+ argument=binding.argument,
293
+ default=binding.default,
294
+ )
295
+ )
296
+
297
+ return body, unpacked_bindings
298
+
299
+
300
+ def get_base_name(f: NativeFunction) -> str:
301
+ return f.func.name.name.base # TODO: should be str(f.func.name.name)?
302
+
303
+
304
+ def get_view_info(f: NativeFunction) -> Optional[str]:
305
+ base_name = get_base_name(f)
306
+ view_info = VIEW_FUNCTIONS.get(base_name, None)
307
+ if view_info is None and base_name in RETURNS_VIEWS_OF_INPUT:
308
+ view_info = "self"
309
+ return view_info
310
+
311
+
312
+ # For view replay calls, we generate an ordinary Dispatcher::call() instead, because:
313
+ # - We want to replay the entire call into the op, including any previously-set dispatch keys (including autograd!).
314
+ # - The view replay call also is not part of the hot path.
315
+ def emit_view_call(
316
+ f: NativeFunction, input_base: str, unpacked_args: Sequence[str]
317
+ ) -> str:
318
+ # View replay functions use the standard Dispatcher::call API.
319
+ return CALL_DISPATCH.substitute(
320
+ unambiguous_name=f.func.name.unambiguous_name(), unpacked_args=unpacked_args
321
+ )
322
+
323
+
324
+ def emit_view_lambda(f: NativeFunction, unpacked_bindings: List[Binding]) -> str:
325
+ """Generate an additional lambda function to recover views in backward when as_strided is not supported.
326
+ See Note [View + Inplace update for base tensor] and [View + Inplace update for view tensor] for more details.
327
+ """
328
+ input_base = "input_base"
329
+ replay_view_func = ""
330
+ updated_unpacked_args: List[str] = []
331
+ known_view_arg_simple_types: List[CType] = [
332
+ BaseCType(longT),
333
+ OptionalCType(BaseCType(longT)),
334
+ BaseCType(SymIntT),
335
+ OptionalCType(BaseCType(SymIntT)),
336
+ BaseCType(boolT),
337
+ BaseCType(intArrayRefT),
338
+ BaseCType(symIntArrayRefT),
339
+ ConstRefCType(BaseCType(tensorT)),
340
+ ]
341
+ for unpacked_binding in unpacked_bindings:
342
+ arg, arg_type = unpacked_binding.name, unpacked_binding.nctype.type
343
+ if arg == "self_":
344
+ updated_unpacked_args.append(input_base)
345
+ continue
346
+ if arg_type not in known_view_arg_simple_types:
347
+ known_types_str = ", ".join([str(t) for t in known_view_arg_simple_types])
348
+ raise TypeError(
349
+ f"You are adding an {arg_type} {arg} argument to op {cpp.name(f.func)} in addition to known types: "
350
+ f"{known_types_str}. Please update the list or materialize it so that it can be closed "
351
+ "over by value, also add a test in pytorch/xla/test/test_operations.py where this code "
352
+ "is exercised."
353
+ )
354
+ if arg_type == BaseCType(intArrayRefT) or arg_type == BaseCType(
355
+ symIntArrayRefT
356
+ ):
357
+ # It's not safe to close over IntArrayRef by value, since this is a
358
+ # reference type, so materialize a vector to close over by value
359
+ arg_vec = arg + "_vec"
360
+ replay_view_func += ARRAYREF_TO_VEC.substitute(arg=arg, vec=arg_vec)
361
+ updated_unpacked_args.append(arg_vec)
362
+ elif arg_type == OptionalCType(BaseCType(longT)):
363
+ # Materialize int64_t? to int64_t
364
+ arg_value = arg + "_val"
365
+ replay_view_func += OPTIONAL_TO_VAL.substitute(
366
+ arg=arg, val=arg_value, default="0"
367
+ )
368
+ updated_unpacked_args.append(arg_value)
369
+ elif (
370
+ arg == "nested_size_" or arg == "nested_strides_" or arg == "offsets_"
371
+ ) and arg_type == ConstRefCType(BaseCType(tensorT)):
372
+ # [NOTE] [Nested Arg Types]
373
+ # This is temporary. Nested tensors will be migrating to use SymInts and
374
+ # nested_size and nested_strides will no longer be tensors.
375
+ updated_unpacked_args.append(arg[:-1])
376
+ else:
377
+ updated_unpacked_args.append(arg)
378
+
379
+ replay_view_call = emit_view_call(f, input_base, updated_unpacked_args)
380
+ replay_view_func += REPLAY_VIEW_LAMBDA_FUNC.substitute(
381
+ input_base=input_base, replay_view_call=replay_view_call
382
+ )
383
+
384
+ is_view_with_metadata_change = (
385
+ "true" if cpp.name(f.func) in VIEW_FUNCTIONS_WITH_METADATA_CHANGE else "false"
386
+ )
387
+
388
+ return SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE.substitute(
389
+ is_view_with_metadata_change=is_view_with_metadata_change,
390
+ replay_view_func=replay_view_func,
391
+ )
392
+
393
+
394
+ def emit_view_body(
395
+ fn: NativeFunctionWithDifferentiabilityInfo, var: str
396
+ ) -> Tuple[str, str]:
397
+ # See NOTE [ Autograd View Variables ] in variable.h for details.
398
+ f = fn.func
399
+ base_name = get_base_name(f)
400
+ view_info = get_view_info(f)
401
+ call = ""
402
+ differentiable_outputs = gen_differentiable_outputs(fn)
403
+ differentiable_output_vars = {r.name for r in differentiable_outputs}
404
+ if not isinstance(view_info, str):
405
+ raise TypeError(
406
+ f"The view info should be a string for {base_name}, but it is: {view_info}"
407
+ )
408
+ if len(differentiable_output_vars) == 0:
409
+ # no output is differentiable (.indices() for SparseTensors for example)
410
+ rhs_value = (
411
+ f"as_view({view_info}, {var}, "
412
+ f"/* is_bw_differentiable */ false, /* is_fw_differentiable */ false)"
413
+ )
414
+ elif len(differentiable_output_vars) == 1:
415
+ # Single differentiable output (Tensor or Tensor[])
416
+ return_info = differentiable_outputs[0]
417
+ # We only support simple Tensor or a TensorList for functions that return views
418
+ if not is_tensor_type(return_info.type) and not is_tensor_list_type(
419
+ return_info.type
420
+ ):
421
+ raise RuntimeError(
422
+ f"{base_name} that return differentiable views can only return Tensor or Tensor[]"
423
+ )
424
+
425
+ # See Note [ View + Inplace detection]
426
+ def get_creation_meta_in_mode(original: str) -> str:
427
+ creation_meta_with_grad_mode = f"(at::GradMode::is_enabled() ? {original} : CreationMeta::NO_GRAD_MODE)"
428
+ return f"InferenceMode::is_enabled() ? CreationMeta::INFERENCE_MODE : {creation_meta_with_grad_mode}"
429
+
430
+ # Only allow rebasing of the history if we return a single Tensor
431
+ # If we are in a no grad block, raise a warning
432
+ # See NOTE [ View + Inplace detection ] for more details about this logic
433
+ if is_tensor_list_type(return_info.type):
434
+ creation_meta = get_creation_meta_in_mode("CreationMeta::MULTI_OUTPUT_NODE")
435
+ call += (
436
+ f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, "
437
+ "/* is_fw_differentiable */ true, "
438
+ f"/* creation_meta */ {creation_meta});"
439
+ )
440
+ rhs_value = f"std::move({var})"
441
+ else:
442
+ _, unpacked_bindings = unpack_args(f)
443
+ call += emit_view_lambda(f, unpacked_bindings)
444
+ creation_meta = get_creation_meta_in_mode("CreationMeta::DEFAULT")
445
+ rhs_value = (
446
+ f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, "
447
+ "/* is_fw_differentiable */ true, "
448
+ f"/* view_func */ func, /* creation_meta */ {creation_meta})"
449
+ )
450
+ else:
451
+ # This could be supported but we don't need it at the moment, so keeping things simple.
452
+ raise RuntimeError(
453
+ "Functions that return multiple differentiable outputs "
454
+ "where at least one of them is a view are not supported."
455
+ )
456
+ return call, rhs_value
457
+
458
+
459
+ def modifies_arguments(f: NativeFunction) -> bool:
460
+ return f.func.kind() in [SchemaKind.inplace, SchemaKind.out]
461
+
462
+
463
+ @with_native_function_with_differentiability_info
464
+ def emit_inplace_or_view_body(fn: NativeFunctionWithDifferentiabilityInfo) -> List[str]:
465
+ f = fn.func
466
+ inplace_view_body: List[str] = []
467
+
468
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
469
+ dispatcher_exprs = dispatcher_sig.exprs()
470
+
471
+ # code-generated ADInplaceOrView kernels plumb and recompute dispatch keys directly through the kernel for performance.
472
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
473
+ dispatch_key_set = "ks & c10::after_ADInplaceOrView_keyset"
474
+ redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])
475
+
476
+ # Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
477
+ # We could probably work harder to ensure that the fast variants are called instead, but the perf benefit would be minimal.
478
+ if modifies_arguments(f): # inplace op
479
+ inplace_view_body.append(
480
+ INPLACE_REDISPATCH.substitute(
481
+ unambiguous_name=f.func.name.unambiguous_name(),
482
+ unpacked_args=redispatch_args,
483
+ )
484
+ )
485
+ for r in cpp.return_names(f):
486
+ inplace_view_body.append(f"increment_version({r});")
487
+ else:
488
+ assert get_view_info(f) is not None
489
+ inplace_view_body.append(
490
+ VIEW_REDISPATCH.substitute(
491
+ assign_return_values="auto " + TMP_VAR + " = ",
492
+ unambiguous_name=f.func.name.unambiguous_name(),
493
+ unpacked_args=redispatch_args,
494
+ )
495
+ )
496
+ call, rhs_value = emit_view_body(fn, TMP_VAR)
497
+ inplace_view_body.append(call)
498
+ assert rhs_value is not None
499
+ inplace_view_body.append(
500
+ ASSIGN_RETURN_VALUE.substitute(
501
+ return_values=tie_return_values(f), rhs_value=rhs_value
502
+ )
503
+ )
504
+ if f.func.returns:
505
+ inplace_view_body.append(f"return {get_return_value(f)};")
506
+ return inplace_view_body
507
+
508
+
509
+ @with_native_function
510
+ def gen_formals(f: NativeFunction) -> str:
511
+ return ", ".join(
512
+ # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
513
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
514
+ ["c10::DispatchKeySet ks"]
515
+ + [
516
+ f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}'
517
+ for a in f.func.schema_order_arguments()
518
+ ]
519
+ )
520
+
521
+
522
+ @with_native_function_with_differentiability_info
523
+ def inplace_or_view_method_definition(
524
+ fn: NativeFunctionWithDifferentiabilityInfo,
525
+ ) -> Optional[str]:
526
+ f = fn.func
527
+ if get_view_info(f) is None and (
528
+ # For functions that modify their inputs but don't return them,
529
+ # we can't give them autograd support.
530
+ # See https://github.com/pytorch/pytorch/issues/53796
531
+ not modifies_arguments(f)
532
+ or len(f.func.returns) == 0
533
+ ):
534
+ return None
535
+ return METHOD_DEFINITION.substitute(
536
+ return_type=cpp.returns_type(f.func.returns, symint=True).cpp_type(),
537
+ type_wrapper_name=type_wrapper_name(f),
538
+ formals=gen_formals(f),
539
+ type_definition_body=emit_inplace_or_view_body(fn),
540
+ )
541
+
542
+
543
+ @with_native_function_with_differentiability_info
544
+ def inplace_or_view_method_registration(
545
+ fn: NativeFunctionWithDifferentiabilityInfo,
546
+ ) -> Optional[str]:
547
+ f = fn.func
548
+ if get_view_info(f) is None and (
549
+ not modifies_arguments(f) or len(f.func.returns) == 0
550
+ ):
551
+ return None
552
+ return WRAPPER_REGISTRATION.substitute(
553
+ unqual_operator_name_with_overload=f.func.name,
554
+ type_wrapper_name=type_wrapper_name(f),
555
+ class_type="ADInplaceOrView",
556
+ )
557
+
558
+
559
+ def use_derived(fn: NativeFunctionWithDifferentiabilityInfo) -> bool:
560
+ f = fn.func
561
+ name = cpp.name(f.func)
562
+ return name not in MANUAL_AUTOGRAD and dispatch_strategy(fn) == "use_derived"
563
+
564
+
565
+ def gen_inplace_or_view_type_env(
566
+ fn: NativeFunctionWithDifferentiabilityInfo,
567
+ ) -> Dict[str, List[str]]:
568
+ definition = inplace_or_view_method_definition(fn)
569
+ registration = inplace_or_view_method_registration(fn)
570
+
571
+ return {
572
+ "ops_headers": (
573
+ [f"#include <ATen/ops/{fn.func.root_name}_ops.h>"]
574
+ if definition is not None
575
+ else []
576
+ ),
577
+ "inplace_or_view_method_definitions": [definition]
578
+ if definition is not None
579
+ else [],
580
+ "inplace_or_view_wrapper_registrations": [registration]
581
+ if registration is not None
582
+ else [],
583
+ }
584
+
585
+
586
+ def gen_inplace_or_view_type(
587
+ out: str,
588
+ native_yaml_path: str,
589
+ tags_yaml_path: str,
590
+ fns_with_infos: List[NativeFunctionWithDifferentiabilityInfo],
591
+ template_path: str,
592
+ ) -> None:
593
+ # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
594
+ # template regarding sharding of the generated files.
595
+ num_shards = 2
596
+
597
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
598
+ fm.write_sharded(
599
+ "ADInplaceOrViewType.cpp",
600
+ [fn for fn in fns_with_infos if use_derived(fn)],
601
+ key_fn=lambda fn: fn.func.root_name,
602
+ base_env={
603
+ "generated_comment": "@"
604
+ + f"generated from {fm.template_dir_for_comments()}/ADInplaceOrViewType.cpp",
605
+ },
606
+ env_callable=gen_inplace_or_view_type_env,
607
+ num_shards=2,
608
+ sharded_keys={
609
+ "ops_headers",
610
+ "inplace_or_view_method_definitions",
611
+ "inplace_or_view_wrapper_registrations",
612
+ },
613
+ )
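The write_sharded call above splits ADInplaceOrViewType.cpp into two generated shards, keyed by each function's root name so every overload of one op lands in the same file. Below is a minimal sketch of that kind of key-based bucketing; the deterministic-hash assignment is an assumption made for illustration and is not necessarily the policy FileManager.write_sharded actually uses.

import hashlib
from collections import defaultdict
from typing import Dict, List

def bucket_by_key(keys: List[str], num_shards: int) -> Dict[int, List[str]]:
    # Deterministically bucket entries so that identical keys always land in the
    # same shard, keeping every overload of one root name in one generated file.
    shards: Dict[int, List[str]] = defaultdict(list)
    for key in keys:
        shard = int(hashlib.sha1(key.encode()).hexdigest(), 16) % num_shards
        shards[shard].append(key)
    return shards

print(bucket_by_key(["add", "add", "mul", "view", "transpose"], num_shards=2))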
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_python_functions.py ADDED
@@ -0,0 +1,1377 @@
1
+ # Generates Python bindings for ATen functions
2
+ #
3
+ # The bindings are generated as methods on python_variable or functions on the
4
+ # torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse
5
+ # or torch._C._special objects.
6
+ #
7
+
8
+ # Code tries to stick to the following rules:
9
+ #
10
+ # - templates should be colocated with the functions that use them.
11
+ # no templates are currently shared between functions, but if that
12
+ # happens, maybe put the template with the first one
13
+ #
14
+ # - don't use environment dictionaries when calling template.substitute().
15
+ # pass named arguments directly for everything, otherwise it's much too
16
+ # hard to track what's actually being used and by who
17
+ #
18
+ # - colocate any new hacks/adjustments with existing ones of the same kind.
19
+ # ideally in a data structure rather than code if possible. See e.g.
20
+ # SCHEMA_DEFAULT_CONVERSION_HACKS, etc.
21
+ #
22
+ # - similarly, conversions from one format to another should ideally happen
23
+ # all at once in a single place.
24
+ #
25
+ # - no nontrivial nested functions. couple-liners are ok but please no more.
26
+ # especially avoid functions that read/write outer variables defined far away.
27
+ #
28
+ # - raise RuntimeError instead of asserting, and put as much
29
+ # information as is available into the message. I.e. no need to
30
+ # plumb in new params whose only purpose is to fill out an error
31
+ # message, but use what's there
32
+ #
33
+
34
+ import itertools
35
+ import re
36
+ from collections import defaultdict
37
+
38
+ from typing import Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple
39
+
40
+ import yaml
41
+ from torchgen.api import cpp
42
+ from torchgen.api.python import (
43
+ arg_parser_output_exprs,
44
+ cpp_dispatch_exprs,
45
+ cpp_dispatch_target,
46
+ dispatch_lambda_args,
47
+ dispatch_lambda_exprs,
48
+ dispatch_lambda_return_str,
49
+ has_tensor_options,
50
+ namedtuple_fieldnames,
51
+ PythonSignature,
52
+ PythonSignatureDeprecated,
53
+ PythonSignatureGroup,
54
+ PythonSignatureNativeFunctionPair,
55
+ signature,
56
+ signature_from_schema,
57
+ )
58
+
59
+ from torchgen.code_template import CodeTemplate
60
+ from torchgen.context import with_native_function
61
+ from torchgen.gen import cpp_string, parse_native_yaml, parse_tags_yaml
62
+ from torchgen.model import (
63
+ Argument,
64
+ BaseOperatorName,
65
+ FunctionSchema,
66
+ NativeFunction,
67
+ Type,
68
+ Variant,
69
+ )
70
+ from torchgen.utils import FileManager, split_name_params
71
+ from torchgen.yaml_utils import YamlLoader
72
+
73
+ from .gen_trace_type import should_trace
74
+
75
+ #
76
+ # declarations blocklist
77
+ # We skip codegen for these functions, for various reasons.
78
+ # Future PRs will categorize this list and eliminate or hoist
79
+ # them out of eager-only codegen.
80
+ # See https://github.com/pytorch/pytorch/issues/30788
81
+ #
82
+
83
+ # These functions require manual Python bindings or are not exposed to Python
84
+ _SKIP_PYTHON_BINDINGS = [
85
+ "alias",
86
+ "contiguous",
87
+ "is_cuda",
88
+ "is_sparse",
89
+ "is_sparse_csr",
90
+ "size",
91
+ "stride",
92
+ "sym_size",
93
+ "sym_stride",
94
+ "sym_storage_offset",
95
+ "sym_numel",
96
+ ".*_backward",
97
+ ".*_backward_(out|input|weight|bias)",
98
+ ".*_forward",
99
+ ".*_forward_out",
100
+ ".*_jvp",
101
+ "_unsafe_view",
102
+ "tensor",
103
+ "_?sparse_(coo|compressed|csr|csc|bsr|bsc)_tensor.*",
104
+ "_range.*",
105
+ "_sparse_add_out",
106
+ "_sparse_div.*",
107
+ "_sparse_mul.*",
108
+ "_sparse_sub.*",
109
+ "_sparse_dense_add_out",
110
+ "index",
111
+ "index_out",
112
+ "unique_dim_consecutive",
113
+ "_cumsum.*",
114
+ "_cumprod.*",
115
+ "_sum.*",
116
+ "_prod.*",
117
+ "_th_.*",
118
+ "_thnn_.*",
119
+ "range.*",
120
+ "_solve.*",
121
+ "_inverse.*",
122
+ "_cholesky.*",
123
+ "_triangular_solve.*",
124
+ "_qr.*",
125
+ "_svd.*",
126
+ "slice",
127
+ "item",
128
+ "_local_scalar_dense",
129
+ "to",
130
+ "_to_copy",
131
+ "_to_copy_out",
132
+ "_reshape_copy",
133
+ "_reshape_copy_out",
134
+ "copy_sparse_to_sparse_",
135
+ "copy_",
136
+ "numpy_T",
137
+ "matrix_H",
138
+ "mT",
139
+ "mH", # these need to be an attributes in Python, not functions
140
+ "nonzero(_(out|numpy))?",
141
+ "set_data",
142
+ ".*_overrideable", # overrideable functions for backend extension
143
+ "data",
144
+ "is_leaf",
145
+ "output_nr",
146
+ "_version",
147
+ "requires_grad_",
148
+ "retains_grad",
149
+ "set_",
150
+ "_fw_primal",
151
+ "fake_quantize_per_tensor_affine_cachemask",
152
+ "fake_quantize_per_channel_affine_cachemask",
153
+ "_new_zeros_with_same_feature_meta",
154
+ "_has_same_storage_numel", # used for forward AD internals
155
+ "_reshape_alias",
156
+ "replace_", # only used by the functionalization pass, doesn't need to be exposed to python
157
+ "copy", # only used by the functionalization pass
158
+ "fill.Tensor", # only used by the functionalization pass
159
+ "fill.Scalar", # only used by the functionalization pass
160
+ "lift.*",
161
+ "normal_functional", # only used by the functionalization pass
162
+ "nbytes",
163
+ "itemsize",
164
+ ]
165
+
166
+ SKIP_PYTHON_BINDINGS = [
167
+ re.compile(rf"^{pattern}$") for pattern in _SKIP_PYTHON_BINDINGS
168
+ ]
169
+
170
+ # These function signatures are not exposed to Python. Note that this signature
171
+ # list does not support regex.
172
+ SKIP_PYTHON_BINDINGS_SIGNATURES = [
173
+ "add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
174
+ "add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
175
+ "sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
176
+ "sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
177
+ "mul.Scalar(Tensor self, Scalar other) -> Tensor",
178
+ "mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
179
+ "div.Scalar(Tensor self, Scalar other) -> Tensor",
180
+ "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
181
+ ]
182
+
183
+
184
+ @with_native_function
185
+ def should_generate_py_binding(f: NativeFunction) -> bool:
186
+ # NativeFunctions that are entirely code-generated should not get python bindings
187
+ # because these codegen implementations are often inefficient. A handful of
188
+ # view_copy style ops were exposed accidentally when they were handwritten and now
189
+ # that we are moving them to codegen for bc reasons we need to keep them exposed in
190
+ # python.
191
+ if "generated" in f.tags and "view_copy" not in f.tags:
192
+ return False
193
+
194
+ name = cpp.name(f.func)
195
+ for skip_regex in SKIP_PYTHON_BINDINGS:
196
+ if skip_regex.match(name):
197
+ return False
198
+
199
+ signature = str(f.func)
200
+ for pattern in SKIP_PYTHON_BINDINGS_SIGNATURES:
201
+ if pattern == signature:
202
+ return False
203
+ return True
204
+
205
+
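To make the filtering rule above concrete, here is a tiny self-contained sketch of how a handful of operator names fare against anchored skip patterns; the pattern subset and the example names are chosen only for illustration.

import re

# Anchoring with ^...$ means "add" is kept while "convolution_backward" is dropped
# by the ".*_backward" entry, mirroring how SKIP_PYTHON_BINDINGS is applied above.
skip = [re.compile(rf"^{p}$") for p in ["contiguous", r".*_backward", r"_th_.*"]]

for name in ["add", "contiguous", "convolution_backward", "_th_addmm"]:
    skipped = any(p.match(name) for p in skip)
    print(f"{name:>22}: {'skipped' if skipped else 'gets a Python binding'}")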
206
+ def get_pycname(name: BaseOperatorName) -> str:
207
+ return f"THPVariable_{name}"
208
+
209
+
210
+ def is_noarg(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> bool:
211
+ return len(overloads) == 1 and overloads[0].signature.arguments_count() == 0
212
+
213
+
214
+ def is_py_variable_method(f: NativeFunction) -> bool:
215
+ return f.python_module is None and Variant.method in f.variants
216
+
217
+
218
+ def is_py_torch_function(f: NativeFunction) -> bool:
219
+ return f.python_module is None and Variant.function in f.variants
220
+
221
+
222
+ def is_py_nn_function(f: NativeFunction) -> bool:
223
+ return f.python_module == "nn"
224
+
225
+
226
+ def is_py_fft_function(f: NativeFunction) -> bool:
227
+ return f.python_module == "fft"
228
+
229
+
230
+ def is_py_linalg_function(f: NativeFunction) -> bool:
231
+ return f.python_module == "linalg"
232
+
233
+
234
+ def is_py_nested_function(f: NativeFunction) -> bool:
235
+ return f.python_module == "nested"
236
+
237
+
238
+ def is_py_sparse_function(f: NativeFunction) -> bool:
239
+ return f.python_module == "sparse"
240
+
241
+
242
+ def is_py_special_function(f: NativeFunction) -> bool:
243
+ return f.python_module == "special"
244
+
245
+
246
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
247
+ #
248
+ # Main Function
249
+ #
250
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
251
+
252
+
253
+ def gen(
254
+ out: str,
255
+ native_yaml_path: str,
256
+ tags_yaml_path: str,
257
+ deprecated_yaml_path: str,
258
+ template_path: str,
259
+ *,
260
+ symint: bool = True,
261
+ ) -> None:
262
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
263
+ native_functions = parse_native_yaml(
264
+ native_yaml_path, tags_yaml_path
265
+ ).native_functions
266
+ native_functions = list(filter(should_generate_py_binding, native_functions))
267
+
268
+ methods = load_signatures(native_functions, deprecated_yaml_path, method=True)
269
+ create_python_bindings(
270
+ fm,
271
+ methods,
272
+ is_py_variable_method,
273
+ None,
274
+ "python_variable_methods.cpp",
275
+ method=True,
276
+ symint=symint,
277
+ )
278
+
279
+ # NOTE: num_shards here must be synced with gatherTorchFunctions in
280
+ # torch/csrc/autograd/python_torch_functions_manual.cpp
281
+ functions = load_signatures(native_functions, deprecated_yaml_path, method=False)
282
+ create_python_bindings_sharded(
283
+ fm,
284
+ functions,
285
+ is_py_torch_function,
286
+ "torch",
287
+ "python_torch_functions.cpp",
288
+ method=False,
289
+ num_shards=3,
290
+ symint=symint,
291
+ )
292
+
293
+ create_python_bindings(
294
+ fm,
295
+ functions,
296
+ is_py_nn_function,
297
+ "torch.nn",
298
+ "python_nn_functions.cpp",
299
+ method=False,
300
+ symint=symint,
301
+ )
302
+
303
+ create_python_bindings(
304
+ fm,
305
+ functions,
306
+ is_py_fft_function,
307
+ "torch.fft",
308
+ "python_fft_functions.cpp",
309
+ method=False,
310
+ symint=symint,
311
+ )
312
+
313
+ create_python_bindings(
314
+ fm,
315
+ functions,
316
+ is_py_linalg_function,
317
+ "torch.linalg",
318
+ "python_linalg_functions.cpp",
319
+ method=False,
320
+ symint=symint,
321
+ )
322
+
323
+ create_python_bindings(
324
+ fm,
325
+ functions,
326
+ is_py_nested_function,
327
+ "torch.nested",
328
+ "python_nested_functions.cpp",
329
+ method=False,
330
+ )
331
+
332
+ create_python_bindings(
333
+ fm,
334
+ functions,
335
+ is_py_sparse_function,
336
+ "torch.sparse",
337
+ "python_sparse_functions.cpp",
338
+ method=False,
339
+ symint=symint,
340
+ )
341
+
342
+ create_python_bindings(
343
+ fm,
344
+ functions,
345
+ is_py_special_function,
346
+ "torch.special",
347
+ "python_special_functions.cpp",
348
+ method=False,
349
+ symint=symint,
350
+ )
351
+
352
+ # Currently, we only use `functions` to generate `return_types` bindings.
353
+ # All methods which return a namedtuple have a function variant at this point.
354
+ # If any method-only operator returning a namedtuple is added in the future,
355
+ # we will have to address that.
356
+ create_python_return_type_bindings(
357
+ fm, functions, lambda fn: True, "python_return_types.cpp"
358
+ )
359
+ create_python_return_type_bindings_header(
360
+ fm, functions, lambda fn: True, "python_return_types.h"
361
+ )
362
+
363
+ valid_tags = parse_tags_yaml(tags_yaml_path)
364
+
365
+ def gen_tags_enum() -> Dict[str, str]:
366
+ return {
367
+ "enum_of_valid_tags": (
368
+ "".join(
369
+ [f'\n.value("{tag}", at::Tag::{tag})' for tag in sorted(valid_tags)]
370
+ )
371
+ )
372
+ }
373
+
374
+ fm.write("python_enum_tag.cpp", gen_tags_enum)
375
+
376
+
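The gen_tags_enum closure above simply folds the parsed tag names into a chain of .value(...) calls for the generated pybind11 enum; a toy rendering with an illustrative tag list (the real names come from parse_tags_yaml) looks like this:

valid_tags = {"inplace_view", "core", "pointwise"}   # illustrative only
enum_of_valid_tags = "".join(
    f'\n.value("{tag}", at::Tag::{tag})' for tag in sorted(valid_tags)
)
print(enum_of_valid_tags)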
377
+ def group_filter_overloads(
378
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
379
+ pred: Callable[[NativeFunction], bool],
380
+ ) -> Dict[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]:
381
+ grouped: Dict[
382
+ BaseOperatorName, List[PythonSignatureNativeFunctionPair]
383
+ ] = defaultdict(list)
384
+ for pair in pairs:
385
+ if pred(pair.function):
386
+ grouped[pair.function.func.name.name].append(pair)
387
+ return grouped
388
+
389
+
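A minimal sketch of the grouping step above, using plain tuples in place of PythonSignatureNativeFunctionPair: everything with the same base operator name is collected so one binding (and one PythonArgParser) can cover all of its overloads.

from collections import defaultdict
from typing import Dict, List, Tuple

# (base name, overload name) pairs standing in for the real signature/function pairs.
pairs: List[Tuple[str, str]] = [("add", "Tensor"), ("add", "Scalar"), ("mul", "Tensor"), ("add", "out")]

grouped: Dict[str, List[str]] = defaultdict(list)
for base, overload in pairs:
    grouped[base].append(overload)

print(dict(grouped))   # {'add': ['Tensor', 'Scalar', 'out'], 'mul': ['Tensor']}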
390
+ def create_python_bindings(
391
+ fm: FileManager,
392
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
393
+ pred: Callable[[NativeFunction], bool],
394
+ module: Optional[str],
395
+ filename: str,
396
+ *,
397
+ method: bool,
398
+ symint: bool = True,
399
+ ) -> None:
400
+ """Generates Python bindings to ATen functions"""
401
+ py_methods: List[str] = []
402
+ ops_headers: List[str] = []
403
+ py_method_defs: List[str] = []
404
+ py_forwards: List[str] = []
405
+
406
+ grouped = group_filter_overloads(pairs, pred)
407
+
408
+ for name in sorted(grouped.keys(), key=str):
409
+ overloads = grouped[name]
410
+ py_methods.append(
411
+ method_impl(name, module, overloads, method=method, symint=symint)
412
+ )
413
+ py_method_defs.append(method_def(name, module, overloads, method=method))
414
+ py_forwards.extend(forward_decls(name, overloads, method=method))
415
+ ops_headers.append(f"#include <ATen/ops/{name.base}.h>")
416
+
417
+ fm.write_with_template(
418
+ filename,
419
+ filename,
420
+ lambda: {
421
+ "generated_comment": "@"
422
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
423
+ "ops_headers": ops_headers,
424
+ "py_forwards": py_forwards,
425
+ "py_methods": py_methods,
426
+ "py_method_defs": py_method_defs,
427
+ },
428
+ )
429
+
430
+
431
+ def create_python_return_type_bindings(
432
+ fm: FileManager,
433
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
434
+ pred: Callable[[NativeFunction], bool],
435
+ filename: str,
436
+ ) -> None:
437
+ """
438
+ Generate functions to initialize and return named tuples for native functions
439
+ which return a named tuple, and the registration invocations in `python_return_types.cpp`.
440
+ """
441
+ py_return_types_definition: List[str] = []
442
+ py_return_types_registrations: List[str] = []
443
+
444
+ grouped = group_filter_overloads(pairs, pred)
445
+
446
+ for name in sorted(grouped.keys(), key=str):
447
+ overloads = grouped[name]
448
+ definitions, registrations = generate_return_type_definition_and_registrations(
449
+ overloads
450
+ )
451
+ py_return_types_definition.append(
452
+ "" if not definitions else "\n".join(definitions)
453
+ )
454
+ py_return_types_registrations.append(
455
+ "" if not registrations else "\n".join(registrations)
456
+ )
457
+
458
+ fm.write_with_template(
459
+ filename,
460
+ filename,
461
+ lambda: {
462
+ "generated_comment": "@"
463
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
464
+ "py_return_types": py_return_types_definition,
465
+ "py_return_types_registrations": py_return_types_registrations,
466
+ },
467
+ )
468
+
469
+
470
+ def create_python_return_type_bindings_header(
471
+ fm: FileManager,
472
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
473
+ pred: Callable[[NativeFunction], bool],
474
+ filename: str,
475
+ ) -> None:
476
+ """
477
+ Generate function declarations to initialize and return named tuples for native functions
478
+ which return a named tuple, and the relevant entries for the map in `python_return_types.cpp`.
479
+ """
480
+ py_return_types_declarations: List[str] = []
481
+
482
+ grouped = group_filter_overloads(pairs, pred)
483
+
484
+ for name in sorted(grouped.keys(), key=str):
485
+ overloads = grouped[name]
486
+ declarations = generate_return_type_declarations(overloads)
487
+ py_return_types_declarations.append(
488
+ "" if not declarations else "\n".join(declarations)
489
+ )
490
+
491
+ fm.write_with_template(
492
+ filename,
493
+ filename,
494
+ lambda: {
495
+ "generated_comment": "@"
496
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
497
+ "py_return_types_declarations": py_return_types_declarations,
498
+ },
499
+ )
500
+
501
+
502
+ def create_python_bindings_sharded(
503
+ fm: FileManager,
504
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
505
+ pred: Callable[[NativeFunction], bool],
506
+ module: Optional[str],
507
+ filename: str,
508
+ *,
509
+ method: bool,
510
+ num_shards: int,
511
+ symint: bool = True,
512
+ ) -> None:
513
+ """Generates Python bindings to ATen functions"""
514
+ grouped = group_filter_overloads(pairs, pred)
515
+
516
+ def key_func(
517
+ kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
518
+ ) -> str:
519
+ return kv[0].base
520
+
521
+ def env_func(
522
+ kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
523
+ ) -> Dict[str, List[str]]:
524
+ name, fn_pairs = kv
525
+ return {
526
+ "ops_headers": [f"#include <ATen/ops/{name.base}.h>"],
527
+ "py_forwards": list(forward_decls(name, fn_pairs, method=method)),
528
+ "py_methods": [
529
+ method_impl(name, module, fn_pairs, method=method, symint=symint)
530
+ ],
531
+ "py_method_defs": [method_def(name, module, fn_pairs, method=method)],
532
+ }
533
+
534
+ fm.write_sharded(
535
+ filename,
536
+ grouped.items(),
537
+ base_env={
538
+ "generated_comment": "@"
539
+ + f"generated from {fm.template_dir_for_comments()}/{filename}",
540
+ },
541
+ key_fn=key_func,
542
+ env_callable=env_func,
543
+ num_shards=num_shards,
544
+ sharded_keys={"ops_headers", "py_forwards", "py_methods", "py_method_defs"},
545
+ )
546
+
547
+
548
+ def load_signatures(
549
+ native_functions: List[NativeFunction],
550
+ deprecated_yaml_path: str,
551
+ *,
552
+ method: bool,
553
+ skip_deprecated: bool = False,
554
+ pyi: bool = False,
555
+ ) -> Sequence[PythonSignatureNativeFunctionPair]:
556
+ @with_native_function
557
+ def gen_signature_pairs(f: NativeFunction) -> PythonSignatureNativeFunctionPair:
558
+ return PythonSignatureNativeFunctionPair(
559
+ signature=signature(f, method=method, pyi=pyi),
560
+ function=f,
561
+ )
562
+
563
+ pairs = list(map(gen_signature_pairs, native_functions))
564
+ deprecated = load_deprecated_signatures(
565
+ pairs, deprecated_yaml_path, method=method, pyi=pyi
566
+ )
567
+ return pairs if skip_deprecated else pairs + deprecated
568
+
569
+
570
+ def load_deprecated_signatures(
571
+ pairs: Sequence[PythonSignatureNativeFunctionPair],
572
+ deprecated_yaml_path: str,
573
+ *,
574
+ method: bool,
575
+ pyi: bool,
576
+ ) -> List[PythonSignatureNativeFunctionPair]:
577
+ # The deprecated.yaml doesn't have complete type information, we need
578
+ # to find and leverage the original ATen signature (to which it delegates
579
+ # the call) to generate the full python signature.
580
+ # We join the deprecated and the original signatures using type-only form.
581
+
582
+ # group the original ATen signatures by name
583
+ grouped: Dict[str, List[PythonSignatureNativeFunctionPair]] = defaultdict(list)
584
+ for pair in pairs:
585
+ grouped[pair.signature.name].append(pair)
586
+
587
+ # find matching original signatures for each deprecated signature
588
+ results: List[PythonSignatureNativeFunctionPair] = []
589
+
590
+ with open(deprecated_yaml_path) as f:
591
+ deprecated_defs = yaml.load(f, Loader=YamlLoader)
592
+
593
+ for deprecated in deprecated_defs:
594
+ schema = FunctionSchema.parse(deprecated["name"])
595
+ aten_name, call_args = split_name_params(deprecated["aten"])
596
+ is_out = aten_name.endswith("_out")
597
+ if is_out:
598
+ aten_name = aten_name.replace("_out", "")
599
+
600
+ # HACK: these are fixed constants used to pass to the aten function.
601
+ # The type must be known ahead of time
602
+ known_constants = {
603
+ "1": Type.parse("Scalar"),
604
+ }
605
+ schema_args_by_name = {a.name: a for a in schema.arguments.flat_all}
606
+ for name in call_args:
607
+ assert (
608
+ name in schema_args_by_name or name in known_constants
609
+ ), f"deprecation definition: Unrecognized value {name}"
610
+
611
+ # Map deprecated signature arguments to their aten signature and test
612
+ # if the types and alias annotation match.
613
+ def is_schema_compatible(
614
+ aten_schema: FunctionSchema,
615
+ ) -> bool:
616
+ arguments: Iterable[Argument]
617
+ if is_out:
618
+ arguments = itertools.chain(
619
+ aten_schema.arguments.out, aten_schema.arguments.flat_non_out
620
+ )
621
+ else:
622
+ arguments = aten_schema.arguments.flat_all
623
+
624
+ for i, arg in enumerate(arguments):
625
+ if i < len(call_args):
626
+ arg_name = call_args[i]
627
+ if arg_name in known_constants:
628
+ schema_type = known_constants[arg_name]
629
+ schema_annotation = None
630
+ else:
631
+ schema_arg = schema_args_by_name[arg_name]
632
+ schema_type = schema_arg.type
633
+ schema_annotation = schema_arg.annotation
634
+
635
+ if schema_type != arg.type or schema_annotation != arg.annotation:
636
+ return False
637
+ else:
638
+ if arg.default is None:
639
+ return False
640
+
641
+ return len(schema.returns) == len(aten_schema.returns) and all(
642
+ a == b for a, b in zip(schema.returns, aten_schema.returns)
643
+ )
644
+
645
+ any_schema_found = False
646
+ for pair in grouped[aten_name]:
647
+ if not is_schema_compatible(pair.function.func):
648
+ continue
649
+ any_schema_found = True
650
+
651
+ python_sig = signature_from_schema(
652
+ schema,
653
+ category_override=pair.function.category_override,
654
+ method=method,
655
+ pyi=pyi,
656
+ )
657
+
658
+ results.append(
659
+ PythonSignatureNativeFunctionPair(
660
+ signature=PythonSignatureDeprecated(
661
+ name=python_sig.name,
662
+ input_args=python_sig.input_args,
663
+ input_kwargs=python_sig.input_kwargs,
664
+ output_args=python_sig.output_args,
665
+ tensor_options_args=python_sig.tensor_options_args,
666
+ method=python_sig.method,
667
+ deprecated_schema=schema,
668
+ deprecated_args_exprs=tuple(call_args),
669
+ returns=python_sig.returns,
670
+ ),
671
+ function=pair.function,
672
+ )
673
+ )
674
+ assert (
675
+ any_schema_found
676
+ ), f"No native function with name {aten_name} matched signature:\n {str(schema)}"
677
+
678
+ return results
679
+
680
+
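To make the compatibility test above a bit more tangible, here is a hand-rolled toy version of the type-only join: a deprecated entry forwards a list of argument expressions to an aten call, and an ATen schema is accepted only if each forwarded slot has the expected type (literal constants such as "1" are treated as Scalars) and every remaining ATen argument has a default. The concrete names and types below are invented for illustration.

from typing import List, Optional, Tuple

deprecated_call_args = ["self", "other", "1"]             # e.g. forwarded to aten::add
deprecated_arg_types = {"self": "Tensor", "other": "Tensor"}
known_constants = {"1": "Scalar"}

# (name, type, default) triples standing in for the candidate ATen schema's arguments.
aten_args: List[Tuple[str, str, Optional[str]]] = [
    ("self", "Tensor", None),
    ("other", "Tensor", None),
    ("alpha", "Scalar", "1"),
]

def is_compatible() -> bool:
    for i, (_, typ, default) in enumerate(aten_args):
        if i < len(deprecated_call_args):
            expr = deprecated_call_args[i]
            expected = known_constants.get(expr, deprecated_arg_types.get(expr))
            if expected != typ:
                return False
        elif default is None:       # trailing ATen-only arguments must be defaulted
            return False
    return True

print(is_compatible())   # True: every forwarded slot matches ("1" is passed as the Scalar alpha)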
681
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
682
+ #
683
+ # Named Tuple Codegen
684
+ #
685
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
686
+
687
+
688
+ @with_native_function
689
+ def gen_namedtuple_typename_key(f: NativeFunction) -> str:
690
+ name = cpp.name(f.func)
691
+ fieldnames = namedtuple_fieldnames(f.func.returns)
692
+ return "_".join([name] + fieldnames)
693
+
694
+
695
+ def emit_namedtuple_call(
696
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
697
+ ) -> Tuple[List[str], Dict[str, str]]:
698
+ """
699
+ Generate block of named tuple type def inits, and add typeref snippets
700
+ to declarations that use them
701
+ """
702
+ typenames: Dict[
703
+ str, str
704
+ ] = {} # map from unique name + field name lists to typedef name
705
+ typedefs: List[str] = [] # typedef declarations and init code
706
+
707
+ for overload in overloads:
708
+ fieldnames = namedtuple_fieldnames(overload.function.func.returns)
709
+ if not fieldnames:
710
+ continue
711
+
712
+ name = cpp.name(overload.function.func) # use @with_native_function?
713
+ tn_key = gen_namedtuple_typename_key(overload.function)
714
+ typename = typenames.get(tn_key)
715
+ if typename is None:
716
+ typename = f'NamedTuple{"" if not typedefs else len(typedefs)}'
717
+ typenames[tn_key] = typename
718
+ typedefs.append(
719
+ f"""\
720
+ static PyTypeObject* {typename} = generated::get_{name}_namedtuple();"""
721
+ )
722
+
723
+ return typedefs, typenames
724
+
725
+
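The de-duplication above keys each PyTypeObject on the op name plus its return field names, so overloads that return the same fields reuse one static type object. A two-line toy of that key, with made-up ops:

def toy_namedtuple_key(op: str, fields: list) -> str:
    return "_".join([op] + fields)

print(toy_namedtuple_key("max", ["values", "indices"]))        # 'max_values_indices'
print(toy_namedtuple_key("kthvalue", ["values", "indices"]))   # 'kthvalue_values_indices'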
726
+ def generate_return_type_definition_and_registrations(
727
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
728
+ ) -> Tuple[List[str], List[str]]:
729
+ """
730
+ Generate the block of functions in `python_return_types.cpp` that initialize
731
+ and return the named tuple type for a native function which returns a named tuple,
732
+ along with the registration invocations in the same file.
733
+ """
734
+ typenames: Dict[
735
+ str, str
736
+ ] = {} # map from unique name + field name lists to typedef name
737
+ definitions: List[str] = [] # function definition to register the typedef
738
+ registrations: List[str] = [] # register call for the typedef
739
+
740
+ for overload in overloads:
741
+ fieldnames = namedtuple_fieldnames(overload.function.func.returns)
742
+ if not fieldnames:
743
+ continue
744
+
745
+ fields = ", ".join(f'{{"{fn}", ""}}' for fn in fieldnames)
746
+
747
+ name = cpp.name(overload.function.func) # use @with_native_function?
748
+ tn_key = gen_namedtuple_typename_key(overload.function)
749
+ typename = typenames.get(tn_key)
750
+
751
+ if typename is None:
752
+ typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}'
753
+ typenames[tn_key] = typename
754
+ definitions.append(
755
+ f"""\
756
+ PyTypeObject* get_{name}_namedtuple() {{
757
+ static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }};
758
+ static PyTypeObject {typename};
759
+ static bool is_initialized = false;
760
+ static PyStructSequence_Desc desc = {{ "torch.return_types.{name}", nullptr, NamedTuple_fields, {len(fieldnames)} }};
761
+ if (!is_initialized) {{
762
+ PyStructSequence_InitType(&{typename}, &desc);
763
+ {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;
764
+ is_initialized = true;
765
+ }}
766
+ return &{typename};
767
+ }}
768
+ """
769
+ )
770
+ registrations.append(
771
+ f'addReturnType(return_types_module, "{name}", generated::get_{name}_namedtuple());'
772
+ )
773
+
774
+ return definitions, registrations
775
+
776
+
777
+ def generate_return_type_declarations(
778
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
779
+ ) -> List[str]:
780
+ """
781
+ Generate block of function declarations in `python_return_types.h` to initialize
782
+ and return named tuple for a native function.
783
+ """
784
+ typenames: Dict[
785
+ str, str
786
+ ] = {} # map from unique name + field name lists to typedef name
787
+ declarations: List[str] = [] # function declaration to register the typedef
788
+
789
+ for overload in overloads:
790
+ fieldnames = namedtuple_fieldnames(overload.function.func.returns)
791
+ if not fieldnames:
792
+ continue
793
+
794
+ name = cpp.name(overload.function.func) # use @with_native_function?
795
+ tn_key = gen_namedtuple_typename_key(overload.function)
796
+ typename = typenames.get(tn_key)
797
+
798
+ if typename is None:
799
+ typename = (
800
+ f'{name}NamedTuple{"" if not declarations else len(declarations)}'
801
+ )
802
+ typenames[tn_key] = typename
803
+ declarations.append(f"PyTypeObject* get_{name}_namedtuple();")
804
+
805
+ return declarations
806
+
807
+
808
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
809
+ #
810
+ # Method Impl Codegen
811
+ #
812
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
813
+
814
+ # python binding for all overloads of a particular function/method
815
+ PY_VARIABLE_METHOD_VARARGS = CodeTemplate(
816
+ r"""\
817
+ // ${name}
818
+ static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs)
819
+ {
820
+ ${method_header}
821
+ static PythonArgParser parser({
822
+ ${signatures}
823
+ }, /*traceable=*/${traceable});
824
+
825
+ ParsedArgs<${max_args}> parsed_args;
826
+ auto _r = parser.parse(${self_}, args, kwargs, parsed_args);
827
+ ${check_has_torch_function}
828
+ switch (_r.idx) {
829
+ ${dispatch}
830
+ }
831
+ ${method_footer}
832
+ }
833
+
834
+ """
835
+ )
836
+
837
+ # handler for a single parsed signature - may be a single overload or
838
+ # a pair of overloads whose signatures only differ in output params
839
+ # (plugged into PY_VARIABLE_METHOD_VARARGS as an item in ${dispatch})
840
+ PY_VARIABLE_CASE = CodeTemplate(
841
+ """\
842
+ case ${overload_index}: {
843
+ ${body}
844
+ }
845
+ """
846
+ )
847
+
848
+ # python binding for single-overload function/method
849
+ PY_VARIABLE_METHOD_VARARGS_SINGLETON = CodeTemplate(
850
+ """\
851
+ // ${name}
852
+ static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs)
853
+ {
854
+ ${method_header}
855
+ static PythonArgParser parser({
856
+ ${signatures}
857
+ }, /*traceable=*/${traceable});
858
+
859
+ ParsedArgs<${max_args}> parsed_args;
860
+ auto _r = parser.parse(${self_}, args, kwargs, parsed_args);
861
+ ${check_has_torch_function}
862
+ ${dispatch}
863
+ ${method_footer}
864
+ }
865
+
866
+ """
867
+ )
868
+
869
+ # python binding for a method with no args, shortcuts parsing
870
+ PY_VARIABLE_METHOD_NOARGS = CodeTemplate(
871
+ """\
872
+ // ${name}
873
+ static PyObject * ${pycname}(PyObject* self_, PyObject* args)
874
+ {
875
+ ${method_header}
876
+ ${check_has_torch_function}
877
+ ${dispatch}
878
+ ${method_footer}
879
+ }
880
+
881
+ """
882
+ )
883
+
884
+
885
+ def method_impl(
886
+ name: BaseOperatorName,
887
+ module: Optional[str],
888
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
889
+ *,
890
+ method: bool,
891
+ symint: bool = True,
892
+ ) -> str:
893
+ """
894
+ Generate a python binding for all overloads of an op.
895
+ """
896
+ pycname = get_pycname(name)
897
+ noarg = is_noarg(overloads)
898
+ namedtuple_inits, namedtuple_typenames = emit_namedtuple_call(overloads)
899
+
900
+ method_header = ["HANDLE_TH_ERRORS"]
901
+ method_header += namedtuple_inits
902
+ method_header += (
903
+ ["const Tensor& self = THPVariable_Unpack(self_);"] if method else []
904
+ )
905
+
906
+ method_footer = ([] if noarg else ["Py_RETURN_NONE;"]) + ["END_HANDLE_TH_ERRORS"]
907
+
908
+ traceable = "true" if all(should_trace(o.function) for o in overloads) else "false"
909
+
910
+ grouped_overloads: Sequence[PythonSignatureGroup] = group_overloads(
911
+ overloads, symint=symint
912
+ )
913
+ is_singleton = len(grouped_overloads) == 1
914
+ signatures: List[str] = []
915
+ dispatch: List[str] = []
916
+ for overload_index, overload in enumerate(grouped_overloads):
917
+ signature = overload.signature.signature_str(symint=symint)
918
+ signatures.append(f"{cpp_string(str(signature))},")
919
+ dispatch_body = emit_dispatch_case(
920
+ overload, namedtuple_typenames, symint=symint
921
+ )
922
+ dispatch.append(
923
+ PY_VARIABLE_CASE.substitute(
924
+ overload_index=overload_index, body=dispatch_body
925
+ )
926
+ if not is_singleton
927
+ else dispatch_body
928
+ )
929
+
930
+ if noarg:
931
+ template = PY_VARIABLE_METHOD_NOARGS
932
+ elif is_singleton:
933
+ template = PY_VARIABLE_METHOD_VARARGS_SINGLETON
934
+ else:
935
+ template = PY_VARIABLE_METHOD_VARARGS
936
+
937
+ return template.substitute(
938
+ name=name,
939
+ pycname=pycname,
940
+ method_header=method_header,
941
+ max_args=max(o.signature.arguments_count() for o in overloads),
942
+ signatures=signatures,
943
+ traceable=traceable,
944
+ check_has_torch_function=gen_has_torch_function_check(
945
+ name=name,
946
+ module=module,
947
+ noarg=noarg,
948
+ method=method,
949
+ ),
950
+ dispatch=dispatch,
951
+ method_footer=method_footer,
952
+ self_="self_" if method else "nullptr",
953
+ )
954
+
955
+
956
+ def gen_has_torch_function_check(
957
+ name: BaseOperatorName, module: Optional[str], *, noarg: bool, method: bool
958
+ ) -> str:
959
+ if noarg:
960
+ if method:
961
+ return f"""\
962
+ if(check_has_torch_function(self_)) {{
963
+ return handle_torch_function(self_, "{name}");
964
+ }}
965
+ """
966
+ else:
967
+ return ""
968
+
969
+ self_ = "self_" if method else "nullptr"
970
+ namespace = (
971
+ {
972
+ "torch": "THPVariableFunctionsModule",
973
+ "torch.nn": "THPNNVariableFunctionsModule",
974
+ "torch.fft": "THPFFTVariableFunctionsModule",
975
+ "torch.linalg": "THPLinalgVariableFunctionsModule",
976
+ "torch.nested": "THPNestedVariableFunctionsModule",
977
+ "torch.sparse": "THPSparseVariableFunctionsModule",
978
+ "torch.special": "THPSpecialVariableFunctionsModule",
979
+ }[module]
980
+ if module
981
+ else "THPVariableClass"
982
+ )
983
+
984
+ return f"""\
985
+ if(_r.has_torch_function()) {{
986
+ return handle_torch_function(_r, {self_}, args, kwargs, {namespace}, "{module or "torch.Tensor"}");
987
+ }}
988
+ """
989
+
990
+
991
+ # handler for output/no-output overload pair
992
+ PY_VARIABLE_OUT = CodeTemplate(
993
+ """\
994
+ if (_r.isNone(${out_idx})) {
995
+ ${call_dispatch}
996
+ } else {
997
+ ${call_dispatch_out}
998
+ }
999
+ """
1000
+ )
1001
+
1002
+
1003
+ def emit_dispatch_case(
1004
+ overload: PythonSignatureGroup,
1005
+ namedtuple_typenames: Dict[str, str],
1006
+ *,
1007
+ symint: bool = True,
1008
+ ) -> str:
1009
+ """
1010
+ Emit dispatch code for a single parsed signature. This corresponds to either
1011
+ a single native function, or a pair that differ only in output params. In the
1012
+ latter case, a single python signature is used for both and dispatching
1013
+ switches on the presence/absence of passed output args.
1014
+ """
1015
+ if overload.outplace is not None:
1016
+ # dispatch output and no-output variants, branch on _r.isNone(<out_idx>)
1017
+ return PY_VARIABLE_OUT.substitute(
1018
+ out_idx=overload.signature.output_idx(),
1019
+ call_dispatch=emit_single_dispatch(
1020
+ overload.signature, overload.base, namedtuple_typenames, symint=symint
1021
+ ),
1022
+ call_dispatch_out=emit_single_dispatch(
1023
+ overload.signature,
1024
+ overload.outplace,
1025
+ namedtuple_typenames,
1026
+ symint=symint,
1027
+ ),
1028
+ )
1029
+ else:
1030
+ # no-output version only
1031
+ return emit_single_dispatch(
1032
+ overload.signature, overload.base, namedtuple_typenames, symint=symint
1033
+ )
1034
+
1035
+
1036
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1037
+ #
1038
+ # Forward Declarations Codegen
1039
+ #
1040
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1041
+
1042
+
1043
+ def forward_decls(
1044
+ name: BaseOperatorName,
1045
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
1046
+ *,
1047
+ method: bool,
1048
+ ) -> Tuple[str, ...]:
1049
+ if method:
1050
+ return ()
1051
+
1052
+ pycname = get_pycname(name)
1053
+ if is_noarg(overloads):
1054
+ return (
1055
+ f"""\
1056
+ static PyObject * {pycname}(PyObject* self_, PyObject* args);
1057
+ """,
1058
+ )
1059
+ else:
1060
+ return (
1061
+ f"""\
1062
+ static PyObject * {pycname}(PyObject* self_, PyObject* args, PyObject* kwargs);
1063
+ """,
1064
+ )
1065
+
1066
+
1067
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1068
+ #
1069
+ # Method Def (Binding Table Entry) Codegen
1070
+ #
1071
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1072
+
1073
+
1074
+ def method_def(
1075
+ name: BaseOperatorName,
1076
+ module: Optional[str],
1077
+ overloads: Sequence[PythonSignatureNativeFunctionPair],
1078
+ *,
1079
+ method: bool,
1080
+ ) -> str:
1081
+ """
1082
+ Generate method def entry.
1083
+ """
1084
+ pycname = get_pycname(name)
1085
+
1086
+ if name.dunder_method:
1087
+ # PyMethodDef entry for binary op, throws not implemented error
1088
+ pycname = f"TypeError_to_NotImplemented_<{pycname}>"
1089
+
1090
+ if is_noarg(overloads):
1091
+ flags = "METH_NOARGS" if method else "METH_VARARGS | METH_KEYWORDS"
1092
+ else:
1093
+ pycname = f"castPyCFunctionWithKeywords({pycname})"
1094
+ flags = "METH_VARARGS | METH_KEYWORDS"
1095
+
1096
+ if module == "torch":
1097
+ flags += " | METH_STATIC"
1098
+
1099
+ return f'{{"{name}", {pycname}, {flags}, NULL}},'
1100
+
1101
+
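For a sense of what the entries above expand to, the simplified toy below renders the two common shapes of a PyMethodDef row: a no-arg Tensor method and a keyword-taking torch-level function. The operator names are invented, and the real pycname always comes from get_pycname.

from typing import Optional

def toy_method_def(name: str, *, noarg: bool, method: bool, module: Optional[str]) -> str:
    pycname = f"THPVariable_{name}"
    if noarg and method:
        flags = "METH_NOARGS"
    else:
        pycname = f"castPyCFunctionWithKeywords({pycname})"
        flags = "METH_VARARGS | METH_KEYWORDS"
    if module == "torch":
        flags += " | METH_STATIC"
    return f'{{"{name}", {pycname}, {flags}, NULL}},'

print(toy_method_def("dim", noarg=True, method=True, module=None))
print(toy_method_def("arange", noarg=False, method=False, module="torch"))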
1102
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1103
+ #
1104
+ # Overload Sorting and Grouping
1105
+ #
1106
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1107
+
1108
+
1109
+ def group_overloads(
1110
+ overloads: Sequence[PythonSignatureNativeFunctionPair], *, symint: bool = True
1111
+ ) -> Sequence[PythonSignatureGroup]:
1112
+ bases: Dict[str, PythonSignatureNativeFunctionPair] = {}
1113
+ outplaces: Dict[str, PythonSignatureNativeFunctionPair] = {}
1114
+
1115
+ # first group by signature ignoring out arguments
1116
+ for overload in overloads:
1117
+ sig = overload.signature.signature_str(skip_outputs=True, symint=symint)
1118
+ if overload.function.func.is_out_fn():
1119
+ if sig in outplaces:
1120
+ raise RuntimeError(
1121
+ f"Found duplicated function definition:\n- {overload.function.func}.\n"
1122
+ f"Existing definition:\n- {outplaces[sig].function.func}."
1123
+ )
1124
+ outplaces[sig] = overload
1125
+ else:
1126
+ if sig in bases:
1127
+ raise RuntimeError(
1128
+ f"Found duplicated function definition:\n- {overload.function.func}.\n"
1129
+ f"Existing definition:\n- {bases[sig].function.func}."
1130
+ )
1131
+ bases[sig] = overload
1132
+
1133
+ for sig, out in outplaces.items():
1134
+ if sig not in bases:
1135
+ candidates: List[str] = []
1136
+ for overload in overloads:
1137
+ if (
1138
+ str(overload.function.func.name.name)
1139
+ == str(out.function.func.name.name)
1140
+ and not overload.function.func.is_out_fn()
1141
+ and not overload.signature.deprecated
1142
+ ):
1143
+ candidates.append(
1144
+ overload.signature.signature_str(
1145
+ skip_outputs=True, symint=symint
1146
+ )
1147
+ )
1148
+ out_sig = out.signature.signature_str(symint=symint)
1149
+ raise RuntimeError(
1150
+ f"While identifying overloads, we found an out schema {out_sig} without a corresponding non-out variant. "
1151
+ f"We expected the non-out variant to have schema: \n- {sig}\nPlease check that you spelled the schema "
1152
+ "correctly in native_functions.yaml. We discovered the following candidate(s): \n"
1153
+ + "\n".join(f"- {candidate}" for candidate in candidates)
1154
+ )
1155
+
1156
+ grouped = [
1157
+ PythonSignatureGroup.from_pairs(
1158
+ functional=base,
1159
+ out=outplaces.get(sig),
1160
+ )
1161
+ for sig, base in bases.items()
1162
+ ]
1163
+ return sort_overloads(grouped, symint=symint)
1164
+
1165
+
1166
+ # This function declares a partial order on declarations, and sorts them according
1167
+ # to its linear extension. This is necessary, because there's some ambiguity in the
1168
+ # choice of overload, and we want a different order.
1169
+ #
1170
+ # See Note[Order of overloads matters]
1171
+ #
1172
+ # A few examples of ambiguous python signature pairs.
1173
+ #
1174
+ # All parameters have the same type, except one taking Tensor the other taking
1175
+ # Scalar. A numeric PyObject can be casted into Tensor, and a zero-dim Tensor
1176
+ # object can be accepted as Scalar type parameter (see python_arg_parser.cpp).
1177
+ # Therefore, same input arguments might be accepted by either python signature.
1178
+ # We want to always parse the one taking Tensor first.
1179
+ #
1180
+ # bitwise_and(Tensor input, Tensor other, *, Tensor out=None)
1181
+ # bitwise_and(Tensor input, Scalar other, *, Tensor out=None)
1182
+ #
1183
+ # If they have different number of parameters then they are not ambiguous - but
1184
+ # the difference on output param can be ignored as it's optional.
1185
+ #
1186
+ # multiply(Tensor input, Tensor other, *, Tensor out=None)
1187
+ # multiply(Tensor input, Scalar other)
1188
+ #
1189
+ # Both positional args and keyword-only args are considered together.
1190
+ #
1191
+ # subtract(Tensor other, *, Scalar alpha=1)
1192
+ # subtract(Scalar other, Scalar alpha=1)
1193
+ #
1194
+ # A few ambiguous cases which it does NOT handle yet.
1195
+ #
1196
+ # If there is any difference in other parameters besides the Tensor/Scalar
1197
+ # difference, then they are not considered ambiguous by this method anymore.
1198
+ # However, the difference could be too trivial to disambiguate.
1199
+ #
1200
+ # foo(Tensor input, Scalar other, Scalar bar)
1201
+ # foo(Tensor input, Tensor other, double bar)
1202
+ #
1203
+ # If they are taking different number of parameters then they are not considered
1204
+ # ambiguous anymore, even if the difference is only on optional kwargs.
1205
+ #
1206
+ # foo(Scalar other, Scalar alpha=1)
1207
+ # foo(Tensor other, *, Scalar alpha=1, Scalar beta=1)
1208
+ #
1209
+
1210
+
1211
+ def sort_overloads(
1212
+ grouped_overloads: Sequence[PythonSignatureGroup], *, symint: bool = True
1213
+ ) -> Sequence[PythonSignatureGroup]:
1214
+ # NB: Smaller here means lower priority
1215
+
1216
+ def is_arg_smaller(t1: Type, t2: Type) -> bool:
1217
+ return (
1218
+ str(t1) == "Scalar"
1219
+ and str(t2) == "Tensor"
1220
+ or str(t1) == "Scalar?"
1221
+ and str(t2) == "Tensor?"
1222
+ or "Dimname" in str(t1)
1223
+ and "Dimname" not in str(t2)
1224
+ or
1225
+ # In the discussion https://github.com/pytorch/pytorch/issues/54555 it has been
1226
+ # discussed why it is important to prioritize int/int? over int[]
1227
+ str(t1) == "int[]"
1228
+ and (str(t2) == "int" or str(t2) == "int?")
1229
+ or
1230
+ # TensorList currently throws an error during argument parsing, that's why it needs to be
1231
+ # last in signature ordering. See discussion: https://github.com/pytorch/pytorch/issues/58087
1232
+ str(t1) == "Tensor[]"
1233
+ and str(t2).find("[]") != -1
1234
+ or
1235
+ # Prioritize IntArrayRef overload over SymIntArrayRef
1236
+ str(t1) == "SymInt[]"
1237
+ and str(t2) == "int[]"
1238
+ or
1239
+ # Make sure both int, SymInt are sorted consistently w.r.t. Tensor since Tensor can be implicitly
1240
+ # converted to either int or SymInt. Prioritize the Tensor overload since it otherwise gets shadowed.
1241
+ (str(t1) == "SymInt" or str(t1) == "int")
1242
+ and str(t2) == "Tensor"
1243
+ )
1244
+
1245
+ def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool:
1246
+ """Returns True if s1 < s2 in the partial order."""
1247
+ args1, args2 = s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True)
1248
+ if len(args1) != len(args2):
1249
+ return False
1250
+ # TODO: should use some canonical form instead of 'str(arg.type)' - see comments
1251
+ # above. The old codegen used the deprecated 'dynamic_type(arg.type)', which
1252
+ # ignores the optional annotation, i.e. 'Scalar' and 'Scalar?'.
1253
+ equal = all(arg1.type == arg2.type for arg1, arg2 in zip(args1, args2))
1254
+ smaller_or_equal = all(
1255
+ str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type)
1256
+ for arg1, arg2 in zip(args1, args2)
1257
+ )
1258
+ return smaller_or_equal and not equal
1259
+
1260
+ # First sort by signature
1261
+ grouped_overloads = sorted(
1262
+ grouped_overloads, key=lambda x: x.signature.signature_str(symint=symint)
1263
+ )
1264
+
1265
+ # Construct the relation graph
1266
+ larger_than: Dict[int, Set[int]] = defaultdict(set)
1267
+ for i1, overload1 in enumerate(grouped_overloads):
1268
+ for i2, overload2 in enumerate(grouped_overloads):
1269
+ if is_smaller(overload1.signature, overload2.signature):
1270
+ larger_than[i1].add(i2)
1271
+
1272
+ if not larger_than:
1273
+ return list(grouped_overloads)
1274
+
1275
+ # Use a topological sort to sort overloads according to the partial order.
1276
+ N = len(grouped_overloads)
1277
+ sorted_ids: List[int] = list(filter(lambda x: x not in larger_than, range(N)))
1278
+
1279
+ for idx in range(N):
1280
+ # The size of sorted_ids will grow to N eventually.
1281
+ i = sorted_ids[idx]
1282
+ for j in sorted(larger_than.keys()):
1283
+ larger = larger_than[j]
1284
+ larger.discard(i)
1285
+ if not larger:
1286
+ del larger_than[j]
1287
+ sorted_ids.append(j)
1288
+
1289
+ return [grouped_overloads[x] for x in sorted_ids]
1290
+
1291
+
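The partial order and the Kahn-style peel above can be hard to follow in the abstract; the self-contained toy below reproduces the same mechanics on three fake signatures (tuples of type strings) and shows the Tensor overload sorting ahead of the Scalar one, as required for correct argument parsing. The type pairs in arg_smaller are a trimmed-down stand-in for the real is_arg_smaller.

from collections import defaultdict

sigs = [("Tensor", "Scalar"), ("Tensor", "Tensor"), ("Tensor", "int[]")]

def arg_smaller(t1: str, t2: str) -> bool:
    # Trimmed stand-in for is_arg_smaller: Scalar is lower priority than Tensor, etc.
    return (t1, t2) in {("Scalar", "Tensor"), ("int[]", "int")}

def smaller(s1, s2) -> bool:
    if len(s1) != len(s2):
        return False
    pairwise = all(a == b or arg_smaller(a, b) for a, b in zip(s1, s2))
    return pairwise and s1 != s2

larger_than = defaultdict(set)
for i, s1 in enumerate(sigs):
    for j, s2 in enumerate(sigs):
        if smaller(s1, s2):
            larger_than[i].add(j)

# Start with the maximal (highest-priority) signatures and peel the rest off,
# mirroring how sorted_ids is built above.
order = [i for i in range(len(sigs)) if i not in larger_than]
for i in order:
    for j in sorted(larger_than):
        larger_than[j].discard(i)
        if not larger_than[j]:
            del larger_than[j]
            order.append(j)

print([sigs[i] for i in order])   # the (Tensor, Tensor) overload comes out ahead of (Tensor, Scalar)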
1292
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1293
+ #
1294
+ # Codegen API Integration
1295
+ #
1296
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
1297
+
1298
+
1299
+ def emit_single_dispatch(
1300
+ ps: PythonSignature,
1301
+ f: NativeFunction,
1302
+ namedtuple_typenames: Dict[str, str],
1303
+ *,
1304
+ symint: bool = True,
1305
+ ) -> str:
1306
+ """
1307
+ Emit dispatch code for a single native function.
1308
+ """
1309
+
1310
+ @with_native_function
1311
+ def go(f: NativeFunction) -> str:
1312
+ # header comments
1313
+ if isinstance(ps, PythonSignatureDeprecated):
1314
+ schema_comment = f"// [deprecated] aten::{ps.deprecated_schema}"
1315
+ else:
1316
+ schema_comment = f"// aten::{f.func}"
1317
+
1318
+ deprecated = "[deprecated] " if ps.deprecated else ""
1319
+
1320
+ # dispatch lambda signature
1321
+ name = cpp.name(f.func)
1322
+ lambda_formals = ", ".join(
1323
+ f"{a.type_str} {a.name}" for a in dispatch_lambda_args(ps, f, symint=symint)
1324
+ )
1325
+ lambda_return = dispatch_lambda_return_str(f)
1326
+
1327
+ # dispatch lambda body
1328
+ dispatch_callee = cpp_dispatch_target(f)
1329
+ dispatch_args = ", ".join(cpp_dispatch_exprs(f, python_signature=ps))
1330
+
1331
+ # from arg parser outputs to dispatch lambda arguments
1332
+ parser_outputs = arg_parser_output_exprs(ps, f, symint=symint)
1333
+ lambda_arg_exprs = dispatch_lambda_exprs(ps, f, symint=symint)
1334
+ inits = "\n".join(lambda_arg_exprs.inits)
1335
+ lambda_args = ", ".join(lambda_arg_exprs.exprs)
1336
+
1337
+ # scatter fields
1338
+ # TODO: Checking `ps.method and ('requires_grad' in parser_outputs)` is a hacky
1339
+ # solution for enabling the 'requires_grad' argument for tensor methods
1340
+ # new_full, new_empty, and new_zeros. A much better but more difficult to
1341
+ # implement solution involves refactoring according to Ed's description here:
1342
+ # https://github.com/pytorch/pytorch/issues/36455#issuecomment-614767589
1343
+ need_set_requires_grad = ps.tensor_options_args and (
1344
+ not has_tensor_options(f)
1345
+ or (ps.method and ("requires_grad" in parser_outputs))
1346
+ )
1347
+ set_requires_grad = (
1348
+ f'.set_requires_grad({parser_outputs["requires_grad"].expr})'
1349
+ if need_set_requires_grad
1350
+ else ""
1351
+ )
1352
+
1353
+ if lambda_return == "void":
1354
+ return f"""\
1355
+ {schema_comment}
1356
+ {inits}
1357
+ auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{
1358
+ pybind11::gil_scoped_release no_gil;
1359
+ {dispatch_callee}({dispatch_args});
1360
+ }};
1361
+ dispatch_{name}({lambda_args}){set_requires_grad};
1362
+ Py_RETURN_NONE;
1363
+ """
1364
+ else:
1365
+ typename = namedtuple_typenames.get(gen_namedtuple_typename_key(f))
1366
+ namedtuple_typeref = f"{typename}, " if typename is not None else ""
1367
+ return f"""\
1368
+ {schema_comment}
1369
+ {inits}
1370
+ auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{
1371
+ pybind11::gil_scoped_release no_gil;
1372
+ return {dispatch_callee}({dispatch_args});
1373
+ }};
1374
+ return wrap({namedtuple_typeref}dispatch_{name}({lambda_args}){set_requires_grad});
1375
+ """
1376
+
1377
+ return go(f)
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_factories.py ADDED
@@ -0,0 +1,115 @@
1
+ # Generates C++ functions that wrap ATen tensor factory methods to turn them into Variables.
2
+ #
3
+ # This writes one file: variable_factories.h
4
+
5
+ import re
6
+ from typing import List, Optional
7
+
8
+ import torchgen.api.python as python
9
+ from torchgen.api import cpp
10
+
11
+ from torchgen.api.types import CppSignatureGroup
12
+ from torchgen.context import with_native_function
13
+ from torchgen.gen import parse_native_yaml
14
+ from torchgen.model import NativeFunction, TensorOptionsArguments, Variant
15
+ from torchgen.utils import FileManager, mapMaybe
16
+
17
+ OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
18
+ TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")
19
+
20
+
21
+ # Add 'at::' to types defined in ATen namespace, e.g. Tensor, TensorList, IntArrayRef and etc.
22
+ # TODO: maybe update the cpp argument API to take optional namespace argument?
23
+ def fully_qualified_type(argument_type: str) -> str:
24
+ def maybe_optional_type(type: str, is_opt: bool) -> str:
25
+ return f"c10::optional<{type}>" if is_opt else type
26
+
27
+ opt_match = OPTIONAL_TYPE_PATTERN.match(argument_type)
28
+ is_opt = opt_match is not None
29
+ if opt_match:
30
+ argument_type = argument_type[opt_match.start(1) : opt_match.end(1)]
31
+ match = TYPE_PATTERN.match(argument_type)
32
+ if match is None:
33
+ return maybe_optional_type(argument_type, is_opt)
34
+ index = match.start(1)
35
+ qualified_type = f"{argument_type[:index]}at::{argument_type[index:]}"
36
+ return maybe_optional_type(qualified_type, is_opt)
37
+
38
+
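Worked by hand against the two regexes above, here are a few representative rewrites fully_qualified_type is meant to perform; lowercase and primitive types fall through unchanged because TYPE_PATTERN requires a capitalized identifier. The expected strings are hand-derived examples, not an exhaustive specification.

# input argument type            -> expected result of fully_qualified_type(...)
examples = {
    "Tensor":                       "at::Tensor",
    "const Tensor &":               "const at::Tensor &",
    "c10::optional<IntArrayRef>":   "c10::optional<at::IntArrayRef>",
    "int64_t":                      "int64_t",   # no [A-Z]\w+ match, left as-is
    "bool":                         "bool",
}
for src, expected in examples.items():
    print(f"{src!r:35} -> {expected!r}")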
39
+ def gen_variable_factories(
40
+ out: str, native_yaml_path: str, tags_yaml_path: str, template_path: str
41
+ ) -> None:
42
+ native_functions = parse_native_yaml(
43
+ native_yaml_path, tags_yaml_path
44
+ ).native_functions
45
+ factory_functions = [fn for fn in native_functions if is_factory_function(fn)]
46
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
47
+ fm.write_with_template(
48
+ "variable_factories.h",
49
+ "variable_factories.h",
50
+ lambda: {
51
+ "generated_comment": "@"
52
+ + f"generated from {fm.template_dir_for_comments()}/variable_factories.h",
53
+ "ops_headers": [
54
+ f"#include <ATen/ops/{fn.root_name}.h>" for fn in factory_functions
55
+ ],
56
+ "function_definitions": list(mapMaybe(process_function, factory_functions)),
57
+ },
58
+ )
59
+
60
+
61
+ @with_native_function
62
+ def is_factory_function(f: NativeFunction) -> bool:
63
+ if Variant.function not in f.variants:
64
+ return False
65
+
66
+ name = cpp.name(f.func)
67
+ has_tensor_options = python.has_tensor_options(f)
68
+ return has_tensor_options or name.endswith("_like")
69
+
70
+
71
+ @with_native_function
72
+ def process_function(f: NativeFunction) -> Optional[str]:
73
+ name = cpp.name(f.func)
74
+ has_tensor_options = python.has_tensor_options(f)
75
+ is_factory = has_tensor_options or name.endswith("_like")
76
+
77
+ if Variant.function not in f.variants or not is_factory:
78
+ return None
79
+
80
+ cpp_sigs = CppSignatureGroup.from_native_function(f, method=False)
81
+ sigs = [cpp_sigs.signature]
82
+ if cpp_sigs.symint_signature is not None:
83
+ sigs.append(cpp_sigs.symint_signature)
84
+ r = ""
85
+ for sig in sigs:
86
+ formals: List[str] = []
87
+ exprs: List[str] = []
88
+ requires_grad = "false"
89
+ for arg in sig.arguments():
90
+ qualified_type = fully_qualified_type(arg.type)
91
+ if arg.default:
92
+ formals.append(f"{qualified_type} {arg.name} = {arg.default}")
93
+ else:
94
+ formals.append(f"{qualified_type} {arg.name}")
95
+
96
+ if isinstance(arg.argument, TensorOptionsArguments):
97
+ # note: we remove the requires_grad setting from the TensorOptions because
98
+ # it is ignored anyways (and we actually have an assertion that it isn't set
99
+ # which would fail otherwise). We handle requires_grad explicitly here
100
+ # instead of passing it through to the kernel.
101
+ exprs.append(
102
+ f"at::TensorOptions({arg.name}).requires_grad(c10::nullopt)"
103
+ )
104
+ # Manually set the requires_grad bit on the result tensor.
105
+ requires_grad = f"{arg.name}.requires_grad()"
106
+ else:
107
+ exprs.append(arg.name)
108
+
109
+ r += f"""\
110
+ inline at::Tensor {sig.name()}({', '.join(formals)}) {{
111
+ at::AutoDispatchBelowADInplaceOrView guard;
112
+ return autograd::make_variable(at::{sig.name()}({', '.join(exprs)}), /*requires_grad=*/{requires_grad});
113
+ }}
114
+ """
115
+ return r
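Putting the loop above together for a single invented signature, the sketch below assembles the same wrapper shape the template string produces: the TensorOptions argument is forwarded with requires_grad cleared, and requires_grad is re-applied when the result is wrapped as a Variable. The `ones` signature here is illustrative only, not the exact generated code.

formals = ["at::IntArrayRef size", "at::TensorOptions options = {}"]
exprs = ["size", "at::TensorOptions(options).requires_grad(c10::nullopt)"]
requires_grad = "options.requires_grad()"

wrapper = (
    f"inline at::Tensor ones({', '.join(formals)}) {{\n"
    "  at::AutoDispatchBelowADInplaceOrView guard;\n"
    f"  return autograd::make_variable(at::ones({', '.join(exprs)}), /*requires_grad=*/{requires_grad});\n"
    "}\n"
)
print(wrapper)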
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/gen_variable_type.py ADDED
@@ -0,0 +1,2164 @@
 
1
+ # Generates VariableType.h/cpp
2
+ #
3
+ # **If any changes are being made to the VariableType codegen please also check
4
+ # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
5
+ #
6
+ # VariableType is a subclass of at::Type that provides the binding code
7
+ # necessary to provide a differentiable version of ATen operators. There are a
8
+ # number of different things we could mean:
9
+ #
10
+ # - Given a non-differentiable forward implementation, we might
11
+ # directly associate it with a backward implementation to make
12
+ # it differentiable. This is the common case.
13
+ #
14
+ # - Some functions don't need a backwards implementation, because
15
+ # backpropagation will never propagate beyond them. There are a
16
+ # number of different reasons why this may be the case:
17
+ #
18
+ # - The function has no differentiable inputs
19
+ # - The function's output is not differentiable
20
+ # - The function has no data dependency on its input
21
+ #
22
+ # - Some functions don't need a backwards implementation because they
23
+ # are implemented as a composition of other (differentiable) ATen
24
+ # functions. These are dispatched directly to the Type superclass,
25
+ # which will in turn dispatch back to VariableType for its
26
+ # differentiable subcomponents.
27
+ #
28
+ import re
29
+ from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
30
+
31
+ from torchgen.api import cpp
32
+ from torchgen.api.autograd import (
33
+ DifferentiableInput,
34
+ dispatch_strategy,
35
+ ForwardDerivative,
36
+ gen_differentiable_outputs,
37
+ is_differentiable,
38
+ NativeFunctionWithDifferentiabilityInfo,
39
+ SavedAttribute,
40
+ )
41
+
42
+ from torchgen.api.types import (
43
+ ArrayRefCType,
44
+ BaseCppType,
45
+ BaseCType,
46
+ Binding,
47
+ DispatcherSignature,
48
+ intArrayRefT,
49
+ iTensorListRefT,
50
+ ListCType,
51
+ MutRefCType,
52
+ OptionalCType,
53
+ scalarT,
54
+ SpecialArgName,
55
+ stringT,
56
+ symIntArrayRefT,
57
+ TENSOR_LIST_LIKE_CTYPES,
58
+ tensorListT,
59
+ tensorT,
60
+ TupleCType,
61
+ VectorCType,
62
+ )
63
+ from torchgen.code_template import CodeTemplate
64
+ from torchgen.context import (
65
+ native_function_manager,
66
+ with_native_function,
67
+ with_native_function_and,
68
+ )
69
+ from torchgen.model import (
70
+ Argument,
71
+ BaseType,
72
+ ListType,
73
+ NativeFunction,
74
+ SchemaKind,
75
+ SelfArgument,
76
+ TensorOptionsArguments,
77
+ )
78
+ from torchgen.utils import FileManager, mapMaybe
79
+
80
+ from .context import with_native_function_with_differentiability_info_and_key
81
+ from .gen_inplace_or_view_type import (
82
+ ALL_VIEW_FUNCTIONS,
83
+ ASSIGN_RETURN_VALUE,
84
+ AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION,
85
+ gen_formals,
86
+ get_base_name,
87
+ get_view_info,
88
+ is_tensor_list_type,
89
+ is_tensor_type,
90
+ METHOD_DEFINITION,
91
+ modifies_arguments,
92
+ TMP_VAR,
93
+ unpack_args,
94
+ unpacked_name,
95
+ use_derived,
96
+ WRAPPER_REGISTRATION,
97
+ )
98
+ from .gen_trace_type import (
99
+ declare_returned_variables,
100
+ get_return_value,
101
+ MANUAL_AUTOGRAD_AND_TRACER,
102
+ MANUAL_BACKEND,
103
+ tie_return_values,
104
+ type_wrapper_name,
105
+ )
106
+
107
+ # We don't set or modify grad_fn on these methods. Generally, they return
108
+ # tensors that have requires_grad=False. In-place functions listed here will
109
+ # not examine or modify requires_grad or grad_fn.
110
+ # NB: this does NOT include overload name
111
+ DONT_REQUIRE_DERIVATIVE = {
112
+ # These only depend on the input Tensor's shape and device, not the data
113
+ "empty_like",
114
+ "ones_like",
115
+ "full_like",
116
+ "zeros_like",
117
+ "rand_like",
118
+ "randn_like",
119
+ "new_empty",
120
+ "new_empty_strided",
121
+ "new_full",
122
+ "new_zeros",
123
+ "new_ones",
124
+ # These are only implemented on integral types
125
+ "__and__",
126
+ "__iand__",
127
+ "__ilshift__",
128
+ "__ior__",
129
+ "__irshift__",
130
+ "__ixor__",
131
+ "__lshift__",
132
+ "__or__",
133
+ "__rshift__",
134
+ "__xor__",
135
+ # These work on integral data types, and hence don't require a derivative
136
+ "_sobol_engine_draw",
137
+ "_sobol_engine_ff",
138
+ "_sobol_engine_scramble_",
139
+ "_sobol_engine_initialize_state_",
140
+ # This is an unsafe method that is meant to be out of reach of autograd.
141
+ "_coalesced_",
142
+ # Quantize functions should not record gradients
143
+ "quantize_per_tensor",
144
+ "quantize_per_channel",
145
+ # Functions that return integers should not have outputs that require gradients
146
+ "argmax",
147
+ "argmin",
148
+ "argsort",
149
+ "searchsorted",
150
+ "bucketize",
151
+ # Functions that return booleans are not differentiable
152
+ "isnan",
153
+ "isposinf",
154
+ "isneginf",
155
+ "isinf",
156
+ "signbit",
157
+ "isin",
158
+ "allclose",
159
+ # Functions that return None are not differentiable
160
+ "record_stream",
161
+ # These functions are not differentiable
162
+ "logical_and",
163
+ "logical_xor",
164
+ "logical_not",
165
+ "logical_or",
166
+ # These functions return nested tensor metadata as non-differentiable tensors
167
+ "_nested_tensor_size",
168
+ "_nested_tensor_strides",
169
+ "_nested_tensor_storage_offsets",
170
+ }
171
+
172
+ # The C -> R functions at the time of adding this are still being audited and tested
173
+ # but will not error out.
174
+ # C -> C, R -> C functions for which backward is correctly implemented and tested
175
+ GRADIENT_IMPLEMENTED_FOR_COMPLEX = {
176
+ "fill",
177
+ "t",
178
+ "view",
179
+ "reshape",
180
+ "reshape_as",
181
+ "view_as",
182
+ "roll",
183
+ "clone",
184
+ "block_diag",
185
+ "diag_embed",
186
+ "repeat",
187
+ "expand",
188
+ "flip",
189
+ "fliplr",
190
+ "flipud",
191
+ "rot90",
192
+ "nanmean",
193
+ "nansum",
194
+ "transpose",
195
+ "permute",
196
+ "squeeze",
197
+ "unsqueeze",
198
+ "resize",
199
+ "resize_as",
200
+ "tril",
201
+ "triu",
202
+ "chunk",
203
+ "zero_",
204
+ "eq_",
205
+ "ne_",
206
+ "add",
207
+ "__radd__",
208
+ "sum",
209
+ "_conj",
210
+ "sin",
211
+ "cos",
212
+ "mul",
213
+ "sinc",
214
+ "sinh",
215
+ "cosh",
216
+ "__rmul__",
217
+ "sgn",
218
+ "asin",
219
+ "acos",
220
+ "sub",
221
+ "div",
222
+ "cat",
223
+ "view_as_complex",
224
+ "index_put",
225
+ "neg",
226
+ "complex",
227
+ "select",
228
+ "where",
229
+ "as_strided",
230
+ "as_strided_scatter",
231
+ "slice",
232
+ "constant_pad_nd",
233
+ "unbind",
234
+ "split",
235
+ "split_with_sizes",
236
+ "unsafe_split",
237
+ "split_with_sizes_backward",
238
+ "dot",
239
+ "vdot",
240
+ "cholesky",
241
+ "triangular_solve",
242
+ "mm",
243
+ "_unsafe_view",
244
+ "mv",
245
+ "outer",
246
+ "bmm",
247
+ "diagonal",
248
+ "alias",
249
+ "atan",
250
+ "log",
251
+ "log10",
252
+ "log1p",
253
+ "log2",
254
+ "logaddexp",
255
+ "logcumsumexp",
256
+ "reciprocal",
257
+ "tan",
258
+ "pow",
259
+ "rsqrt",
260
+ "tanh",
261
+ "tanh_backward",
262
+ "asinh",
263
+ "acosh",
264
+ "atanh",
265
+ "take",
266
+ "fill_",
267
+ "exp",
268
+ "exp2",
269
+ "expm1",
270
+ "nonzero",
271
+ "mean",
272
+ "std_mean",
273
+ "var_mean",
274
+ "inverse",
275
+ "solve",
276
+ "linalg_cholesky",
277
+ "addcmul",
278
+ "addcdiv",
279
+ "matrix_exp",
280
+ "linalg_matrix_exp",
281
+ "_linalg_eigh",
282
+ "cholesky_solve",
283
+ "linalg_qr",
284
+ "_linalg_svd",
285
+ "_fft_c2c",
286
+ "_fft_r2c",
287
+ "linalg_solve",
288
+ "sqrt",
289
+ "stack",
290
+ "gather",
291
+ "index_select",
292
+ "index_add_",
293
+ "linalg_inv",
294
+ "linalg_inv_ex",
295
+ "baddbmm",
296
+ "addbmm",
297
+ "addmm",
298
+ "addmv",
299
+ "addr",
300
+ "linalg_householder_product",
301
+ "ormqr",
302
+ "reflection_pad1d",
303
+ "reflection_pad2d",
304
+ "reflection_pad3d",
305
+ "linalg_cholesky_ex",
306
+ "linalg_eig",
307
+ "diagonal_copy",
308
+ "diagonal_scatter",
309
+ "select_backward",
310
+ "diagonal_backward",
311
+ "slice_backward",
312
+ "reflection_pad1d_backward",
313
+ "reflection_pad2d_backward",
314
+ "reflection_pad3d_backward",
315
+ "_sparse_sparse_matmul",
316
+ "replication_pad1d",
317
+ "replication_pad2d",
318
+ "replication_pad3d",
319
+ "put",
320
+ "put_",
321
+ "_to_copy",
322
+ "replication_pad1d_backward",
323
+ "replication_pad2d_backward",
324
+ "replication_pad3d_backward",
325
+ "diag",
326
+ "masked_scatter",
327
+ "masked_select",
328
+ "index_add",
329
+ "index_fill",
330
+ "trace",
331
+ "polar",
332
+ "cumsum",
333
+ "rsub",
334
+ "eig",
335
+ "lerp",
336
+ "linalg_vector_norm",
337
+ "cumprod",
338
+ "prod",
339
+ "index_copy",
340
+ "lu",
341
+ "unfold",
342
+ "unfold_backward",
343
+ "index",
344
+ "masked_fill",
345
+ "masked_scatter_backward",
346
+ "linalg_cross",
347
+ "lu_unpack",
348
+ "renorm",
349
+ "_conj_physical",
350
+ "linalg_lu_factor_ex",
351
+ "scatter",
352
+ "scatter_add",
353
+ "sigmoid",
354
+ "sigmoid_backward",
355
+ "sparse_mask",
356
+ "trapezoid",
357
+ "cumulative_trapezoid",
358
+ "conj_physical_",
359
+ "_neg_view",
360
+ "_reshape_alias",
361
+ "_reshape_copy",
362
+ "_linalg_det",
363
+ "lu_solve",
364
+ "linalg_solve_triangular",
365
+ "linalg_pinv",
366
+ "linalg_lstsq",
367
+ "unfold_copy",
368
+ "col2im",
369
+ "im2col",
370
+ "cholesky_inverse",
371
+ "to_sparse",
372
+ "sparse_sampled_addmm",
373
+ "linalg_lu",
374
+ "pixel_shuffle",
375
+ "pixel_unshuffle",
376
+ "linalg_lu_solve",
377
+ "_linalg_slogdet",
378
+ "_linalg_solve_ex",
379
+ }
380
+
381
+ GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX = {
382
+ "_to_dense",
383
+ "_coalesce",
384
+ "coalesce",
385
+ "values",
386
+ "_sparse_coo_tensor_with_dims_and_tensors",
387
+ "_sparse_addmm",
388
+ }
389
+
390
+ GRADIENT_IMPLEMENTED_FOR_COMPLEX.update(GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX)
391
+
392
+ # Some operators invalidate the grad_accumulator. Let's reset it.
393
+ RESET_GRAD_ACCUMULATOR = {"set_", "resize_"}
394
+
395
+ # NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
396
+ #
397
+ # We check the following properties:
398
+ # 1) A function should never change the input tensors' underlying c10::TensorImpl
399
+ # pointers or c10::Storage pointers, even if it modifies its input tensors (via
400
+ # inplace or out-variants)
401
+ # If the function does not modify its arguments, we also check the following properties
402
+ # pertaining to its output:
403
+ # 2) Its TensorImpl has use_count of 1
404
+ # 3) If the function is a view function, it has the same StorageImpl as that of
405
+ # the input it is aliased with. Otherwise, its StorageImpl has use_count of 1
406
+ #
407
+ # The following code templates implement the checks for this invariant:
408
+ SAVE_TENSOR_STORAGE = CodeTemplate(
409
+ """\
410
+ c10::optional<Storage> ${tensor_name}_storage_saved =
411
+ ${tensor_name}.has_storage() ? c10::optional<Storage>(${tensor_name}.storage()) : c10::nullopt;
412
+ """
413
+ )
414
+
415
+
416
+ # If tensor_name == out_tensor_name, used to enforce (1), otherwise used for (2)
417
+ ENFORCE_SAME_TENSOR_STORAGE = CodeTemplate(
418
+ """\
419
+ if (${tensor_name}_storage_saved.has_value() &&
420
+ !at::impl::dispatch_mode_enabled() &&
421
+ !at::impl::tensor_has_dispatch(${tensor_name}))
422
+ TORCH_INTERNAL_ASSERT(${tensor_name}_storage_saved.value().is_alias_of(${out_tensor_name}.storage()));
423
+ """
424
+ )
425
+
426
+ SAVE_TENSORLIST_STORAGE = CodeTemplate(
427
+ """\
428
+ std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
429
+ for (const Tensor& tensor : ${tensorlist_name})
430
+ ${tensorlist_name}_storage_saved.push_back(
431
+ tensor.has_storage() ? c10::optional<Storage>(tensor.storage()) : c10::nullopt);
432
+ """
433
+ )
434
+
435
+ ENFORCE_SAME_TENSORLIST_STORAGE = CodeTemplate(
436
+ """\
437
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
438
+ if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
439
+ TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(${tensorlist_name}[i].storage()));
440
+ }
441
+ """
442
+ )
443
+
444
+ SAVE_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
445
+ """\
446
+ std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
447
+ for (const c10::optional<Tensor>& tensor : ${tensorlist_name})
448
+ ${tensorlist_name}_storage_saved.push_back(
449
+ tensor.has_value() && tensor->has_storage() ? c10::optional<Storage>(tensor->storage()) : c10::nullopt);
450
+ """
451
+ )
452
+
453
+ ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
454
+ """\
455
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
456
+ if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
457
+ TORCH_INTERNAL_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(
458
+ static_cast<c10::optional<Tensor>>(${tensorlist_name}[i])->storage()));
459
+ }
460
+ """
461
+ )
462
+
463
+ SAVE_TENSOR_IMPL = CodeTemplate(
464
+ """\
465
+ c10::intrusive_ptr<TensorImpl> ${tensor_name}_impl_saved;
466
+ if (${tensor_name}.defined()) ${tensor_name}_impl_saved = ${tensor_name}.getIntrusivePtr();
467
+ """
468
+ )
469
+
470
+ ENFORCE_SAME_TENSOR_IMPL = CodeTemplate(
471
+ """\
472
+ if (${tensor_name}_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
473
+ TORCH_INTERNAL_ASSERT(${tensor_name}_impl_saved == ${tensor_name}.getIntrusivePtr());
474
+ """
475
+ )
476
+
477
+ ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE = CodeTemplate(
478
+ """\
479
+ if (!at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
480
+ TORCH_INTERNAL_ASSERT(${tensor_name}.use_count() <= 1, "function: ${fn_name}");
481
+ """
482
+ )
483
+
484
+ ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE = CodeTemplate(
485
+ """\
486
+ if (${tensor_name}.has_storage() && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) {
487
+ TORCH_INTERNAL_ASSERT(${tensor_name}.storage().use_count() == 1, "function: ${fn_name}");
488
+ }
489
+ """
490
+ )
491
+
492
+ SAVE_TENSORLIST_IMPL = CodeTemplate(
493
+ """\
494
+ std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
495
+ for (size_t i=0; i<${tensorlist_name}.size(); i++)
496
+ if (${tensorlist_name}[i].defined()) ${tensorlist_name}_impl_saved[i] = ${tensorlist_name}[i].getIntrusivePtr();
497
+ """
498
+ )
499
+
500
+ ENFORCE_SAME_TENSORLIST_IMPL = CodeTemplate(
501
+ """\
502
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
503
+ if (${tensorlist_name}_impl_saved[i] && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
504
+ TORCH_INTERNAL_ASSERT(${tensorlist_name}_impl_saved[i] == ${tensorlist_name}[i].getIntrusivePtr());
505
+ }
506
+ """
507
+ )
508
+
509
+ SAVE_OPTIONALTENSORLIST_IMPL = CodeTemplate(
510
+ """\
511
+ std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
512
+ for (size_t i=0; i<${tensorlist_name}.size(); i++) {
513
+ c10::optional<Tensor> t = ${tensorlist_name}[i];
514
+ if (t.has_value() && t->defined()) ${tensorlist_name}_impl_saved[i] = t->getIntrusivePtr();
515
+ }
516
+ """
517
+ )
518
+
519
+ ENFORCE_SAME_OPTIONALTENSORLIST_IMPL = CodeTemplate(
520
+ """\
521
+ for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
522
+ if (${tensorlist_name}_impl_saved[i])
523
+ TORCH_INTERNAL_ASSERT(
524
+ ${tensorlist_name}_impl_saved[i] == static_cast<c10::optional<Tensor>>(${tensorlist_name}[i])->getIntrusivePtr());
525
+ }
526
+ """
527
+ )
528
+
529
+ # The following list contains functions that we don't enforce the invariant on.
530
+ DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = {
531
+ # These functions are expected to change impl or storage of input tensors
532
+ "set_",
533
+ "_cudnn_rnn_flatten_weight",
534
+ }
535
+ DONT_ENFORCE_TENSOR_IMPL_USE_COUNT = {
536
+ # These non-inplace, non-out functions return tensors with use_count > 1
537
+ # Therefore, they MAY (but not necessarily) return one of their inputs as-is
538
+ # See https://github.com/pytorch/pytorch/issues/60426 for more information
539
+ "_embedding_bag",
540
+ "_embedding_bag_forward_only",
541
+ "q_per_channel_scales",
542
+ "q_per_channel_zero_points",
543
+ "lu_unpack",
544
+ "_cudnn_rnn_backward",
545
+ # The ops below failed the StorageImpl use_count check, but we skip the tensor_impl check
546
+ # just in case
547
+ "_cudnn_rnn",
548
+ "dequantize_self",
549
+ # lift() should never actually be called with a requires_grad=True tensor.
550
+ "lift",
551
+ "lift_fresh",
552
+ "lift_fresh_copy",
553
+ # Nested Tensors related functions
554
+ # _nested_tensor_size() should never actually be called with requires_grad=True tensor
555
+ "_nested_tensor_size",
556
+ "_nested_tensor_strides",
557
+ "_nested_tensor_storage_offsets",
558
+ }
559
+
560
+ DONT_ENFORCE_STORAGE_IMPL_USE_COUNT = {
561
+ # These non-view functions return tensors with storage use_count != 1
562
+ "_slow_conv2d_forward",
563
+ "slow_conv3d_forward",
564
+ "channel_shuffle",
565
+ # If an input is returned as-is in output, we cannot guarantee its storage_impl
566
+ # use count to be 1 either.
567
+ *DONT_ENFORCE_TENSOR_IMPL_USE_COUNT,
568
+ }
569
+ # END CHECKS FOR [ TensorImpl and Storage Pointer Sanity Checks ]
570
+
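Taken together, these templates bracket the redispatch call: the SAVE_* snippets capture each input's Storage and TensorImpl pointers before the call, and the ENFORCE_* snippets assert afterwards that neither pointer changed. A rough composition for a single tensor argument named self is sketched below; the real emission happens in check_tensorimpl_and_storage further down and is typically guarded to run only in debug builds:

    c10::optional<Storage> self_storage_saved =
      self.has_storage() ? c10::optional<Storage>(self.storage()) : c10::nullopt;
    c10::intrusive_ptr<TensorImpl> self_impl_saved;
    if (self.defined()) self_impl_saved = self.getIntrusivePtr();
    // ... redispatch to the underlying kernel here ...
    if (self_storage_saved.has_value() &&
        !at::impl::dispatch_mode_enabled() &&
        !at::impl::tensor_has_dispatch(self))
      TORCH_INTERNAL_ASSERT(self_storage_saved.value().is_alias_of(self.storage()));
    if (self_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(self))
      TORCH_INTERNAL_ASSERT(self_impl_saved == self.getIntrusivePtr());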
571
+ DECLARE_GRAD_FN = CodeTemplate(
572
+ """\
573
+ std::shared_ptr<${op}> grad_fn;
574
+ """
575
+ )
576
+
577
+ DECLARE_VECTOR_OF_GRAD_FN = CodeTemplate(
578
+ """\
579
+ std::vector<std::shared_ptr<${op}>> grad_fns;
580
+ """
581
+ )
582
+
583
+ SETUP_ANY_REQUIRES_GRAD = CodeTemplate(
584
+ """\
585
+ [[maybe_unused]] auto _any_requires_grad = compute_requires_grad( ${args_with_derivatives} );
586
+ ${extra_differentiability_conditions}
587
+ """
588
+ )
589
+
590
+ SETUP_DERIVATIVE = CodeTemplate(
591
+ """\
592
+ if (_any_requires_grad) {
593
+ ${setup}
594
+ }
595
+ """
596
+ )
597
+
598
+ SETUP_NONE_REQUIRES_GRAD = CodeTemplate(
599
+ """\
600
+ if (compute_requires_grad( ${args_to_check} )) {
601
+ throw_error_out_requires_grad("${base_name}");
602
+ }
603
+ """
604
+ )
605
+
606
+ ASSIGN_GRAD_FN = CodeTemplate(
607
+ """\
608
+ grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
609
+ grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
610
+ """
611
+ )
612
+
613
+ # note(crcrpar): `compute_requires_grad` in the template below is supplied with arguments indexed with `i`
614
+ # while the `SETUP_ANY_REQUIRES_GRAD` above takes whole tensors and scalars.
615
+ ASSIGN_VECTOR_OF_GRAD_FN = CodeTemplate(
616
+ """\
617
+ for (const auto& i : c10::irange( ${irange} )) {
618
+ const auto ith_requires_grad = compute_requires_grad(${args_with_derivatives});
619
+ check_inplace(self[i], ith_requires_grad);
620
+ grad_fns.push_back([&]() -> std::shared_ptr<${op}> {
621
+ if (!ith_requires_grad) {
622
+ return nullptr;
623
+ } else {
624
+ auto grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
625
+ grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
626
+ return grad_fn;
627
+ }
628
+ }());
629
+ }
630
+ """
631
+ )
632
+
633
+ CALL_REDISPATCH = CodeTemplate(
634
+ """\
635
+ at::redispatch::${api_name}(${unpacked_args})"""
636
+ )
637
+ # If the non-variable operation has return values, we use the `tmp` variable to hold the
638
+ # values temporarily and pass the values to the return variables outside of the
639
+ # `at::AutoDispatchBelowAutograd` guard block.
640
+ DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP = CodeTemplate(
641
+ """\
642
+ auto ${tmp_var} = ([&]() {
643
+ if (${any_has_forward_grad}) {
644
+ static c10::OperatorName full_name("aten::${op_name}", "${op_overload}");
645
+ static c10::optional<c10::OperatorHandle> opt_op = c10::Dispatcher::singleton().findSchema(full_name);
646
+ return impl::run_jit_decomposition_with_args_for_jvp<${return_types}>("${op_name}", *opt_op, ks, ${arg_names});
647
+ } else {
648
+ ${guard}
649
+ return ${base_type_call};
650
+ }
651
+ })();
652
+ """
653
+ )
654
+
655
+ DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES = CodeTemplate(
656
+ """\
657
+ auto ${tmp_var} = ([&]() {
658
+ ${guard}
659
+ return ${base_type_call};
660
+ })();
661
+ """
662
+ )
663
+
664
+ DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES = CodeTemplate(
665
+ """\
666
+ {
667
+ ${guard}
668
+ ${base_type_call};
669
+ }
670
+ """
671
+ )
672
+
673
+ SET_HISTORY = CodeTemplate(
674
+ """\
675
+ if (grad_fn) {
676
+ ${fn}_history(${differentiable_outputs}, grad_fn);
677
+ }
678
+ """
679
+ )
680
+
681
+ LOOP_OVER_VECTOR_OF_GRAD_FNS = CodeTemplate(
682
+ """\
683
+ if (!grad_fns.empty()) {
684
+ ${preamble}
685
+ for (const auto& i : c10::irange(grad_fns.size())) {
686
+ auto grad_fn = grad_fns[i];
687
+ if (grad_fn != nullptr) {
688
+ ${statements}
689
+ }
690
+ }
691
+ }
692
+ """
693
+ )
694
+
695
+ CONDITIONAL = CodeTemplate(
696
+ """\
697
+ if (${cond}) {
698
+ ${statements}
699
+ }
700
+ """
701
+ )
702
+
703
+ RUN_ONLY_IN_DEBUG_MODE = CodeTemplate(
704
+ """\
705
+ #ifndef NDEBUG
706
+ ${statements}
707
+ #endif
708
+ """
709
+ )
710
+
711
+ FW_DERIVATIVE_CHECK_TEMPLATE = CodeTemplate(
712
+ """\
713
+ isFwGradDefined(${req_inp})\
714
+ """
715
+ )
716
+ FW_DERIVATIVE_SIZE_CHECK_TEMPLATE = CodeTemplate(
717
+ """\
718
+ TORCH_CHECK(
719
+ self.size() == ${inp_name}.size(),
720
+ "Tensor lists must have the same number of tensors, got ",
721
+ self.size(),
722
+ " and ",
723
+ ${inp_name}.size());
724
+ """
725
+ )
726
+
727
+ FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE = CodeTemplate(
728
+ """\
729
+ isFwGradDefinedTensorList(${req_inp})\
730
+ """
731
+ )
732
+
733
+ FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE = CodeTemplate(
734
+ """\
735
+ auto ${inp_name}_t_raw = toNonOptFwGrad(${inp});
736
+ auto ${inp_name}_tensor = toNonOptTensor(${inp});
737
+ auto ${inp_name}_t = (${inp_name}_t_raw.defined() || !${inp_name}_tensor.defined())
738
+ ? ${inp_name}_t_raw : at::${zeros_fn}(${inp_name}_tensor.sizes(), ${inp_name}_tensor.options());
739
+ """
740
+ )
741
+
742
+ FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE = CodeTemplate(
743
+ """\
744
+ auto ${inp_name}_p = toNonOptPrimal(${inp});
745
+ """
746
+ )
747
+
748
+ FW_DERIVATIVE_SETTER_TENSOR = CodeTemplate(
749
+ """\
750
+ if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}.defined()) {
751
+ // The hardcoded 0 here will need to be updated once we support multiple levels.
752
+ ${out_arg}._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
753
+ }
754
+ """
755
+ )
756
+
757
+ FW_DERIVATIVE_SETTER_TENSOR_FOREACH = CodeTemplate(
758
+ """\
759
+ for (const auto& i : c10::irange(${out_arg}_new_fw_grad_opts.size())) {
760
+ auto& ${out_arg}_new_fw_grad_opt = ${out_arg}_new_fw_grad_opts[i];
761
+ if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}[i].defined()) {
762
+ // The hardcoded 0 here will need to be updated once we support multiple levels.
763
+ ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
764
+ }
765
+ }
766
+ """
767
+ )
768
+
769
+ FW_DERIVATIVE_SETTER_MULTI_OUTPUT = CodeTemplate(
770
+ """\
771
+ if (${all_res}_new_fw_grad_opt.has_value() && std::get<${idx}>(${all_res}_new_fw_grad_opt.value()).defined()
772
+ && ${out_arg}.defined()) {
773
+ ${out_arg}._set_fw_grad(std::get<${idx}>(${all_res}_new_fw_grad_opt.value()), /* level */ 0, /* is_inplace_op */ false);
774
+ }
775
+ """
776
+ )
777
+
778
+ FW_DERIVATIVE_SETTER_TENSOR_LIST = CodeTemplate(
779
+ """\
780
+ if (${out_arg}_new_fw_grad_opt.has_value()) {
781
+ auto ${out_arg}_new_fw_grad = ${out_arg}_new_fw_grad_opt.value();
782
+ TORCH_INTERNAL_ASSERT(${out_arg}.size() == ${out_arg}_new_fw_grad.size());
783
+ for (const auto i : c10::irange(${out_arg}.size())) {
784
+ if (${out_arg}_new_fw_grad[i].defined() && ${out_arg}[i].defined()) {
785
+ // The hardcoded 0 here will need to be updated once we support multiple levels.
786
+ ${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad[i], /* level */ 0, /* is_inplace_op */ ${is_inplace});
787
+ }
788
+ }
789
+ }
790
+ """
791
+ )
792
+
793
+ FW_DERIVATIVE_TEMPLATE = CodeTemplate(
794
+ """\
795
+ ${fw_grad_opt_definition}
796
+ if (${requires_fw_grad}) {
797
+ ${unpacked_arguments}
798
+ ${out_arg}_new_fw_grad_opt = ${formula};
799
+ }
800
+ """
801
+ )
802
+
803
+ FW_DERIVATIVE_FOREACH_TEMPLATE = CodeTemplate(
804
+ """\
805
+ ${fw_grad_opt_definition}
806
+ for (const auto& i : c10::irange(${vector_of_optional_tensor}.size())) {
807
+ if (${any_has_forward_grad_for_current_index}) {
808
+ ${unpacked_arguments}
809
+ ${vector_of_optional_tensor}[i] = ${formula};
810
+ }
811
+ }
812
+ """
813
+ )
814
+
815
+ FW_DERIVATIVE_FORBID_TEMPLATE = CodeTemplate(
816
+ """\
817
+ TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
818
+ """
819
+ )
820
+
821
+ FW_DERIVATIVE_FORBID_LIST_TEMPLATE = CodeTemplate(
822
+ """\
823
+ for (const auto& _t: ${arg}) {
824
+ TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
825
+ }
826
+ """
827
+ )
828
+
829
+
830
+ def gen_variable_type(
831
+ out: str,
832
+ native_yaml_path: str,
833
+ tags_yaml_path: str,
834
+ fns_with_diff_infos: List[NativeFunctionWithDifferentiabilityInfo],
835
+ template_path: str,
836
+ used_keys: Set[str],
837
+ ) -> None:
838
+ """VariableType.h and VariableType.cpp body
839
+
840
+ This is the at::Type subclass for differentiable tensors. The
841
+ implementation of each function dispatches to the base tensor type to
842
+ compute the output. The grad_fn is attached to differentiable functions.
843
+ """
844
+ fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
845
+ fm.write(
846
+ "VariableType.h",
847
+ lambda: {
848
+ "generated_comment": "@"
849
+ + f"generated from {fm.template_dir_for_comments()}/VariableType.h"
850
+ },
851
+ )
852
+
853
+ # helper that generates a TORCH_LIBRARY_IMPL macro for each
854
+ # dispatch key that appears in derivatives.yaml
855
+ def wrapper_registrations(used_keys: Set[str]) -> str:
856
+ library_impl_macro_list: List[str] = []
857
+ for key in sorted(used_keys):
858
+ dispatch_key = key
859
+ if key == "Default":
860
+ dispatch_key = "Autograd"
861
+ library_impl_macro = (
862
+ f"TORCH_LIBRARY_IMPL(aten, {dispatch_key}, m) "
863
+ + "{\n"
864
+ + "${"
865
+ + f"wrapper_registrations_{key}"
866
+ + "}\n}"
867
+ )
868
+ library_impl_macro_list += [library_impl_macro]
869
+ return "\n\n".join(library_impl_macro_list)
870
+
871
+ # Generate a new template from VariableType.cpp which replaces ${wrapper_registrations}
872
+ # with per key TORCH_LIBRARY_IMPL macros for each key that appears in derivatives.yaml
873
+ fm1 = FileManager(
874
+ install_dir=out + "/templates", template_dir=template_path, dry_run=False
875
+ )
876
+ fm1.write(
877
+ "VariableType.cpp",
878
+ lambda: {
879
+ "type_derived_method_definitions": "\n\n".join(
880
+ [
881
+ "${" + f"type_derived_method_definitions_{key}" + "}"
882
+ for key in sorted(used_keys)
883
+ ]
884
+ ),
885
+ "wrapper_registrations": wrapper_registrations(used_keys),
886
+ },
887
+ )
888
+
889
+ # Generate final VariableType_*.cpp files from the generated template
890
+ fm2 = FileManager(install_dir=out, template_dir=out + "/templates", dry_run=False)
891
+
892
+ sharded_keys = set(
893
+ [f"type_derived_method_definitions_{key}" for key in sorted(used_keys)]
894
+ + [f"wrapper_registrations_{key}" for key in sorted(used_keys)]
895
+ )
896
+ # NOTE: see Note [Sharded File] at the top of the VariableType.cpp
897
+ # template regarding sharding of the generated files.
898
+ fm2.write_sharded(
899
+ "VariableType.cpp",
900
+ [fn for fn in fns_with_diff_infos if use_derived(fn)],
901
+ key_fn=lambda fn: cpp.name(fn.func.func),
902
+ base_env={
903
+ "generated_comment": "@"
904
+ + f"generated from {fm.template_dir_for_comments()}/VariableType.cpp",
905
+ },
906
+ env_callable=gen_variable_type_func,
907
+ num_shards=5,
908
+ sharded_keys=sharded_keys,
909
+ )
910
+
911
+
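Concretely, the intermediate VariableType.cpp template written by fm1 contains one TORCH_LIBRARY_IMPL block per used key, and the sharded files written by fm2 fill in the per-key registration lists. For the "Default" key, which maps to the Autograd dispatch key, each shard ends up with a block shaped roughly like the one below; the m.impl line is only a sketch of what WRAPPER_REGISTRATION (defined in gen_inplace_or_view_type.py) is assumed to produce, with an illustrative operator:

    TORCH_LIBRARY_IMPL(aten, Autograd, m) {
      // one registration per generated kernel (assumed form, illustrative op)
      m.impl("mul.Tensor", TORCH_FN(VariableType::mul_Tensor));
    }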
912
+ @with_native_function_and
913
+ def gen_wrapper_registration(f: NativeFunction, key: str = "Default") -> str:
914
+ return WRAPPER_REGISTRATION.substitute(
915
+ unqual_operator_name_with_overload=f.func.name,
916
+ type_wrapper_name=type_wrapper_name(f, key),
917
+ class_type="VariableType",
918
+ )
919
+
920
+
921
+ def gen_variable_type_func(
922
+ fn: NativeFunctionWithDifferentiabilityInfo,
923
+ ) -> Dict[str, List[str]]:
924
+ f = fn.func
925
+ result = {}
926
+ with native_function_manager(f):
927
+ name = cpp.name(f.func)
928
+ formals = gen_formals(f)
929
+
930
+ if (
931
+ fn.info is None
932
+ and str(f.func.name.name) not in RESET_GRAD_ACCUMULATOR
933
+ and get_base_name(f) not in DONT_REQUIRE_DERIVATIVE
934
+ and len(gen_differentiable_outputs(fn)) > 0
935
+ and cpp.name(f.func) not in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE
936
+ and type_wrapper_name(f) not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
937
+ and type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT
938
+ ):
939
+ # NOTE: [ Registering AutogradNotImplemented boxed kernel ]
940
+ #
941
+ # When there is no derivatives.yaml entry, we register a generic boxed
942
+ # NotImplemented kernel to set grad_fn to be NotImplemented, so that forward
943
+ # proceeds as usual but an error is properly produced on backward.
944
+ # TODO: it would be nice to not have these special cases
945
+ #
946
+ # There are several cases where we still let codegen handle it:
947
+ # 1) ops that need to reset the grad accumulator (we let codegen handle this case
948
+ # because the list is (currently) only accessible in Python).
949
+ # 2) User explicitly specifies DONT_REQUIRE_DERIVATIVE. This basically makes
950
+ # autograd a fallthrough with NDEBUG checks. This can be useful when all
951
+ # outputs are integral.
952
+ # 3) When there are no differentiable outputs. This is similar to (2).
953
+ # 4) There are certain ops where we skip certain NDEBUG checks. This is similar
954
+ # to (1).
955
+ type_definition = ""
956
+ wrapper_registration = AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION.substitute(
957
+ unqual_operator_name_with_overload=f.func.name
958
+ )
959
+ result["type_derived_method_definitions_Default"] = [type_definition]
960
+ result["wrapper_registrations_Default"] = [wrapper_registration]
961
+ else:
962
+ if not fn.info:
963
+ key = "Default"
964
+ type_definition = METHOD_DEFINITION.substitute(
965
+ return_type=cpp.returns_type(
966
+ f.func.returns, symint=True
967
+ ).cpp_type(),
968
+ type_wrapper_name=type_wrapper_name(f, key),
969
+ type_definition_body=emit_body(fn, key),
970
+ formals=formals,
971
+ )
972
+ wrapper_registration = gen_wrapper_registration(f, key)
973
+ result[f"type_derived_method_definitions_{key}"] = [type_definition]
974
+ result[f"wrapper_registrations_{key}"] = [wrapper_registration]
975
+ else:
976
+ for key in fn.info.keys():
977
+ type_definition = METHOD_DEFINITION.substitute(
978
+ return_type=cpp.returns_type(
979
+ f.func.returns, symint=True
980
+ ).cpp_type(),
981
+ type_wrapper_name=type_wrapper_name(f, key),
982
+ type_definition_body=emit_body(fn, key),
983
+ formals=formals,
984
+ )
985
+ wrapper_registration = gen_wrapper_registration(f, key)
986
+ result[f"type_derived_method_definitions_{key}"] = [type_definition]
987
+ result[f"wrapper_registrations_{key}"] = [wrapper_registration]
988
+ # See Note [Manual Backend kernels]
989
+ assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
990
+ # If you want to register a kernel to Autograd, you must make the op abstract.
991
+ # In other words, this op must have dispatch section in native_functions.yaml.
992
+ if name in MANUAL_AUTOGRAD_AND_TRACER or (
993
+ fn.info and any(info.has_derivatives for info in fn.info.values())
994
+ ):
995
+ msg = (
996
+ f"There's a formula for {name}(or its functional variant) in derivatives.yaml. "
997
+ f"It's required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA "
998
+ f"or CompositeExplicitAutograd in native_functions.yaml. Please see "
999
+ f"https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword "
1000
+ f"for instructions to choose the right dispatch keyword."
1001
+ )
1002
+ assert f.is_abstract, msg
1003
+
1004
+ return result
1005
+
1006
+
1007
+ _foreach_ops_without_differentiability_info = {
1008
+ # No reference backward available due to the lack of `{maximum, minimum}(tensor, scalar)`.
1009
+ ("_foreach_maximum", "Scalar"),
1010
+ ("_foreach_maximum", "ScalarList"),
1011
+ ("_foreach_minimum", "Scalar"),
1012
+ ("_foreach_minimum", "ScalarList"),
1013
+ # No reference backward available as addcdiv/addcmul don't support Tensor as scaling factor.
1014
+ ("_foreach_addcdiv", "Tensor"),
1015
+ ("_foreach_addcmul", "Tensor"),
1016
+ ("_foreach_copy", ""),
1017
+ }
1018
+
1019
+ _foreach_ops_with_different_arity = {
1020
+ # These ops lack the `alpha` scaling factor that is applied to the right-hand-side argument.
1021
+ ("_foreach_add", "Scalar"),
1022
+ ("_foreach_add", "ScalarList"),
1023
+ ("_foreach_sub", "Scalar"),
1024
+ ("_foreach_sub", "ScalarList"),
1025
+ }
1026
+
1027
+
1028
+ @with_native_function_with_differentiability_info_and_key
1029
+ def emit_body(
1030
+ fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
1031
+ ) -> List[str]:
1032
+ assert dispatch_strategy(fn) == "use_derived"
1033
+ f = fn.func
1034
+ info = fn.info[key] if fn.info else None
1035
+ fw_derivatives = fn.fw_derivatives.get(key, []) if fn.fw_derivatives else []
1036
+
1037
+ name = cpp.name(f.func)
1038
+ inplace = f.func.kind() == SchemaKind.inplace
1039
+ is_out_fn = f.func.kind() == SchemaKind.out
1040
+ returns_void = len(f.func.returns) == 0
1041
+ base_name = get_base_name(f)
1042
+ view_info = get_view_info(f)
1043
+
1044
+ is_foreach = name.startswith("_foreach")
1045
+ is_inplace_foreach = is_foreach and inplace
1046
+ if is_inplace_foreach:
1047
+ inplace_foreacharg2refarg: Dict[Argument, Argument] = {}
1048
+ refargname2inplace_foreacharg: Dict[str, Argument] = {}
1049
+ base_name_and_overload_name = (f.func.name.name.base, f.func.name.overload_name)
1050
+ if info is None:
1051
+ assert (
1052
+ base_name_and_overload_name
1053
+ in _foreach_ops_without_differentiability_info
1054
+ ), f"{'.'.join(base_name_and_overload_name)} should have a differentiability info"
1055
+ else:
1056
+ assert (
1057
+ len(f.func.arguments.flat_non_out)
1058
+ == len(info.func.func.arguments.flat_non_out)
1059
+ ) or (base_name_and_overload_name in _foreach_ops_with_different_arity), (
1060
+ f"{'.'.join(base_name_and_overload_name)} has {len(f.func.arguments.flat_non_out)} args "
1061
+ f"but the reference has {len(info.func.func.arguments.flat_non_out)}"
1062
+ )
1063
+ for foreach_arg, ref_arg in zip(
1064
+ f.func.arguments.flat_non_out, info.func.func.arguments.flat_non_out
1065
+ ):
1066
+ foreach_arg_type = foreach_arg.type
1067
+ if isinstance(foreach_arg_type, ListType):
1068
+ foreach_arg_type = foreach_arg_type.elem
1069
+ assert foreach_arg_type == ref_arg.type
1070
+ inplace_foreacharg2refarg[foreach_arg] = ref_arg
1071
+ refargname2inplace_foreacharg[ref_arg.name] = foreach_arg
1072
+
1073
+ def gen_differentiable_input(
1074
+ arg: Union[Argument, SelfArgument, TensorOptionsArguments]
1075
+ ) -> Optional[DifferentiableInput]:
1076
+ if isinstance(arg, TensorOptionsArguments):
1077
+ return None
1078
+ a: Argument = arg.argument if isinstance(arg, SelfArgument) else arg
1079
+
1080
+ # TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove.
1081
+ # NB: This is not a clone of cpp.argument() - TensorOptionsArguments / faithful / binds are
1082
+ # not handled properly as they are irrelevant for this codegen.
1083
+ cpp_type = cpp.argument_type(a, binds=a.name, symint=True).cpp_type()
1084
+
1085
+ if not is_differentiable(a.name, a.type, info):
1086
+ return None
1087
+ return DifferentiableInput(
1088
+ name=a.name,
1089
+ type=a.type,
1090
+ cpp_type=cpp_type,
1091
+ )
1092
+
1093
+ @with_native_function
1094
+ def gen_differentiable_inputs(f: NativeFunction) -> List[DifferentiableInput]:
1095
+ arguments = list(f.func.arguments.non_out)
1096
+ if is_inplace_foreach and info is not None:
1097
+ for i, arg in enumerate(f.func.arguments.flat_non_out):
1098
+ if arg in inplace_foreacharg2refarg:
1099
+ # note(crcrpar): From what I understand, what matters is only the name.
1100
+ # Thus originally I only replace argument only when the names are different.
1101
+ # TODO(crcrpar): Make it simpler.
1102
+ mapped_arg = inplace_foreacharg2refarg[arg]
1103
+ arguments[i] = Argument(
1104
+ mapped_arg.name,
1105
+ mapped_arg.type,
1106
+ mapped_arg.default,
1107
+ mapped_arg.annotation,
1108
+ )
1109
+ return list(mapMaybe(gen_differentiable_input, arguments))
1110
+
1111
+ def find_args_with_derivatives(
1112
+ differentiable_inputs: List[DifferentiableInput],
1113
+ ) -> List[DifferentiableInput]:
1114
+ """Find arguments that have derivative definitions"""
1115
+ if info is None or not info.has_derivatives:
1116
+ return differentiable_inputs
1117
+ names = {name for d in info.derivatives for name in d.var_names}
1118
+ differentiable = [arg for arg in differentiable_inputs if arg.name in names]
1119
+ if len(differentiable) != len(names):
1120
+ missing = names - {arg.name for arg in differentiable}
1121
+ raise RuntimeError(
1122
+ f"Missing arguments for derivatives: {missing} in {info.name}"
1123
+ )
1124
+ return differentiable
1125
+
1126
+ differentiable_inputs = gen_differentiable_inputs(f)
1127
+ args_with_derivatives = find_args_with_derivatives(differentiable_inputs)
1128
+ differentiable_outputs = gen_differentiable_outputs(fn, key)
1129
+
1130
+ undifferentiable = (base_name in DONT_REQUIRE_DERIVATIVE) or (
1131
+ name in DONT_REQUIRE_DERIVATIVE
1132
+ )
1133
+
1134
+ requires_derivative = (
1135
+ (not undifferentiable)
1136
+ and (len(differentiable_inputs) > 0)
1137
+ and (
1138
+ (len(differentiable_outputs) > 0)
1139
+ # note(crcrpar): In-place foreach functions are void functions.
1140
+ or is_inplace_foreach
1141
+ )
1142
+ )
1143
+
1144
+ if (
1145
+ info is not None
1146
+ and info.has_derivatives
1147
+ and not requires_derivative
1148
+ # out= ops are allowed to have zero returns which cause requires_derivative to be False
1149
+ # we shouldn't error out though (out= ops for autograd just redispatch)
1150
+ and len(f.func.returns) > 0
1151
+ ):
1152
+ raise RuntimeError(
1153
+ f"ERROR: derivative ignored for {name} -- specified an autograd function without derivative"
1154
+ )
1155
+
1156
+ # note(crcrpar): In-place foreach functions do not support forward AD
1157
+ if requires_derivative and len(fw_derivatives) > 0 and not is_inplace_foreach:
1158
+ assert sum(len(derivative.var_names) for derivative in fw_derivatives) == len(
1159
+ differentiable_outputs
1160
+ ), (
1161
+ "Expected the number of forward derivatives implemented to match the "
1162
+ "number of differentiable outputs. NB: This only applies when at least "
1163
+ "one forward derivative is implemented. Not implementing any forward "
1164
+ "derivatives is also okay, and we would require inputs to the op to "
1165
+ "not have associated tangents in that case."
1166
+ )
1167
+
1168
+ try_jit_decomposition = (
1169
+ requires_derivative
1170
+ and len(fw_derivatives) == 0
1171
+ and (not modifies_arguments(f))
1172
+ and (not returns_void)
1173
+ )
1174
+
1175
+ def emit_save_inputs() -> List[str]:
1176
+ setup: List[str] = []
1177
+ if info is None or not info.has_derivatives:
1178
+ return setup
1179
+
1180
+ has_tensorlist_arg = any(
1181
+ is_tensor_list_type(arg.type) for arg in args_with_derivatives
1182
+ )
1183
+
1184
+ # We don't want to save tensors if we know that they will never be used
1185
+ # when computing the derivative, so we add guards to those statements
1186
+ def guard_for(arg: SavedAttribute) -> Optional[str]:
1187
+ assert info is not None
1188
+
1189
+ # It's hard to determine the edge offset if we have TensorLists
1190
+ # NOTE(crcrpar): in-place foreach functions' arguments include tensorlist
1191
+ # but their derivatives don't use it, so let them bypass this check.
1192
+ if has_tensorlist_arg and (not is_inplace_foreach):
1193
+ return None
1194
+
1195
+ # Empirical evaluation of the cases where we insert those guards in
1196
+ # backward show that they are somewhat useless. E.g. there's no need
1197
+ # to guard on some values captured from forward, because they had to
1198
+ # require_grad if the backward function even gets executed. I don't
1199
+ # have any good ideas for detecting those cases, so I simply disabled the
1200
+ # checks.
1201
+ if "backward" in info.name:
1202
+ return None
1203
+
1204
+ # If there's a single derivative we could compute, we already have
1205
+ # a requires_grad check that is sufficient
1206
+ if len(args_with_derivatives) <= 1:
1207
+ return None
1208
+
1209
+ # We really only care about trimming down the amount of tensors we save
1210
+ if arg.nctype.type != BaseCType(tensorT):
1211
+ return None
1212
+
1213
+ # We want to emit simple guards, so we only allow that if checking one
1214
+ # input is enough to determine whether we need that value
1215
+ used_in = [d for d in info.derivatives if arg in d.saved_inputs]
1216
+ assert len(used_in) > 0
1217
+ if len(used_in) != 1:
1218
+ return None
1219
+ derivative = used_in[0]
1220
+
1221
+ # Case with multioutput formulas
1222
+ # TODO: process all derivative formulas!!!
1223
+ if len(derivative.var_names) != 1:
1224
+ wrap_opt_if_start = derivative.formula.find(
1225
+ f"wrap_opt_if({arg.nctype.name}"
1226
+ )
1227
+ if wrap_opt_if_start == -1:
1228
+ return None
1229
+
1230
+ wrap_opt_if_match = re.match(
1231
+ rf"wrap_opt_if\({arg.nctype.name},(.*?)\)",
1232
+ derivative.formula[wrap_opt_if_start:],
1233
+ )
1234
+ assert wrap_opt_if_match is not None
1235
+
1236
+ # Condition is between 'wrap_opt_if(var_name,' and ')'.
1237
+ condition_slice = slice(len(rf"wrap_opt_if\({arg.nctype.name},"), -1)
1238
+ wrap_opt_if_condition = wrap_opt_if_match.group(0)[
1239
+ condition_slice
1240
+ ].strip()
1241
+ # replace 'grad_input_mask[num]' with 'grad_fn->should_compute_output(num)'
1242
+ wrap_opt_if_condition = re.sub(
1243
+ r"grad_input_mask\[(\d+)\]",
1244
+ r"grad_fn->should_compute_output(\1)",
1245
+ wrap_opt_if_condition,
1246
+ )
1247
+ return f"{wrap_opt_if_condition}"
1248
+
1249
+ # Figure out the offset of the edge that uses this variable
1250
+ derivative_var_name = derivative.var_names[0]
1251
+ for edge_off, a in enumerate(args_with_derivatives):
1252
+ if a.name == derivative_var_name:
1253
+ break
1254
+ else:
1255
+ raise AssertionError()
1256
+ return f"grad_fn->should_compute_output({edge_off})"
1257
+
1258
+ if is_inplace_foreach:
1259
+ save_input_stmts = save_variables(info.all_saved_inputs, False, guard_for)
1260
+ if save_input_stmts:
1261
+ setup.append(
1262
+ LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
1263
+ preamble="", statements=save_input_stmts
1264
+ )
1265
+ )
1266
+ else:
1267
+ setup.extend(save_variables(info.all_saved_inputs, False, guard_for))
1268
+ for arg in args_with_derivatives:
1269
+ if is_tensor_list_type(arg.type):
1270
+ setup.append(f"grad_fn->{arg.name}_size_ = {arg.name}.size();")
1271
+ return setup
1272
+
1273
+ def setup_derivative(differentiable_inputs: List[DifferentiableInput]) -> List[str]:
1274
+ body: List[str] = []
1275
+ if is_out_fn:
1276
+ # For out functions, ensure that no input or output requires grad
1277
+ body.append(DECLARE_GRAD_FN.substitute(op="Node"))
1278
+ body.append(
1279
+ SETUP_NONE_REQUIRES_GRAD.substitute(
1280
+ base_name=base_name,
1281
+ args_to_check=[arg.name for arg in differentiable_inputs],
1282
+ )
1283
+ )
1284
+ body.append(
1285
+ SETUP_NONE_REQUIRES_GRAD.substitute(
1286
+ base_name=base_name,
1287
+ args_to_check=[arg.name for arg in differentiable_outputs],
1288
+ )
1289
+ )
1290
+ return body
1291
+
1292
+ op = info.op if info is not None and info.has_derivatives else "NotImplemented"
1293
+ setup = []
1294
+ if not is_inplace_foreach:
1295
+ setup.extend(
1296
+ ASSIGN_GRAD_FN.substitute(
1297
+ op=op,
1298
+ op_ctor=""
1299
+ if info is not None and info.has_derivatives
1300
+ else f'"{cpp.name(f.func)}"',
1301
+ args_with_derivatives=[arg.name for arg in args_with_derivatives],
1302
+ ).split("\n")
1303
+ )
1304
+ else:
1305
+ # note(crcrpar): Assuming in-place foreach function's self_arg is always TensorList.
1306
+ list_like_arg = "self"
1307
+ args = [arg.name for arg in args_with_derivatives]
1308
+ for i, arg in enumerate(args):
1309
+ if is_inplace_foreach and info is not None:
1310
+ if arg in refargname2inplace_foreacharg:
1311
+ foreach_arg = refargname2inplace_foreacharg[arg]
1312
+ args[i] = foreach_arg.name + (
1313
+ "[i]" if isinstance(foreach_arg.type, ListType) else ""
1314
+ )
1315
+ else:
1316
+ if arg == list_like_arg:
1317
+ args[i] = arg + "[i]"
1318
+ setup.extend(
1319
+ ASSIGN_VECTOR_OF_GRAD_FN.substitute(
1320
+ op=op,
1321
+ op_ctor=""
1322
+ if info is not None and info.has_derivatives
1323
+ else f'"{cpp.name(f.func)}"',
1324
+ args_with_derivatives=args,
1325
+ irange=f"{list_like_arg}.size()",
1326
+ ).split("\n")
1327
+ )
1328
+ setup.extend(emit_save_inputs())
1329
+
1330
+ body.extend(
1331
+ emit_check_no_requires_grad(differentiable_inputs, args_with_derivatives)
1332
+ )
1333
+ declare_grad_fn_template = (
1334
+ DECLARE_GRAD_FN if not is_inplace_foreach else DECLARE_VECTOR_OF_GRAD_FN
1335
+ )
1336
+ body.append(declare_grad_fn_template.substitute(op=op))
1337
+ body.append(SETUP_DERIVATIVE.substitute(setup=setup))
1338
+ return body
1339
+
1340
+ def emit_check_if_in_complex_autograd_allowlist() -> List[str]:
1341
+ body: List[str] = []
1342
+ if base_name in GRADIENT_IMPLEMENTED_FOR_COMPLEX:
1343
+ return body
1344
+ for arg in differentiable_outputs:
1345
+ name = arg.name
1346
+ # TODO: should be `arg.type.is_tensor_like()`?
1347
+ if arg.cpp_type == "at::Tensor" or arg.cpp_type in TENSOR_LIST_LIKE_CTYPES:
1348
+ body.append(f'throw_error_for_complex_autograd({name}, "{base_name}");')
1349
+ return body
1350
+
1351
+ def emit_check_no_requires_grad(
1352
+ tensor_args: List[DifferentiableInput],
1353
+ args_with_derivatives: List[DifferentiableInput],
1354
+ ) -> List[str]:
1355
+ """Checks that arguments without derivatives don't require grad"""
1356
+ body: List[str] = []
1357
+ for arg in tensor_args:
1358
+ if arg in args_with_derivatives:
1359
+ continue
1360
+ arg_name = arg.name
1361
+ if info and arg_name in info.non_differentiable_arg_names:
1362
+ continue
1363
+ if arg_name == "output":
1364
+ # Double-backwards definitions sometimes take in 'input' and
1365
+ # 'output', but only define the derivative for input.
1366
+ continue
1367
+ body.append(f'check_no_requires_grad({arg_name}, "{arg_name}", "{name}");')
1368
+ return body
1369
+
1370
+ def emit_original_self_definition() -> List[str]:
1371
+ body: List[str] = []
1372
+ if inplace:
1373
+ if is_inplace_foreach:
1374
+ body.append(
1375
+ "std::vector<c10::optional<at::Tensor>> original_selfs(self.size());"
1376
+ )
1377
+ else:
1378
+ body.append("c10::optional<at::Tensor> original_self;")
1379
+
1380
+ all_forward_grad_cond = []
1381
+ for derivative in fw_derivatives:
1382
+ if derivative.required_original_self_value:
1383
+ all_forward_grad_cond.append(
1384
+ get_any_has_forward_grad_name(derivative.var_names)
1385
+ )
1386
+
1387
+ if all_forward_grad_cond:
1388
+ if not is_inplace_foreach:
1389
+ body.append(f'if ({" || ".join(all_forward_grad_cond)}) {{')
1390
+ body.append(" original_self = self.clone();")
1391
+ body.append("}")
1392
+ else:
1393
+ current_all_forward_grad_cond = [
1394
+ f"{cond}[i]" for cond in all_forward_grad_cond
1395
+ ]
1396
+ body.append("for (const auto& i : c10::irange(self.size())) {")
1397
+ body.append(
1398
+ f" if ({' || '.join(current_all_forward_grad_cond)}) {{"
1399
+ )
1400
+ body.append(" original_selfs[i] = self[i].clone();")
1401
+ body.append(" }")
1402
+ body.append("}")
1403
+
1404
+ return body
1405
+
1406
+ def save_variables(
1407
+ saved_variables: Sequence[SavedAttribute],
1408
+ is_output: bool,
1409
+ guard_for: Callable[[SavedAttribute], Optional[str]] = lambda name: None,
1410
+ ) -> Sequence[str]:
1411
+ # assign the saved variables to the generated grad_fn
1412
+ stmts: List[str] = []
1413
+ for arg in sorted(saved_variables, key=lambda sa: str(sa.nctype.name)):
1414
+ name = (
1415
+ arg.nctype.name.name
1416
+ if isinstance(arg.nctype.name, SpecialArgName)
1417
+ else arg.nctype.name
1418
+ )
1419
+ foreacharg: Optional[Argument] = None
1420
+ is_foreacharg_list_type: bool = False
1421
+ type = arg.nctype.type
1422
+ expr = arg.expr
1423
+ stmts_prepend = None
1424
+ if is_inplace_foreach and info is not None:
1425
+ # todo(crcrpar): See if we can add some check e.g. `assert foreacharg is not None`.
1426
+ # for now the example assert would fail.
1427
+ name_to_query = name.split("_scalar_type")[0]
1428
+ if name_to_query in refargname2inplace_foreacharg:
1429
+ foreacharg = refargname2inplace_foreacharg[name_to_query]
1430
+ is_foreacharg_list_type = isinstance(foreacharg.type, ListType)
1431
+ if foreacharg is not None:
1432
+ name_in_expr = (
1433
+ f"{foreacharg.name}{'[i]' if is_foreacharg_list_type else ''}"
1434
+ )
1435
+ src_name = name
1436
+ if "_scalar_type" in src_name:
1437
+ split_src_name = src_name.split("_scalar_type")
1438
+ assert len(split_src_name) == 2
1439
+ src_name = split_src_name[0]
1440
+ expr = expr.replace(src_name, name_in_expr)
1441
+ if (
1442
+ type == BaseCType(tensorT)
1443
+ or type == OptionalCType(BaseCType(tensorT))
1444
+ or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
1445
+ or (is_output and type == BaseCType(scalarT))
1446
+ ):
1447
+ # note(crcrpar): Here `expr` is generated from scratch, `arg.expr` is ignored.
1448
+ var = name
1449
+ name += "_"
1450
+ if var == "self" and inplace:
1451
+ original_self_var = (
1452
+ "original_self"
1453
+ if not is_inplace_foreach
1454
+ else "original_selfs[i]"
1455
+ )
1456
+ self_var = var if not is_inplace_foreach else var + "[i]"
1457
+ stmts_prepend = f"if (!{original_self_var}.has_value()) {original_self_var} = {self_var}.clone()"
1458
+ var = f"{original_self_var}.value()"
1459
+ assert not is_output
1460
+ if inplace and is_output:
1461
+ assert name == "result_"
1462
+ var = (
1463
+ "self[i]"
1464
+ if is_inplace_foreach or is_foreacharg_list_type
1465
+ else "self"
1466
+ )
1467
+ is_inplace_view = f"{var}.is_view()"
1468
+ expr = f"SavedVariable({var}, {str(is_output).lower()}, {is_inplace_view})"
1469
+ else:
1470
+ expr = f"SavedVariable({var}, {str(is_output).lower()})"
1471
+ if foreacharg is not None and "original_selfs" not in expr:
1472
+ expr = expr.replace(src_name, name_in_expr)
1473
+ elif (
1474
+ type == BaseCType(tensorListT)
1475
+ or type == ListCType(OptionalCType(BaseCType(tensorT)))
1476
+ or type == BaseCType(iTensorListRefT)
1477
+ or type == VectorCType(BaseCType(tensorT))
1478
+ ):
1479
+ # See Note [nuanced return type of out-of-place foreach functions]
1480
+ if type == VectorCType(BaseCType(tensorT)):
1481
+ assert is_foreach and is_output
1482
+ expr = f"make_saved_variable_list({name}, {str(is_foreach and is_output).lower()})"
1483
+ name += "_"
1484
+ elif type == BaseCType(intArrayRefT):
1485
+ expr = expr + ".vec()"
1486
+ elif type == BaseCType(symIntArrayRefT):
1487
+ expr = expr + ".vec()"
1488
+ elif type == BaseCType(stringT):
1489
+ expr = f"std::string({expr})"
1490
+ elif type == OptionalCType(BaseCType(stringT)):
1491
+ expr = f"{expr}.has_value() ? c10::optional<std::string>(std::string({expr}.value())) : c10::nullopt"
1492
+ elif type == ArrayRefCType(
1493
+ elem=BaseCType(type=BaseCppType(ns="at", name="Scalar"))
1494
+ ):
1495
+ expr = expr + ".vec()"
1496
+
1497
+ guard = guard_for(arg)
1498
+ if guard is None:
1499
+ if stmts_prepend:
1500
+ stmts.append(f"{stmts_prepend};")
1501
+ stmts.append(f"grad_fn->{name} = {expr};")
1502
+ else:
1503
+ stmts.append(f"if ({guard}) {{")
1504
+ if stmts_prepend:
1505
+ stmts.append(f" {stmts_prepend};")
1506
+ stmts.append(f" grad_fn->{name} = {expr};")
1507
+ stmts.append("}")
1508
+ return stmts
1509
+
1510
+ # Generates a Dispatcher::redispatch() call into the dispatcher. We do this mainly for performance reasons:
1511
+ # - Pre-compute the full DispatchKeySet. This saves the dispatcher from having to read from TLS.
1512
+ # - redispatch() avoids a redundant call to RecordFunction, which was already called right before
1513
+ # we entered this autograd kernel.
1514
+ def emit_dispatch_call(
1515
+ f: NativeFunction, input_base: str, unpacked_args: Sequence[str]
1516
+ ) -> str:
1517
+ """Dispatch call via function in a namespace or method on Tensor."""
1518
+ dispatcher_sig = DispatcherSignature.from_schema(f.func)
1519
+ dispatcher_exprs = dispatcher_sig.exprs()
1520
+
1521
+ # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
1522
+ # Ops also always have a function variant of the redispatch API.
1523
+ # See Note [Plumbing Keys Through The Dispatcher] for details.
1524
+ dispatch_key_set = "ks & c10::after_autograd_keyset"
1525
+ call = CALL_REDISPATCH.substitute(
1526
+ api_name=cpp.name(
1527
+ f.func,
1528
+ faithful_name_for_out_overloads=True,
1529
+ symint_overload=f.func.has_symint(),
1530
+ ),
1531
+ unpacked_args=[dispatch_key_set] + list(unpacked_args),
1532
+ )
1533
+ return call
1534
+
1535
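# Illustrative sketch (assumed example, not verbatim generator output): for a
# hypothetical binary op, emit_dispatch_call() above produces a redispatch call
# of roughly this shape, with the precomputed DispatchKeySet prepended to the
# unpacked arguments so the dispatcher does not have to re-read TLS.
_EXAMPLE_REDISPATCH_CALL = (
    "at::redispatch::mul(ks & c10::after_autograd_keyset, self_, other_)"
)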
+ def wrap_output(
1536
+ f: NativeFunction, unpacked_bindings: List[Binding], var: str
1537
+ ) -> str:
1538
+ call = ""
1539
+ rhs_value: Optional[str] = None
1540
+ if not any(r.type.is_tensor_like() for r in f.func.returns):
1541
+ rhs_value = var
1542
+ else:
1543
+ rhs_value = f"std::move({var})"
1544
+ assert rhs_value is not None
1545
+ call += ASSIGN_RETURN_VALUE.substitute(
1546
+ return_values=tie_return_values(f), rhs_value=rhs_value
1547
+ )
1548
+ return call
1549
+
1550
+ def check_tensorimpl_and_storage(
1551
+ call: str, unpacked_bindings: List[Binding]
1552
+ ) -> str:
1553
+ # See NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
1554
+ stmts_before_call: List[str] = []
1555
+ stmts_after_call: List[str] = []
1556
+
1557
+ if cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE:
1558
+ return call
1559
+
1560
+ # Check properties of inputs (enforce (1))
1561
+ for unpacked_binding in unpacked_bindings:
1562
+ arg = unpacked_binding.name
1563
+ noref_cpp_type = unpacked_binding.nctype.type.remove_const_ref()
1564
+ if noref_cpp_type == BaseCType(tensorListT) or noref_cpp_type == BaseCType(
1565
+ iTensorListRefT
1566
+ ):
1567
+ stmts_before_call += [
1568
+ SAVE_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
1569
+ SAVE_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
1570
+ ]
1571
+ stmts_after_call += [
1572
+ ENFORCE_SAME_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
1573
+ ENFORCE_SAME_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
1574
+ ]
1575
+ elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
1576
+ stmts_before_call += [
1577
+ SAVE_OPTIONALTENSORLIST_STORAGE.substitute(tensorlist_name=arg),
1578
+ SAVE_OPTIONALTENSORLIST_IMPL.substitute(tensorlist_name=arg),
1579
+ ]
1580
+ stmts_after_call += [
1581
+ ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE.substitute(
1582
+ tensorlist_name=arg
1583
+ ),
1584
+ ENFORCE_SAME_OPTIONALTENSORLIST_IMPL.substitute(
1585
+ tensorlist_name=arg
1586
+ ),
1587
+ ]
1588
+ elif noref_cpp_type == BaseCType(tensorT):
1589
+ stmts_before_call += [
1590
+ SAVE_TENSOR_STORAGE.substitute(tensor_name=arg),
1591
+ SAVE_TENSOR_IMPL.substitute(tensor_name=arg),
1592
+ ]
1593
+ stmts_after_call += [
1594
+ ENFORCE_SAME_TENSOR_STORAGE.substitute(
1595
+ tensor_name=arg, out_tensor_name=arg
1596
+ ),
1597
+ ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name=arg),
1598
+ ]
1599
+
1600
+ assert (stmts_before_call and stmts_after_call) or (
1601
+ not stmts_before_call and not stmts_after_call
1602
+ )
1603
+
1604
+ # Check properties of outputs (enforce (2), (3))
1605
+ if f.func.kind() not in (SchemaKind.inplace, SchemaKind.out):
1606
+ base_name = f.func.name.name.base # TODO: should be str(f.func.name.name)?
1607
+ aliased_arg_name = ALL_VIEW_FUNCTIONS.get(base_name, None)
1608
+ if aliased_arg_name is not None:
1609
+ aliased_arg_name = unpacked_name(aliased_arg_name)
1610
+ for i, (ret, ret_name) in enumerate(
1611
+ zip(f.func.returns, cpp.return_names(f))
1612
+ ):
1613
+ noref_cpp_type = cpp.return_type(ret, symint=True).remove_const_ref()
1614
+ if noref_cpp_type == BaseCType(tensorT):
1615
+ if aliased_arg_name is not None:
1616
+ assert (
1617
+ i == 0
1618
+ ), "Expect non-CompositeImplicitAutograd view function {base} to return single output"
1619
+ stmts_after_call += [
1620
+ ENFORCE_SAME_TENSOR_STORAGE.substitute(
1621
+ tensor_name=aliased_arg_name, out_tensor_name=ret_name
1622
+ )
1623
+ ]
1624
+ else:
1625
+ if (
1626
+ type_wrapper_name(f)
1627
+ not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
1628
+ ):
1629
+ stmts_after_call += [
1630
+ ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE.substitute(
1631
+ tensor_name=ret_name, fn_name=type_wrapper_name(f)
1632
+ )
1633
+ ]
1634
+
1635
+ if type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT:
1636
+ stmts_after_call += [
1637
+ ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE.substitute(
1638
+ tensor_name=ret_name, fn_name=type_wrapper_name(f)
1639
+ )
1640
+ ]
1641
+
1642
+ # Currently we don't have any functions that return the following types, but
1643
+ # we should update the checks once we do
1644
+ elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
1645
+ raise AssertionError(
1646
+ f"Please add use_count checks for {noref_cpp_type}"
1647
+ )
1648
+ elif noref_cpp_type == BaseCType(tensorListT):
1649
+ raise AssertionError(
1650
+ f"Please add use_count checks for {noref_cpp_type}"
1651
+ )
1652
+
1653
+ if stmts_before_call and stmts_after_call:
1654
+ call = (
1655
+ RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_before_call)
1656
+ + call
1657
+ + RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_after_call)
1658
+ )
1659
+ return call
1660
+
1661
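# Illustrative sketch (assumed shape, not verbatim template output): the
# storage/TensorImpl sanity checks assembled by check_tensorimpl_and_storage()
# above are emitted only in debug builds, wrapping the dispatch call roughly
# like this (placeholder text in angle brackets):
_EXAMPLE_DEBUG_CHECKS = """\
#ifndef NDEBUG
auto self__storage_saved = self_.has_storage()
    ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
#endif
<redispatch call>
#ifndef NDEBUG
if (self__storage_saved.has_value())
  TORCH_INTERNAL_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
#endif
"""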
+ def emit_call(
1662
+ f: NativeFunction, unpacked_bindings: List[Binding], try_jit_decomposition: bool
1663
+ ) -> str:
1664
+ # We only care about adding `at::AutoDispatchBelowAutograd` guard for non-variable dispatch
1665
+ # (which corresponds to 'use_derived' strategy). The purpose of this guard is to make sure
1666
+ # the baseType operations still dispatch to non-Variable type, even if the arguments passed
1667
+ # in are now Variables.
1668
+ # See NOTE [ Treating Variables as non-Variables in type dispatch ] for details.
1669
+ unpacked_args = [b.name for b in unpacked_bindings]
1670
+ base_type_call = emit_dispatch_call(f, "self_", unpacked_args)
1671
+
1672
+ if get_view_info(f) is not None or modifies_arguments(f):
1673
+ guard = "at::AutoDispatchBelowAutograd guard;"
1674
+ else:
1675
+ guard = "at::AutoDispatchBelowADInplaceOrView guard;"
1676
+
1677
+ any_has_forward_grad = (
1678
+ get_any_has_fw_grad_cond(derivative=None)
1679
+ if requires_derivative
1680
+ else "false"
1681
+ )
1682
+ return_types = ", ".join(
1683
+ [cpp.return_type(a, symint=True).cpp_type() for a in f.func.returns]
1684
+ )
1685
+ if len(f.func.returns) > 1:
1686
+ return_types = f"std::tuple<{return_types}>"
1687
+
1688
+ arg_names = [
1689
+ a.name
1690
+ for a in cpp.arguments(
1691
+ f.func.arguments,
1692
+ faithful=True,
1693
+ symint=True,
1694
+ method=False,
1695
+ cpp_no_default_args=set(),
1696
+ )
1697
+ ]
1698
+
1699
+ if not modifies_arguments(f) and not returns_void:
1700
+ if try_jit_decomposition:
1701
+ call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES_JVP_DECOMP.substitute(
1702
+ base_type_call=base_type_call,
1703
+ tmp_var=TMP_VAR,
1704
+ guard=guard,
1705
+ any_has_forward_grad=any_has_forward_grad,
1706
+ op_name=cpp.name(f.func),
1707
+ op_overload=f.func.name.overload_name,
1708
+ return_types=return_types,
1709
+ arg_names=arg_names,
1710
+ )
1711
+ else:
1712
+ call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES.substitute(
1713
+ base_type_call=base_type_call,
1714
+ tmp_var=TMP_VAR,
1715
+ guard=guard,
1716
+ )
1717
+
1718
+ call += wrap_output(f, unpacked_bindings, TMP_VAR)
1719
+ else:
1720
+ assert not try_jit_decomposition
1721
+ call = DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES.substitute(
1722
+ base_type_call=base_type_call, guard=guard
1723
+ )
1724
+ call = check_tensorimpl_and_storage(call, unpacked_bindings)
1725
+ return call
1726
+
1727
+ def emit_history() -> str:
1728
+ fn = "rebase" if modifies_arguments(f) and view_info is None else "set"
1729
+ output_names = [r.name for r in differentiable_outputs]
1730
+ # TODO: flatten allocates a std::vector, which could be expensive
1731
+ outs = CodeTemplate("flatten_tensor_args( ${outs} )").substitute(
1732
+ outs=output_names if not is_inplace_foreach else "self"
1733
+ )
1734
+ if not is_inplace_foreach:
1735
+ return SET_HISTORY.substitute(fn=fn, differentiable_outputs=outs)
1736
+ else:
1737
+ return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
1738
+ preamble=(
1739
+ f"auto differentiable_outputs = {outs};\n"
1740
+ f"TORCH_INTERNAL_ASSERT(differentiable_outputs.size() == grad_fns.size());"
1741
+ ),
1742
+ statements=f"{fn}_history(differentiable_outputs[i], grad_fns[i]);",
1743
+ )
1744
+
1745
+ def emit_save_outputs() -> str:
1746
+ if is_out_fn:
1747
+ # out functions don't currently support differentiation
1748
+ return ""
1749
+ if info is not None and info.has_derivatives:
1750
+ stmts = save_variables(info.all_saved_outputs, True)
1751
+ if len(stmts) == 0:
1752
+ return ""
1753
+ if not is_inplace_foreach:
1754
+ return CONDITIONAL.substitute(cond="grad_fn", statements=stmts)
1755
+ else:
1756
+ return LOOP_OVER_VECTOR_OF_GRAD_FNS.substitute(
1757
+ preamble="", statements=stmts
1758
+ )
1759
+ return ""
1760
+
1761
+ def emit_any_requires_grad() -> List[str]:
1762
+ extra_condition = ""
1763
+ if info and info.output_differentiability_conditions:
1764
+ assert len(info.output_differentiability_conditions) == 1
1765
+ extra_condition = f"_any_requires_grad &= ({info.output_differentiability_conditions[0]});"
1766
+ names_of_args_with_derivatives = [arg.name for arg in args_with_derivatives]
1767
+ if is_inplace_foreach and info is not None:
1768
+ for i, arg in enumerate(names_of_args_with_derivatives):
1769
+ for f_arg, r_arg in inplace_foreacharg2refarg.items():
1770
+ if arg == r_arg.name:
1771
+ names_of_args_with_derivatives[i] = f_arg.name
1772
+ return [
1773
+ SETUP_ANY_REQUIRES_GRAD.substitute(
1774
+ args_with_derivatives=names_of_args_with_derivatives,
1775
+ extra_differentiability_conditions=extra_condition,
1776
+ )
1777
+ ]
1778
+
1779
+ def get_any_has_forward_grad_name(var_names: Tuple[str, ...]) -> str:
1780
+ if len(var_names) == 1:
1781
+ return f"_any_has_forward_grad_{var_names[0]}"
1782
+ else:
1783
+ return f'_any_has_forward_grad_{"_".join(var_names)}'
1784
+
1785
+ def emit_any_has_forward_grad() -> List[str]:
1786
+ content: List[str] = []
1787
+ if not is_foreach:
1788
+ for derivative in fw_derivatives:
1789
+ requires_fw_grad = get_any_has_fw_grad_cond(derivative=derivative)
1790
+ if info and info.output_differentiability_conditions:
1791
+ assert len(info.output_differentiability_conditions) == 1
1792
+ requires_fw_grad = f"({info.output_differentiability_conditions[0]}) && {requires_fw_grad}"
1793
+ content.append(
1794
+ f"[[maybe_unused]] auto {get_any_has_forward_grad_name(derivative.var_names)} = {requires_fw_grad};"
1795
+ )
1796
+ else:
1797
+ for derivative in fw_derivatives:
1798
+ bool_vector_name = get_any_has_forward_grad_name(derivative.var_names)
1799
+ cur_derivative_conditions = []
1800
+ for inp in differentiable_inputs:
1801
+ if derivative.required_inputs_fw_grad is None:
1802
+ continue
1803
+ if inp.name not in derivative.required_inputs_fw_grad:
1804
+ continue
1805
+ inp_name = (
1806
+ inp.name
1807
+ if not inplace
1808
+ else refargname2inplace_foreacharg[inp.name].name
1809
+ )
1810
+ inp_type = (
1811
+ inp.type
1812
+ if not inplace
1813
+ else refargname2inplace_foreacharg[inp.name].type
1814
+ )
1815
+ is_list_type = is_tensor_list_type(inp_type)
1816
+ if is_list_type:
1817
+ if inp_name != "self":
1818
+ content.append(
1819
+ FW_DERIVATIVE_SIZE_CHECK_TEMPLATE.substitute(
1820
+ inp_name=inp_name
1821
+ )
1822
+ )
1823
+ cur_derivative_conditions.append(
1824
+ FW_DERIVATIVE_CHECK_TEMPLATE.substitute(
1825
+ req_inp=inp_name + "[i]"
1826
+ )
1827
+ )
1828
+ else:
1829
+ cur_derivative_conditions.append(
1830
+ FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp_name)
1831
+ )
1832
+
1833
+ content.append(f"std::vector<bool> {bool_vector_name}(self.size());")
1834
+ content.append("for (const auto& i : c10::irange(self.size())) {")
1835
+ content.append(
1836
+ f" {bool_vector_name}[i] = {' || '.join(cur_derivative_conditions)};"
1837
+ )
1838
+ content.append("}")
1839
+ return content
1840
+
1841
+ def emit_check_inplace() -> List[str]:
1842
+ if not inplace:
1843
+ return []
1844
+ return [
1845
+ f"check_inplace({arg.name}, _any_requires_grad);"
1846
+ for arg in differentiable_outputs
1847
+ ]
1848
+
1849
+ def emit_fw_derivatives() -> List[str]:
1850
+ content: List[str] = []
1851
+ fw_grad_setters: List[str] = []
1852
+ for derivative in fw_derivatives:
1853
+ res = derivative.var_names
1854
+ if f.func.name.name.inplace:
1855
+ assert (
1856
+ len(res) == 1
1857
+ ), "Expected number of outputs to be 1 if function is inplace"
1858
+ # TODO update this when inplace namings are unified
1859
+ res = ("self",)
1860
+
1861
+ assert derivative.required_inputs_fw_grad is not None
1862
+
1863
+ unpacked_arguments = ""
1864
+ for inp in differentiable_inputs:
1865
+ inp_name = inp.name
1866
+ is_input_tensorlist = is_foreach and is_tensor_list_type(
1867
+ inp.type
1868
+ if not inplace
1869
+ else refargname2inplace_foreacharg[inp.name].type
1870
+ )
1871
+ input_suffix = "[i]" if is_input_tensorlist else ""
1872
+ if is_inplace_foreach:
1873
+ if inp.name in refargname2inplace_foreacharg:
1874
+ inp_name = refargname2inplace_foreacharg[inp.name].name
1875
+ zeros_fn = (
1876
+ "zeros"
1877
+ if inplace and inp.name == "self"
1878
+ else "_efficientzerotensor"
1879
+ )
1880
+ if inp.name in derivative.required_inputs_fw_grad:
1881
+ unpacked_arguments += (
1882
+ FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
1883
+ inp_name=inp.name,
1884
+ inp=inp_name + input_suffix,
1885
+ zeros_fn=zeros_fn,
1886
+ )
1887
+ )
1888
+ if inp.name in (derivative.required_inputs_primal or []):
1889
+ unpacked_arguments += (
1890
+ FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
1891
+ inp_name=inp.name,
1892
+ inp=inp_name + input_suffix,
1893
+ )
1894
+ )
1895
+ if derivative.required_original_self_value:
1896
+ input_suffix = "s[i]" if is_inplace_foreach else ""
1897
+ unpacked_arguments += FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
1898
+ inp_name="original_self",
1899
+ inp="original_self" + input_suffix,
1900
+ zeros_fn=zeros_fn,
1901
+ )
1902
+ unpacked_arguments += FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
1903
+ inp_name="original_self",
1904
+ inp="original_self" + input_suffix,
1905
+ )
1906
+ elif inplace and derivative.is_reusing_outplace_formula:
1907
+ # The gradient wasn't already cloned, do it if grad mode is enabled
1908
+ unpacked_arguments += (
1909
+ "self_t = GradMode::is_enabled() ? self_t.clone() : self_t;"
1910
+ )
1911
+
1912
+ if inplace:
1913
+ is_inplace_str = "true"
1914
+ else:
1915
+ is_inplace_str = "false"
1916
+
1917
+ requires_fw_grad = get_any_has_forward_grad_name(derivative.var_names)
1918
+
1919
+ if all(
1920
+ (isinstance(var_type, BaseType) and var_type.is_tensor_like())
1921
+ for var_type in derivative.var_types
1922
+ ):
1923
+ # Is there a way to get from BaseType to BaseCType
1924
+ if len(derivative.var_types) == 1:
1925
+ opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
1926
+ if not is_foreach:
1927
+ fw_grad_setters.append(
1928
+ FW_DERIVATIVE_SETTER_TENSOR.substitute(
1929
+ out_arg=res[0], is_inplace=is_inplace_str
1930
+ )
1931
+ )
1932
+ else:
1933
+ assert res[0] == ("result" if not inplace else "self")
1934
+ fw_grad_setters.append(
1935
+ FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute(
1936
+ out_arg=res[0], is_inplace=is_inplace_str
1937
+ )
1938
+ )
1939
+ requires_fw_grad += f" && ({derivative.var_names[0]}.defined())"
1940
+ else:
1941
+ tuple_type = TupleCType(
1942
+ [BaseCType(tensorT)] * len(derivative.var_types)
1943
+ )
1944
+ opt_res_grad_type = OptionalCType(tuple_type).cpp_type()
1945
+ for idx, single_res in enumerate(res):
1946
+ fw_grad_setters.append(
1947
+ FW_DERIVATIVE_SETTER_MULTI_OUTPUT.substitute(
1948
+ idx=idx, all_res="_".join(res), out_arg=single_res
1949
+ )
1950
+ )
1951
+ elif (
1952
+ isinstance(derivative.var_types[0], ListType)
1953
+ and derivative.var_types[0].is_tensor_like()
1954
+ ):
1955
+ assert (
1956
+ len(derivative.var_types) == 1
1957
+ ), "Expected number of outputs to be 1 if function returns ListType"
1958
+ if not is_foreach:
1959
+ opt_res_grad_type = OptionalCType(
1960
+ VectorCType(BaseCType(tensorT))
1961
+ ).cpp_type()
1962
+ fw_grad_setters.append(
1963
+ FW_DERIVATIVE_SETTER_TENSOR_LIST.substitute(
1964
+ out_arg=res[0], is_inplace=is_inplace_str
1965
+ )
1966
+ )
1967
+ else:
1968
+ # TODO(crcrpar): Should this (= the foreach specific logic) be refactored somehow?
1969
+ # Only out-place foreach functions that have entries in `tools/autograd/derivatives.yaml`
1970
+ # can reach here.
1971
+ opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
1972
+ fw_grad_setters.append(
1973
+ FW_DERIVATIVE_SETTER_TENSOR_FOREACH.substitute(
1974
+ out_arg=res[0], is_inplace=is_inplace_str
1975
+ )
1976
+ )
1977
+ else:
1978
+ raise RuntimeError("Unsupported output type for forward derivative")
1979
+
1980
+ if not is_foreach:
1981
+ fw_grad_opt_definition = f"{opt_res_grad_type} {'_'.join(res)}_new_fw_grad_opt = c10::nullopt;"
1982
+ # View ops create fw_grad that already is a view of the base's fw_grad so just use that
1983
+ content.append(
1984
+ FW_DERIVATIVE_TEMPLATE.substitute(
1985
+ fw_grad_opt_definition=fw_grad_opt_definition,
1986
+ requires_fw_grad=requires_fw_grad,
1987
+ formula=derivative.formula,
1988
+ out_arg="_".join(res),
1989
+ unpacked_arguments=unpacked_arguments,
1990
+ )
1991
+ )
1992
+ else:
1993
+ # note(crcrpar): Assuming `self` is TensorList.
1994
+ fw_grad_opt_definition = (
1995
+ f"std::vector<{opt_res_grad_type}> {'_'.join(res)}_new_fw_grad_opts"
1996
+ "(self.size(), c10::nullopt);"
1997
+ )
1998
+ foreach_forward_grad_formula = derivative.formula
1999
+ _foreach_arg: Union[Argument, DifferentiableInput]
2000
+ if inplace:
2001
+ for _foreach_arg, _ref_arg in inplace_foreacharg2refarg.items():
2002
+ # note(crcrpar): Massage only Scalar and ArrayRef<Scalar> here.
2003
+ if not (
2004
+ is_tensor_type(_foreach_arg.type)
2005
+ or is_tensor_list_type(_foreach_arg.type)
2006
+ ):
2007
+ pattern = _foreach_arg.name
2008
+ if isinstance(_foreach_arg.type, ListType):
2009
+ pattern += "[i]"
2010
+ foreach_forward_grad_formula = (
2011
+ foreach_forward_grad_formula.replace(
2012
+ _ref_arg.name, pattern
2013
+ )
2014
+ )
2015
+ else:
2016
+ if (
2017
+ "result" in foreach_forward_grad_formula
2018
+ and "result[i]" not in foreach_forward_grad_formula
2019
+ ):
2020
+ foreach_forward_grad_formula = (
2021
+ foreach_forward_grad_formula.replace("result", "result[i]")
2022
+ )
2023
+
2024
+ content.append(
2025
+ FW_DERIVATIVE_FOREACH_TEMPLATE.substitute(
2026
+ fw_grad_opt_definition=fw_grad_opt_definition,
2027
+ vector_of_optional_tensor=f"{'_'.join(res)}_new_fw_grad_opts",
2028
+ any_has_forward_grad_for_current_index=" || ".join(
2029
+ get_any_has_forward_grad_name(derivative.var_names) + "[i]"
2030
+ for derivative in fw_derivatives
2031
+ ),
2032
+ formula=foreach_forward_grad_formula,
2033
+ unpacked_arguments=unpacked_arguments,
2034
+ )
2035
+ )
2036
+
2037
+ # Set all the grads at the end to avoid: https://github.com/pytorch/pytorch/issues/67367
2038
+ content.append("\n".join(fw_grad_setters))
2039
+ return content
2040
+
2041
+ def get_any_has_fw_grad_cond(derivative: Optional[ForwardDerivative]) -> str:
2042
+ #
2043
+ # Produces a condition string (e.g., "isFwGradDefined(grad_output) || isFwGradDefined(output)")
2044
+ #
2045
+ if derivative is None:
2046
+ # (1) If a derivative is NOT provided, cond will check fw_grad of ALL differentiable inputs
2047
+ # - Used in the out_fn case when we want to forbid fw derivatives
2048
+ # - Used in the case where the fw_derivative is not defined, but we want
2049
+ # to check if there is a decomposition registered for jvp
2050
+ to_check: List[str] = []
2051
+ for inp in list(
2052
+ mapMaybe(
2053
+ gen_differentiable_input,
2054
+ f.func.arguments.non_out + list(f.func.arguments.out), # type: ignore[operator]
2055
+ )
2056
+ ):
2057
+ if is_tensor_type(inp.type):
2058
+ to_check.append(
2059
+ FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp.name)
2060
+ )
2061
+ elif is_tensor_list_type(inp.type):
2062
+ to_check.append(
2063
+ FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE.substitute(
2064
+ req_inp=inp.name
2065
+ )
2066
+ )
2067
+ else:
2068
+ raise RuntimeError(
2069
+ f'Unsupported input type for "{name}" when forbidding forward AD usage.'
2070
+ )
2071
+ return f'({" || ".join(to_check)})'
2072
+ else:
2073
+ # (2) If derivative is provided, use that information to determine which inputs
2074
+ # to check fw_grad for
2075
+ assert derivative.required_inputs_fw_grad is not None
2076
+
2077
+ if len(derivative.required_inputs_fw_grad) == 0:
2078
+ # Handle functions like stack
2079
+ # For these, we don't unpack anything and always call the user function
2080
+ if not (
2081
+ len(differentiable_inputs) == 1
2082
+ and is_tensor_list_type(differentiable_inputs[0].type)
2083
+ ):
2084
+ raise RuntimeError(
2085
+ f'No differentiable input to "{name}" is a differentiable Tensor (as the provided '
2086
+ "forward AD formula does not use any input tangent) even though a forward gradient "
2087
+ "formula has been defined for it. This case should only happen for function that "
2088
+ "take a single TensorList as input. All other cases are not supported right now."
2089
+ )
2090
+ any_has_fw_grad = "true"
2091
+ else:
2092
+ any_has_fw_grad = " || ".join(
2093
+ [
2094
+ (
2095
+ FW_DERIVATIVE_TENSORLIST_CHECK_TEMPLATE
2096
+ if is_tensor_list_type(inp.type)
2097
+ else FW_DERIVATIVE_CHECK_TEMPLATE
2098
+ ).substitute(req_inp=inp.name)
2099
+ for inp in differentiable_inputs
2100
+ if inp.name in derivative.required_inputs_fw_grad
2101
+ ]
2102
+ )
2103
+ any_has_fw_grad = f"({any_has_fw_grad})"
2104
+
2105
+ return any_has_fw_grad
2106
+
2107
+ def emit_forbid_fw_derivatives(is_out_fn: bool = False) -> str:
2108
+ if is_out_fn:
2109
+ msg = "because it is an out= function"
2110
+ else:
2111
+ msg = (
2112
+ "because it has not been implemented yet.\\nPlease file an issue "
2113
+ "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
2114
+ "so that we can prioritize its implementation."
2115
+ )
2116
+ cond = get_any_has_fw_grad_cond(derivative=None)
2117
+ return (
2118
+ FW_DERIVATIVE_FORBID_TEMPLATE.substitute(cond=cond, name=name, msg=msg)
2119
+ if cond != ""
2120
+ else ""
2121
+ )
2122
+
2123
+ body: List[str] = []
2124
+ unpack_args_stats, unpacked_bindings = unpack_args(f)
2125
+
2126
+ body.extend(unpack_args_stats)
2127
+ if requires_derivative:
2128
+ body.extend(emit_any_requires_grad())
2129
+ body.extend(emit_any_has_forward_grad())
2130
+ body.extend(emit_check_inplace())
2131
+ body.extend(emit_original_self_definition())
2132
+ body.extend(setup_derivative(differentiable_inputs))
2133
+ body.append(declare_returned_variables(f))
2134
+
2135
+ body.append(emit_call(f, unpacked_bindings, try_jit_decomposition))
2136
+ if requires_derivative:
2137
+ # set_flags has to appear after version_counter, because rebase_history
2138
+ # requires that the counter is incremented before it is called
2139
+ body.append(emit_history())
2140
+ body.extend(emit_check_if_in_complex_autograd_allowlist())
2141
+
2142
+ if is_out_fn:
2143
+ body.append(emit_forbid_fw_derivatives(is_out_fn=True))
2144
+ else:
2145
+ if requires_derivative and not try_jit_decomposition:
2146
+ if len(fw_derivatives) > 0:
2147
+ body.extend(emit_fw_derivatives())
2148
+ else:
2149
+ body.append(emit_forbid_fw_derivatives())
2150
+
2151
+ if requires_derivative:
2152
+ # Save only after the forward AD has been set up
2153
+ body.append(emit_save_outputs())
2154
+
2155
+ if str(f.func.name.name) in RESET_GRAD_ACCUMULATOR:
2156
+ # `inplace` implies that there is exactly one output named `self`,
2157
+ # so we can keep the generated code easy. If you need to
2158
+ # `reset_grad_accumulator` in an operator that's not `inplace`, you can
2159
+ # remove this assert but the code generation will get more elaborate
2160
+ assert inplace
2161
+ body.append("reset_grad_accumulator(self);")
2162
+ if not returns_void:
2163
+ body.append(f"return {get_return_value(f)};")
2164
+ return body
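# Illustrative summary (assumed, heavily simplified; the strings below are
# section labels, not generated code): the body assembled above is a flat list
# of C++ statements ordered roughly as follows.
_EXAMPLE_EMITTED_BODY_ORDER = [
    "unpack_args",
    "emit_any_requires_grad / emit_any_has_forward_grad / emit_check_inplace",
    "setup_derivative (create grad_fn, save inputs)",
    "emit_call (redispatch under a below-autograd guard)",
    "emit_history (set/rebase history on differentiable outputs)",
    "emit_fw_derivatives or emit_forbid_fw_derivatives",
    "emit_save_outputs",
    "return <wrapped outputs>",
]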
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/load_derivatives.py ADDED
@@ -0,0 +1,1011 @@
1
+ # Parses derivatives.yaml into autograd functions
2
+ #
3
+ # Each autograd function is represented by `DifferentiabilityInfo` containing
4
+ # a list of `Derivative`. See `torchgen.api.autograd` for the data models.
5
+ import re
6
+ from collections import defaultdict
7
+ from typing import Any, Counter, Dict, List, Match, Optional, Sequence, Set, Tuple
8
+
9
+ import yaml
10
+ from torchgen.api import cpp
11
+
12
+ from torchgen.api.autograd import (
13
+ Derivative,
14
+ DifferentiabilityInfo,
15
+ ForwardDerivative,
16
+ SavedAttribute,
17
+ )
18
+ from torchgen.api.types import (
19
+ BaseCType,
20
+ Binding,
21
+ boolT,
22
+ CppSignatureGroup,
23
+ layoutT,
24
+ longT,
25
+ NamedCType,
26
+ OptionalCType,
27
+ scalarTypeT,
28
+ SpecialArgName,
29
+ stringT,
30
+ symIntArrayRefT,
31
+ SymIntT,
32
+ tensorGeometryT,
33
+ tensorOptionsT,
34
+ typeAndSizeT,
35
+ VectorCType,
36
+ )
37
+ from torchgen.context import with_native_function
38
+ from torchgen.gen import get_grouped_by_view_native_functions, parse_native_yaml
39
+ from torchgen.model import (
40
+ AUTOGRAD_KEYS,
41
+ FunctionSchema,
42
+ NativeFunction,
43
+ NativeFunctionsViewGroup,
44
+ OperatorName,
45
+ SchemaKind,
46
+ Type,
47
+ Variant,
48
+ )
49
+ from torchgen.utils import concatMap, IDENT_REGEX, split_name_params
50
+ from torchgen.yaml_utils import YamlLoader
51
+
52
+ _GLOBAL_LOAD_DERIVATIVE_CACHE = {}
53
+
54
+ _VALID_AUTOGRAD_KEYS = set(AUTOGRAD_KEYS)
55
+
56
+
57
+ # This function directly adds per-dispatchkey derivative entries for {view}_copy variants of each view op.
58
+ # Since every {view} and {view}_copy op shares the same derivative formula,
59
+ # we generate them here instead of duplicating them in the yaml.
60
+ # See Note [Codegen'd {view}_copy Operators]
61
+ def add_view_copy_derivatives(
62
+ infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
63
+ view_groups: List[NativeFunctionsViewGroup],
64
+ ) -> None:
65
+ # Get the map from each view op's name to its corresponding view group
66
+ view_name_to_group: Dict[OperatorName, NativeFunctionsViewGroup] = {
67
+ g.view.func.name: g for g in view_groups
68
+ }
69
+
70
+ view_infos = {}
71
+
72
+ for info_dispatch_dict in infos.values():
73
+ # maybe_view_group only needs to be calculated once per info_dispatch_dict
74
+ maybe_view_group = None
75
+ view_copy_differentiability_infos = {}
76
+ for dispatch_key, info in info_dispatch_dict.items():
77
+ maybe_view_group = view_name_to_group.get(info.func.func.name, None)
78
+ if maybe_view_group is not None and maybe_view_group.view_copy is not None:
79
+ view_copy_info = info.create_view_copy_from_view_derivative(
80
+ maybe_view_group
81
+ )
82
+ if view_copy_info is not None:
83
+ fn_schema = view_copy_info.func.func
84
+ view_copy_differentiability_infos[dispatch_key] = view_copy_info
85
+ else:
86
+ break
87
+ if len(view_copy_differentiability_infos) > 0:
88
+ assert fn_schema is not None
89
+ view_infos[fn_schema] = view_copy_differentiability_infos
90
+
91
+ infos.update(view_infos)
92
+
93
+
94
+ def load_derivatives(
95
+ derivatives_yaml_path: str, native_yaml_path: str, tags_yaml_path: str
96
+ ) -> Tuple[Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]], Set[str]]:
97
+ # Do some caching as this is a deterministic function
98
+ global _GLOBAL_LOAD_DERIVATIVE_CACHE
99
+ key = (derivatives_yaml_path, native_yaml_path)
100
+ if key not in _GLOBAL_LOAD_DERIVATIVE_CACHE:
101
+ with open(derivatives_yaml_path) as f:
102
+ definitions = yaml.load(f, Loader=YamlLoader)
103
+
104
+ funcs = parse_native_yaml(native_yaml_path, tags_yaml_path).native_functions
105
+ # From the parsed native functions, separate out the (generated) view_copy functions,
106
+ # so we can generate derivatives for them separately.
107
+ native_functions_with_view_groups = get_grouped_by_view_native_functions(funcs)
108
+ native_functions_without_view_copies = concatMap(
109
+ # We need to pull out the view_inplace ops too, since they might have their own derivative entries.
110
+ lambda g: [g]
111
+ if isinstance(g, NativeFunction)
112
+ else list(g.functions(include_copy=False)),
113
+ native_functions_with_view_groups,
114
+ )
115
+ view_groups = [
116
+ g
117
+ for g in native_functions_with_view_groups
118
+ if isinstance(g, NativeFunctionsViewGroup)
119
+ ]
120
+
121
+ # What's the difference between a function schema and a signature?
122
+ # function schema is the complete declaration, including mutability annotations, default values, etc.
123
+ # signature is the canonical schema for a group of functions (in-place/out/functional variants)
124
+ # that are semantically related.
125
+ functions_by_signature: Dict[
126
+ FunctionSchema, List[NativeFunction]
127
+ ] = defaultdict(list)
128
+ functions_by_schema: Dict[str, NativeFunction] = {}
129
+ for function in native_functions_without_view_copies:
130
+ functions_by_signature[function.func.signature()].append(function)
131
+ assert str(function.func) not in functions_by_schema
132
+ functions_by_schema[str(function.func)] = function
133
+
134
+ # Keep track of how many of which ops we've seen so we can
135
+ # disambiguate them with a numeric suffix.
136
+ op_counter = Counter[str]()
137
+
138
+ # infos is a dict that maps FunctionSchema -> a dict of per dispatch key DifferentiabilityInfos
139
+ # this is useful because in tools/autograd/gen_autograd.py:match_differentiability_info
140
+ # we ultimately need to categorize the DifferentiabilityInfos by FunctionSchema
141
+ infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]] = {}
142
+ used_dispatch_keys: Set[str] = set()
143
+ for defn_dict in definitions:
144
+ # Ensure that the old derivatives.yaml schema with no dispatch key can be loaded.
145
+ if "dispatch" not in defn_dict:
146
+ specification = defn_dict.pop("name")
147
+ output_differentiability = defn_dict.pop(
148
+ "output_differentiability", None
149
+ )
150
+ defn_dict = {"name": specification, "dispatch": {"Default": defn_dict}}
151
+ if output_differentiability:
152
+ defn_dict["output_differentiability"] = output_differentiability
153
+ name, per_dispatch_diffinfos = create_differentiability_info(
154
+ defn_dict,
155
+ functions_by_signature,
156
+ functions_by_schema,
157
+ op_counter,
158
+ used_dispatch_keys,
159
+ )
160
+ infos[name] = per_dispatch_diffinfos
161
+
162
+ add_view_copy_derivatives(infos, view_groups)
163
+
164
+ # cache both the loaded infos as well as a set of all the dispatch_keys/aliases
165
+ # that appear in derivatives.yaml. used_dispatch_keys is useful for generating
166
+ # VariableType.cpp where we need a TORCH_LIBRARY_IMPL for every autograd dispatch key used
167
+ _GLOBAL_LOAD_DERIVATIVE_CACHE[key] = infos, used_dispatch_keys
168
+
169
+ return _GLOBAL_LOAD_DERIVATIVE_CACHE[key]
170
+
171
+
172
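# Illustrative sketch (assumed entry, not copied from derivatives.yaml): how a
# legacy definition without a "dispatch" section is normalized by
# load_derivatives() above into the per-dispatch-key layout before it is handed
# to create_differentiability_info().
_EXAMPLE_LEGACY_ENTRY = {
    "name": "mul.Tensor(Tensor self, Tensor other) -> Tensor",
    "self": "grad * other.conj()",
    "other": "grad * self.conj()",
}
_EXAMPLE_NORMALIZED_ENTRY = {
    "name": _EXAMPLE_LEGACY_ENTRY["name"],
    "dispatch": {
        "Default": {k: v for k, v in _EXAMPLE_LEGACY_ENTRY.items() if k != "name"}
    },
}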
+ # TODO: Why is this going through CppSignatureGroup, that doesn't make sense...
173
+ @with_native_function
174
+ def cpp_arguments(f: NativeFunction) -> Sequence[Binding]:
175
+ sigs = CppSignatureGroup.from_native_function(f, method=False)
176
+ if sigs.symint_signature is not None:
177
+ return sigs.symint_signature.arguments()
178
+ else:
179
+ return sigs.signature.arguments()
180
+
181
+
182
+ def create_derivative(
183
+ f: NativeFunction,
184
+ formula: str,
185
+ var_names: Tuple[str, ...],
186
+ available_named_gradients: Sequence[str],
187
+ ) -> Derivative:
188
+ original_formula = formula
189
+ arguments: List[NamedCType] = [
190
+ a.nctype.remove_const_ref() for a in cpp_arguments(f)
191
+ ]
192
+
193
+ return_names = tuple(n if n != "self" else "result" for n in cpp.return_names(f))
194
+ return_types = tuple(
195
+ cpp.return_type(r, symint=True).remove_const_ref() for r in f.func.returns
196
+ )
197
+
198
+ named_returns = [
199
+ NamedCType(name, type) for name, type in zip(return_names, return_types)
200
+ ]
201
+
202
+ formula, saved_inputs = saved_variables(formula, arguments, var_names)
203
+ formula, saved_outputs = saved_variables(formula, named_returns, var_names)
204
+
205
+ used_named_gradients = {
206
+ name
207
+ for name in available_named_gradients
208
+ if re.search(IDENT_REGEX.format(name), formula)
209
+ }
210
+
211
+ # Check that the referenced derivatives in the formula are in bounds
212
+ for i in used_gradient_indices(formula):
213
+ if i >= len(f.func.returns):
214
+ raise RuntimeError(
215
+ f"Out of bounds grads access: derivative formula for {cpp.name(f.func)} "
216
+ f"used grads[{i}], but the forward only returns {len(f.func.returns)} outputs."
217
+ )
218
+
219
+ return Derivative(
220
+ formula=formula,
221
+ original_formula=original_formula,
222
+ var_names=var_names,
223
+ saved_inputs=saved_inputs,
224
+ saved_outputs=saved_outputs,
225
+ named_gradients=used_named_gradients,
226
+ )
227
+
228
+
229
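# Illustrative, self-contained sketch (the regex below only mimics the shape of
# IDENT_REGEX, it is not the upstream constant): create_derivative() above uses
# whole-word matching so that e.g. "grad" in a formula does not accidentally
# match inside "grad_output".
def _whole_word_match_demo() -> None:
    import re as _re
    _WHOLE_WORD = r"(^|\W){}($|\W)"
    assert _re.search(_WHOLE_WORD.format("grad"), "grad * other.conj()")
    assert not _re.search(_WHOLE_WORD.format("grad"), "grad_output.sum()")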
+ def create_forward_derivative(
230
+ f: NativeFunction, formula: str, names: Tuple[str, ...]
231
+ ) -> ForwardDerivative:
232
+ var_names = names
233
+ var_types: Optional[Tuple[Type, ...]] = None
234
+ for r in f.func.returns:
235
+ if r.name in var_names:
236
+ if var_types is None:
237
+ var_types = tuple()
238
+ var_types = var_types + (r.type,)
239
+
240
+ # Handle default return names
241
+ if var_types is None:
242
+ if var_names == ("result",):
243
+ assert len(f.func.returns) == 1
244
+ var_types = (f.func.returns[0].type,)
245
+ else:
246
+ for var_name in var_names:
247
+ res = re.findall(r"^result(\d+)$", var_name)
248
+ if len(res) == 1:
249
+ if var_types is None:
250
+ var_types = tuple()
251
+ arg_idx = int(res[0])
252
+ var_types = var_types + (f.func.returns[arg_idx].type,)
253
+
254
+ assert var_types is not None, "No matching output for forward derivative definition"
255
+ return ForwardDerivative(
256
+ formula=formula,
257
+ var_names=var_names,
258
+ var_types=var_types,
259
+ required_inputs_fw_grad=None,
260
+ required_inputs_primal=None,
261
+ required_original_self_value=False,
262
+ is_reusing_outplace_formula=False,
263
+ )
264
+
265
+
266
+ def postprocess_forward_derivatives(
267
+ f: NativeFunction,
268
+ defn_name: str,
269
+ all_arg_names: List[str],
270
+ derivatives: List[Derivative],
271
+ forward_derivatives: List[ForwardDerivative],
272
+ args_with_derivatives: Sequence[Binding],
273
+ ) -> List[ForwardDerivative]:
274
+ def find_required_inputs(formula: str, postfix: str) -> Tuple[str, ...]:
275
+ is_foreach = f.func.name.name.base.startswith("_foreach_")
276
+ required_inputs = set()
277
+ for arg in args_with_derivatives:
278
+ if (
279
+ arg.type in ("at::TensorList", "const at::ITensorListRef &")
280
+ and not is_foreach
281
+ ):
282
+ # The functions taking TensorList handle everything internally
283
+ continue
284
+ arg_name = arg.name
285
+
286
+ found = re.search(IDENT_REGEX.format(arg_name), formula)
287
+ if found:
288
+ raise RuntimeError(
289
+ f"The forward formula for {defn_name} is using the base name of the {arg_name} "
290
+ f"argument which is ambiguous. You should use {arg_name}_p to access the primal "
291
+ f"value and {arg_name}_t to access the tangent."
292
+ )
293
+
294
+ found = re.search(IDENT_REGEX.format(arg_name + postfix), formula)
295
+ if found:
296
+ required_inputs.add(arg_name)
297
+
298
+ return tuple(required_inputs)
299
+
300
+ updated_derivatives: List[ForwardDerivative] = []
301
+
302
+ for defn in forward_derivatives:
303
+ formula = defn.formula
304
+ required_inputs_tangent = find_required_inputs(formula, "_t")
305
+ if formula == "auto_element_wise":
306
+ assert (
307
+ f.func.kind() != SchemaKind.inplace
308
+ ), f"Cannot use auto_element_wise with {f.func.name} because it is an in-place variant"
309
+ if (
310
+ (not len(args_with_derivatives) == 1)
311
+ or len(forward_derivatives) > 1
312
+ or len(forward_derivatives[0].var_names) > 1
313
+ ):
314
+ raise RuntimeError(
315
+ f"Derivative definition of {defn_name} in derivatives.yaml defines the "
316
+ "forward definition of gradient as element_wise but this only "
317
+ "works for functions with a single differentiable input and a "
318
+ "single differentiable output."
319
+ )
320
+ if not len(derivatives) == 1:
321
+ raise RuntimeError(
322
+ f"Derivative definition of {defn_name} in derivatives.yaml defines the "
323
+ "forward definition of gradient as element_wise but it does not "
324
+ "defines the gradient formula for its argument which is required."
325
+ )
326
+ # This transformation is based on the observation that for element-wise functions, the Jacobian
327
+ # matrix is diagonal and thus doing J * v is the same as (v^T J)^T (in practice, we ignore the transpositions)
328
+ # For the complex case, we use hermitian transpose and get (v.conj() J).conj()
329
+ # So here we are going to re-use the backward formula and apply three changes:
330
+ # 1) all occurrences of "grad" with "foo_t.conj()", where foo is the name of the unique differentiable input.
331
+ # 2) all usage of an original input "foo" with its primal value "foo_p".
332
+ # 3) conjugate the final result
333
+ # For example, for abs, the backward formula is:
334
+ # grad * self.sgn()
335
+ # And this function generates a forward formula that is:
336
+ # (self_t.conj() * self_p.sgn()).conj()
337
+
338
+ backward_formula = derivatives[0].original_formula
339
+ input_name = args_with_derivatives[0].name
340
+
341
+ # Do replacement 1) of the grad
342
+ def repl(m: Any) -> str:
343
+ return f"{m.group(1)}{input_name}_t.conj(){m.group(2)}"
344
+
345
+ fw_formula = re.sub(IDENT_REGEX.format("grad"), repl, backward_formula)
346
+
347
+ # Do replacement 2) of the input variables
348
+ for arg in args_with_derivatives:
349
+ arg_name = arg.name
350
+
351
+ def repl(m: Any) -> str:
352
+ return f"{m.group(1)}{arg_name}_p{m.group(2)}"
353
+
354
+ fw_formula = re.sub(IDENT_REGEX.format(arg_name), repl, fw_formula)
355
+
356
+ # Do the final conjugate 3)
357
+ fw_formula = f"({fw_formula}).conj()"
358
+
359
+ # Since there is a single differentiable input and we necessarily need its tangent, we can
360
+ # simply require the tangents of all the arguments.
361
+ required_inputs_tangent = tuple(all_arg_names)
362
+ formula = fw_formula
363
+ elif formula == "auto_linear":
364
+ if (
365
+ len(forward_derivatives) > 1
366
+ or len(forward_derivatives[0].var_names) > 1
367
+ ):
368
+ raise RuntimeError(
369
+ f"Derivative definition of {defn_name} in derivatives.yaml defines the "
370
+ "forward definition of gradient as linear but this only works "
371
+ "for functions with a single differentiable output."
372
+ )
373
+ # This transformation is based on the observation that linear functions can be written as:
374
+ # y = f(x) = A * x
375
+ # For some matrix A and the Jacobian of the function f is also A.
376
+ # So doing J * v = A * v = f(v).
377
+ # Hence to do the jvp, we simply need to evaluate the function at the point v instead of x.
378
+ # We do this by calling the forward again, replacing any occurrence of the differentiable
379
+ # input "foo" by it's tangent "foo_t".
380
+ # Note that multiple inputs are not a problem as long as the function is truly linear with respect to
381
+ # the vector where all the differentiable inputs are stacked.
382
+
383
+ diff_arg_names = [arg.name for arg in args_with_derivatives]
384
+ assert len(diff_arg_names) > 0
385
+
386
+ # Do replacement of input variables
387
+ new_args = []
388
+ for arg_name in all_arg_names:
389
+ if arg_name in diff_arg_names:
390
+ arg_name = arg_name + "_t"
391
+ new_args.append(arg_name)
392
+
393
+ # TODO we are trolling
394
+ if f.func.has_symint():
395
+ defn_name += "_symint"
396
+
397
+ # Call into the forward again. We need two cases here to handle both Tensor methods and at:: functions.
398
+ if Variant.function in f.variants:
399
+ fw_formula = f"at::{defn_name}({', '.join(new_args)})"
400
+ else:
401
+ assert Variant.method in f.variants
402
+ fw_formula = f"{new_args[0]}.{defn_name}({', '.join(new_args[1:])})"
403
+
404
+ # All of the input tangents are always used so all of them are required here.
405
+ required_inputs_tangent = tuple(diff_arg_names)
406
+ formula = fw_formula
407
+
408
+ # At this point, the formula is final and is not modified anymore.
409
+
410
+ # During forward formula, we use the primal instead of the input Tensors.
411
+ # This call inspects the formula to find for which input's primal are used.
412
+ required_inputs_primal = find_required_inputs(formula, "_p")
413
+
414
+ updated_derivatives.append(
415
+ ForwardDerivative(
416
+ formula=formula,
417
+ var_names=defn.var_names,
418
+ var_types=defn.var_types,
419
+ required_inputs_fw_grad=required_inputs_tangent,
420
+ required_inputs_primal=required_inputs_primal,
421
+ required_original_self_value=False,
422
+ is_reusing_outplace_formula=False,
423
+ )
424
+ )
425
+
426
+ return updated_derivatives
427
+
428
+
429
+ def is_forward_derivative_definition(
430
+ all_arg_names: List[str], names: Tuple[str, ...]
431
+ ) -> bool:
432
+ for name in names:
433
+ if name not in all_arg_names:
434
+ return True
435
+ else:
436
+ return False
437
+ raise RuntimeError("Expected `names` to be non-empty")
438
+
439
+
440
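# Illustrative sketch (assumed example, not part of the upstream file): for a
# hypothetical schema "foo(Tensor self, Tensor other) -> Tensor", an entry
# keyed by an output name is classified as a forward derivative, while an
# entry keyed by an input name is a backward derivative.
def _forward_vs_backward_classification_demo() -> None:
    assert is_forward_derivative_definition(["self", "other"], ("result",))
    assert not is_forward_derivative_definition(["self", "other"], ("self",))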
+ def create_differentiability_info(
441
+ defn_dict: Dict[Any, Any],
442
+ functions_by_signature: Dict[FunctionSchema, List[NativeFunction]],
443
+ functions_by_schema: Dict[str, NativeFunction],
444
+ op_counter: Counter[str],
445
+ used_dispatch_keys: Set[str],
446
+ ) -> Tuple[FunctionSchema, Dict[str, DifferentiabilityInfo]]:
447
+ """Processes a single entry `defn` in derivatives.yaml"""
448
+
449
+ def canonical_function(
450
+ functions: Sequence[NativeFunction], name: str
451
+ ) -> NativeFunction:
452
+ for f in functions:
453
+ if (
454
+ not f.func.is_functional_fn()
455
+ and not f.func.is_out_fn()
456
+ and name == str(f.func.name.name)
457
+ ):
458
+ return f
459
+ # some functions only have in-place variants
460
+ assert name + "_" == cpp.name(functions[0].func)
461
+ return functions[0]
462
+
463
+ def split_names(raw_names: str) -> Tuple[str, ...]:
464
+ """Given "foo, bar", return ["foo", "bar"]."""
465
+ return tuple(x.strip() for x in raw_names.split(","))
466
+
467
+ def check_grad_usage(defn_name: str, derivatives: Sequence[Derivative]) -> None:
468
+ """
469
+ Check for some subtle mistakes one might make when writing derivatives.
470
+ These mistakes will compile, but will be latent until a function is
471
+ used with double backwards.
472
+ """
473
+
474
+ uses_grad = False # true if any derivative uses "grad"
475
+ num_grads_uses = 0 # count of uses of "grads" or "grads[INDEX]"
476
+ uses_named_grads = False # true if any derivative uses "grad_{name}"
477
+ used_grads_indices: List[int] = [] # which indices of grads are used
478
+ for d in derivatives:
479
+ formula = d.formula
480
+ uses_grad = uses_grad or bool(
481
+ re.findall(IDENT_REGEX.format("grad"), formula)
482
+ )
483
+ num_grads_uses += len(re.findall(IDENT_REGEX.format("grads"), formula))
484
+ uses_named_grads = uses_named_grads or bool(d.named_gradients)
485
+ used_grads_indices.extend(used_gradient_indices(formula))
486
+ # This is a basic sanity check: the number of places we see
487
+ # "grads" should be no fewer than the number of indices we see
488
+ # inside "grads". They may not be equal because we may use
489
+ # "grads" without an index.
490
+ assert num_grads_uses >= len(used_grads_indices)
491
+ # Thus if the number is equal, every use of grads is also
492
+ # indexed.
493
+ only_used_grads_indices = num_grads_uses == len(used_grads_indices)
494
+
495
+ if uses_grad and num_grads_uses > 0:
496
+ raise RuntimeError(
497
+ f"Derivative definition of {defn_name} in derivatives.yaml illegally "
498
+ "mixes use of 'grad' and 'grads'. Consider replacing "
499
+ "occurrences of 'grad' with 'grads[0]'"
500
+ )
501
+
502
+ if only_used_grads_indices and set(used_grads_indices) == {0}:
503
+ raise RuntimeError(
504
+ f"Derivative definition of {defn_name} in derivatives.yaml solely "
505
+ "refers to 'grads[0]'. If the first output is indeed the "
506
+ "only differentiable output, replace 'grads[0]' with 'grad'; "
507
+ "otherwise, there is a likely error in your derivatives "
508
+ "declaration."
509
+ )
510
+
511
+ if uses_named_grads and (uses_grad or num_grads_uses > 0):
512
+ raise RuntimeError(
513
+ f"Derivative definition of {defn_name} in derivatives.yaml illegally "
514
+ 'mixes use of "grad_RETURN_NAME" and "grad" or "grads[x]". Use '
515
+ "only one method for identifying gradients."
516
+ )
517
+
518
+ @with_native_function
519
+ def set_up_derivatives(
520
+ f: NativeFunction,
521
+ ) -> Tuple[
522
+ Sequence[Derivative],
523
+ Sequence[ForwardDerivative],
524
+ Sequence[Binding],
525
+ Sequence[str],
526
+ Sequence[str],
527
+ ]:
528
+ # Set up the derivative information
529
+ derivatives: List[Derivative] = []
530
+ forward_derivatives: List[ForwardDerivative] = []
531
+ non_differentiable_arg_names: List[str] = []
532
+ args_with_derivatives_set: Set[str] = set()
533
+
534
+ all_arg_names = [a.name for a in cpp_arguments(f)]
535
+ all_ret_names = [
536
+ r.name for r in f.func.returns
537
+ ] # only used for the assert below
538
+ # output_differentiability is captured from the enclosed
539
+ # scope. Don't modify it.
540
+ #
541
+ # If it is not present, then no output is explicitly
542
+ # undifferentiable.
543
+ #
544
+ # It may be present and shorter than the length of return
545
+ # values. If that's the case, any return value that does not
546
+ # have a corresponding entry is considered not differentiable.
547
+ differentiability = output_differentiability or [True] * len(f.func.returns)
548
+ # A return is available as a named gradient ...
549
+ available_named_gradients = [
550
+ f"grad_{ret.name}"
551
+ for ret, differentiable in zip(f.func.returns, differentiability)
552
+ # if it has not been explicitly made undifferentiable
553
+ if differentiable
554
+ # and if it has a name
555
+ and ret.name is not None
556
+ # and if its type is differentiable
557
+ and ret.type.is_tensor_like()
558
+ ]
559
+
560
+ for raw_names in sorted(defn.keys()):
561
+ formula = defn[raw_names]
562
+ names = split_names(raw_names)
563
+
564
+ for name in names:
565
+ assert not (name in all_arg_names and name in all_ret_names), (
566
+ f"While processing the derivative formula for '{f.func.name}' wrt '{name}', "
567
+ f"expected '{name}' to not be both an input arg and named return. "
568
+ )
569
+
570
+ if is_forward_derivative_definition(all_arg_names, names):
571
+ forward_derivatives.append(create_forward_derivative(f, formula, names))
572
+ else:
573
+ if formula.lower().strip() == "non_differentiable":
574
+ non_differentiable_arg_names += names
575
+ else:
576
+ derivative = create_derivative(
577
+ f, formula, names, available_named_gradients
578
+ )
579
+ derivatives.append(derivative)
580
+ args_with_derivatives_set |= set(names)
581
+
582
+ overlap = args_with_derivatives_set.intersection(non_differentiable_arg_names)
583
+ if overlap:
584
+ raise RuntimeError(
585
+ f"derivatives definition for {defn} have overlapped non_differentiable "
586
+ f"and differentiable variables: {overlap}"
587
+ )
588
+
589
+ # Next, let us determine the list of inputs in order.
590
+ # TODO: do we need eagerly calculate and save it here? Can it be derived
591
+ # from NativeFunction and `derivatives` on callsites instead?
592
+ args_with_derivatives = [
593
+ a for a in cpp_arguments(f) if a.name in args_with_derivatives_set
594
+ ]
595
+
596
+ # Postprocess forward derivatives definitions now that we know the differentiable arguments
597
+ forward_derivatives = postprocess_forward_derivatives(
598
+ f,
599
+ defn_name,
600
+ all_arg_names,
601
+ derivatives,
602
+ forward_derivatives,
603
+ args_with_derivatives,
604
+ )
605
+
606
+ # Test to see if the use of 'grads' makes sense.
607
+ check_grad_usage(defn_name, derivatives)
608
+
609
+ return (
610
+ derivatives,
611
+ forward_derivatives,
612
+ args_with_derivatives,
613
+ non_differentiable_arg_names,
614
+ available_named_gradients,
615
+ )
616
+
617
+ # NB: Removes 'name' from defn dictionary
618
+ specification = defn_dict.pop("name")
619
+ defn_name, _ = split_name_params(specification)
620
+ # NB: Removes 'output_differentiability' from defn dictionary
621
+ # `None` means all differentiable.
622
+ output_differentiability = defn_dict.pop("output_differentiability", None)
623
+ output_differentiability_conditions = None
624
+ if output_differentiability and any(
625
+ isinstance(diff, str) for diff in output_differentiability
626
+ ):
627
+ if len(output_differentiability) != 1:
628
+ raise RuntimeError(
629
+ f"Not supported: for {specification},"
630
+ f"output_differentiability must either be "
631
+ f"List[bool] or a List[str] where each str is a "
632
+ f"condition. In the case where it is a condition, "
633
+ f"we only support single-output functions. "
634
+ f"Please file us an issue. "
635
+ )
636
+ output_differentiability_conditions = output_differentiability
637
+ output_differentiability = [True]
638
+
639
+ schema_function = functions_by_schema.get(specification)
640
+ if not schema_function:
641
+ avail = "\n".join(
642
+ k for k, v in functions_by_schema.items() if cpp.name(v.func) == defn_name
643
+ )
644
+ raise RuntimeError(
645
+ f"could not find ATen function for schema: {specification} "
646
+ f". Available signatures:\n{avail}"
647
+ )
648
+
649
+ # now map this to the legacy schema; this isn't technically necessary, but we'd need some logic here
650
+ # to map in-place schemas to the out-of-place variants.
651
+ # TODO: maybe the logic to handle the legacy schema is no longer necessary?
652
+ signature = schema_function.func.signature()
653
+ functions = functions_by_signature[signature]
654
+ if len(functions) == 0:
655
+ avail = "\n".join(
656
+ str(k)
657
+ for k, v in functions_by_signature.items()
658
+ if cpp.name(k) == defn_name
659
+ )
660
+ raise RuntimeError(
661
+ f"could not find ATen function for legacy signature: {signature} "
662
+ f"corresponding to schema {specification}. Please report a bug to PyTorch. "
663
+ f"Available signatures:\n{avail}"
664
+ )
665
+
666
+ canonical = canonical_function(functions, defn_name)
667
+ if "grad_input_mask" in (a.name for a in cpp_arguments(canonical)):
668
+ raise RuntimeError(
669
+ f"Schema for {defn_name} has an argument named grad_input_mask, "
670
+ "but this name would be shadowed by our codegen. "
671
+ "Please use a different name in native_functions.yaml."
672
+ )
673
+
674
+ if "result" in (a.name for a in cpp_arguments(canonical)):
675
+ raise RuntimeError(
676
+ f"Schema for {defn_name} has an argument named result, "
677
+ "but this is only allowed for outputs."
678
+ "Please use a different name in native_functions.yaml."
679
+ )
680
+
681
+ diffinfo_dict = {}
682
+ for key, defn in defn_dict["dispatch"].items():
683
+ if key != "Default" and key not in _VALID_AUTOGRAD_KEYS:
684
+ raise RuntimeError(
685
+ f"Invalid dispatch key {key} in derivatives.yaml for {specification},"
686
+ f" expected key to be one of {_VALID_AUTOGRAD_KEYS}"
687
+ )
688
+ if key not in used_dispatch_keys:
689
+ used_dispatch_keys.add(key)
690
+
691
+ (
692
+ derivatives,
693
+ forward_derivatives,
694
+ args_with_derivatives,
695
+ non_differentiable_arg_names,
696
+ available_named_gradients,
697
+ ) = set_up_derivatives(canonical)
698
+
699
+ used_named_gradients: Set[str] = set()
700
+ for d in derivatives:
701
+ used_named_gradients |= d.named_gradients
702
+
703
+ # only assign an op name if we are actually going to calculate a derivative
704
+ op = None
705
+ if args_with_derivatives:
706
+ op_prefix = _create_op_prefix(defn_name)
707
+ if key != "Default":
708
+ op_prefix = op_prefix + key
709
+ op = f"{op_prefix}{op_counter[op_prefix]}"
710
+ op_counter[op_prefix] += 1
711
+
712
+ diffinfo_dict[key] = DifferentiabilityInfo(
713
+ name=defn_name,
714
+ func=canonical,
715
+ op=op,
716
+ derivatives=derivatives,
717
+ forward_derivatives=forward_derivatives,
718
+ all_saved_inputs=dedup_vars(
719
+ [v for d in derivatives for v in d.saved_inputs]
720
+ ),
721
+ all_saved_outputs=dedup_vars(
722
+ [v for d in derivatives for v in d.saved_outputs]
723
+ ),
724
+ available_named_gradients=available_named_gradients,
725
+ used_named_gradients=used_named_gradients,
726
+ args_with_derivatives=args_with_derivatives,
727
+ non_differentiable_arg_names=non_differentiable_arg_names,
728
+ output_differentiability=output_differentiability,
729
+ output_differentiability_conditions=output_differentiability_conditions,
730
+ )
731
+
732
+ return canonical.func, diffinfo_dict
733
+
734
+
735
+ GRAD_INDEX_REGEX = r"(?:^|\W)grads\[(\d+)\]"
736
+
737
+
738
+ def used_gradient_indices(formula: str) -> List[int]:
739
+ """Determine a list of gradient indices (the i in grads[i]) that
740
+ are used by the formula.
741
+
742
+ >>> used_gradient_indices("foo(grads[0], grads[1])")
743
+ [0, 1]
744
+ """
745
+ return [int(i) for i in re.findall(GRAD_INDEX_REGEX, formula)]
746
+ 
+ 
+ def saved_variables(
+     formula: str,
+     nctypes: List[NamedCType],
+     var_names: Tuple[str, ...],
+ ) -> Tuple[str, Tuple[SavedAttribute, ...]]:
+     def stride_expr(name: str) -> str:
+         assert var_names == (name,), (
+             'Replacement for ".strides()" is currently only supported for single derivatives of the same tensor '
+             'that ".strides()" is being called on.'
+         )
+         return f'strides_or_error({name}, "{name}")'
+ 
+     REPLACEMENTS: List[Tuple[str, Dict[str, Any]]] = [
+         # replace self.sym_sizes() with self_sym_sizes
+         (
+             r"{}.sym_sizes\(\)",
+             {
+                 "suffix": "_sym_sizes",
+                 "nctype": lambda name: NamedCType(name, BaseCType(symIntArrayRefT)),
+             },
+         ),
+         # replace self->sym_sizes() with self_sym_sizes_opt
+         (
+             r"{}->sym_sizes\(\)",
+             {
+                 "suffix": "_sym_sizes_opt",
+                 "nctype": lambda name: NamedCType(
+                     name, OptionalCType(BaseCType(symIntArrayRefT))
+                 ),
+                 "expr": lambda name: f"{name}.has_value() ? c10::optional<c10::SymIntArrayRef>({name}->sym_sizes()) : c10::nullopt",
+             },
+         ),
+         # replace self.sym_blocksize() with self_sym_blocksize_opt
+         (
+             r"{}.sym_blocksize\(\)",
+             {
+                 "suffix": "_self_sym_blocksize_opt",
+                 "nctype": lambda name: NamedCType(
+                     name, OptionalCType(BaseCType(symIntArrayRefT))
+                 ),
+                 "expr": lambda name: f"at::sparse_csr::getSymIntBlockSize({name})",
+             },
+         ),
+         # replace self.options() with self_options
+         (
+             r"{}.options\(\)",
+             {
+                 "suffix": "_options",
+                 "nctype": lambda name: NamedCType(name, BaseCType(tensorOptionsT)),
+             },
+         ),
+         # replace zeros_like(self) with self_info
+         (
+             r"zeros_like\({}\)",
+             {
+                 "suffix": "_info",
+                 "nctype": lambda name: NamedCType(name, BaseCType(typeAndSizeT)),
+                 "expr": lambda name: name,  # at save-time
+                 "res": lambda name: name + "_info.zeros()",  # at eval-time
+             },
+         ),
+         # replace self.sym_size(2) with self_sym_size_2
+         (
+             r"{}.sym_size\((-?\w+)\)",
+             {
+                 "suffix": lambda m: f"_sym_argsize_{m.groups()[0].replace('-', 'minus_')}",
+                 "nctype": lambda name: NamedCType(name, BaseCType(SymIntT)),
+             },
+         ),
+         # replace self.numel() with self_numel
+         (
+             r"{}.numel\(\)",
+             {
+                 "suffix": "_numel",
+                 "nctype": lambda name: NamedCType(name, BaseCType(longT)),
+             },
+         ),
+         # replace self.sym_numel() with self_sym_numel
+         (
+             r"{}.sym_numel\(\)",
+             {
+                 "suffix": "_sym_numel",
+                 "nctype": lambda name: NamedCType(name, BaseCType(SymIntT)),
+             },
+         ),
+         # replace to_args_sizes(self) with self_args_sizes
+         (
+             r"to_args_sizes\({}\)",
+             {
+                 "suffix": "_args_sizes",
+                 "nctype": lambda name: NamedCType(
+                     name, VectorCType(VectorCType(BaseCType(longT)))
+                 ),
+             },
+         ),
+         # replace to_args_sizes_symint(self) with self_args_sizes_symint
+         (
+             r"to_args_sizes_symint\({}\)",
+             {
+                 "suffix": "_args_sizes_symint",
+                 "nctype": lambda name: NamedCType(
+                     name, VectorCType(VectorCType(BaseCType(SymIntT)))
+                 ),
+             },
+         ),
+         # replace to_args_scalartypes(self) with self_args_scalartypes
+         (
+             r"to_args_scalartypes\({}\)",
+             {
+                 "suffix": "_args_scalartypes",
+                 "nctype": lambda name: NamedCType(
+                     name, VectorCType(BaseCType(scalarTypeT))
+                 ),
+             },
+         ),
+         # replace TensorGeometry(self) with self_geometry
+         (
+             r"TensorGeometry\({}\)",
+             {
+                 "suffix": "_geometry",
+                 "nctype": lambda name: NamedCType(name, BaseCType(tensorGeometryT)),
+             },
+         ),
+         (
+             r"{}.scalar_type\(\)",
+             {
+                 "suffix": "_scalar_type",
+                 "nctype": lambda name: NamedCType(name, BaseCType(scalarTypeT)),
+             },
+         ),
+         # replace self.dim() with self_dim
+         (
+             r"{}.dim\(\)",
+             {
+                 "suffix": "_dim",
+                 "nctype": lambda name: NamedCType(name, BaseCType(longT)),
+             },
+         ),
+         # replace self.sym_strides() with self_sym_strides
+         (
+             r"{}.sym_strides\(\)",
+             {
+                 "suffix": "_sym_strides",
+                 "nctype": lambda name: NamedCType(name, BaseCType(symIntArrayRefT)),
+                 "expr": stride_expr,
+             },
+         ),
+         # replace self.layout() with self_layout
+         (
+             r"{}.layout\(\)",
+             {
+                 "suffix": "_layout",
+                 "nctype": lambda name: NamedCType(name, BaseCType(layoutT)),
+             },
+         ),
+         # replace self.is_conj() with self_conjugate
+         (
+             r"{}.is_conj\(\)",
+             {
+                 "suffix": "_conjugate",
+                 "nctype": lambda name: NamedCType(name, BaseCType(boolT)),
+             },
+         ),
+     ]
+ 
+     # find which arguments need to be saved
+     saved: List[SavedAttribute] = []
+ 
+     if ".sizes()" in formula or "->sizes()" in formula:
+         raise RuntimeError(
+             ".sizes() is not supported in derivative formulas. Instead, please use the SymInt version, "
+             + f".sym_sizes(), which returns a c10::SymIntArrayRef. formula={formula}"
+         )
+     if re.search(r"\.size\([-]?\d+\)", formula) or re.search(
+         r"->size\([-]?\d+\)", formula
+     ):
+         raise RuntimeError(
+             ".size(int) is not supported in derivative formulas. Instead, please use the SymInt version, "
+             + f".sym_size(int), which returns a c10::SymInt. formula={formula}"
+         )
+     if ".strides()" in formula or "->strides()" in formula:
+         raise RuntimeError(
+             ".strides() is not supported in derivative formulas. Instead, please use the SymInt version, "
+             + f".sym_strides(), which returns a c10::SymIntArrayRef. formula={formula}"
+         )
+     for nctype in nctypes:
+         name = (
+             nctype.name.name if isinstance(nctype.name, SpecialArgName) else nctype.name
+         )
+         # First search the formula for expressions which can be evaluated
+         # when the autograd Function is created to avoid saving variables
+         for regex, info in REPLACEMENTS:
+ 
+             def repl(m: Match[str]) -> str:
+                 suffix: str = (
+                     info["suffix"](m) if callable(info["suffix"]) else info["suffix"]
+                 )
+                 expr: str = info["expr"](name) if "expr" in info else m.group(0)
+                 saved.append(
+                     SavedAttribute(
+                         nctype=info["nctype"](name + suffix),
+                         expr=expr,
+                     )
+                 )
+                 if "res" in info:
+                     replacement: str = info["res"](name)
+                     return replacement
+                 return name + suffix
+ 
+             formula = re.sub(regex.format(name), repl, formula)
+ 
+         # c10::optional<std::string> types stored in Backward nodes must be
+         # converted to c10::optional<c10::string_view> before being passed into
+         # the backward function
+         if nctype.type == OptionalCType(BaseCType(stringT)):
+             formula = re.sub(
+                 rf"\b{name}\b",
+                 f"{name}.has_value() ? c10::optional<c10::string_view>({name}.value()) : c10::nullopt",
+                 formula,
+             )
+ 
+         # Find any variables which remain in the formula and save them
+         if re.search(IDENT_REGEX.format(name), formula):
+             saved.append(
+                 SavedAttribute(
+                     nctype=nctype,
+                     expr=name,
+                 )
+             )
+ 
+     return formula, tuple(saved)
+ 
+ 
+ def _create_op_prefix(name: str) -> str:
+     """Takes a native function name and converts it to an op prefix name.
+ 
+     Note that the "name" parameter must be the native function name
+     without the optional variant suffix, so "add" instead of
+     "add.out".
+ 
+     OP names correspond to classes, hence the change to title case.
+ 
+     Example::
+     >>> _create_op_prefix('add')
+     'AddBackward'
+     """
+     camel_case = "".join([p.title() for p in name.split("_")])
+     return (camel_case + "Backward").replace("ForwardBackward", "Backward")
+ 
+ 
+ def dedup_vars(vars: Sequence[SavedAttribute]) -> Sequence[SavedAttribute]:
+     seen: Set[str] = set()
+     saved: List[SavedAttribute] = []
+     for var in vars:
+         name = (
+             var.nctype.name.name
+             if isinstance(var.nctype.name, SpecialArgName)
+             else var.nctype.name
+         )
+         if name in seen:
+             continue
+         seen.add(name)
+         saved.append(var)
+     return saved
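dedup_vars() is order-preserving, first-occurrence-wins deduplication keyed on the saved attribute's name. A tiny sketch, with plain strings standing in for SavedAttribute objects (toy_dedup is a hypothetical helper, not part of the package):

def toy_dedup(names):
    # First occurrence wins; later duplicates are dropped and order is
    # preserved, mirroring dedup_vars() above.
    seen = set()
    out = []
    for n in names:
        if n in seen:
            continue
        seen.add(n)
        out.append(n)
    return out

print(toy_dedup(["self", "other", "self", "self_sym_sizes", "other"]))
# ['self', 'other', 'self_sym_sizes']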
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/ADInplaceOrViewType.cpp ADDED
@@ -0,0 +1,35 @@
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+ #include "torch/csrc/autograd/VariableTypeUtils.h"
+ 
+ #include <torch/library.h>
+ 
+ // ${generated_comment}
+ 
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Operators.h>
+ #else
+ $ops_headers
+ #endif
+ 
+ using namespace at;
+ using torch::autograd::CreationMeta;
+ using torch::autograd::as_view;
+ using torch::autograd::increment_version;
+ 
+ namespace torch {
+ 
+ namespace ADInplaceOrView {
+ 
+ namespace {
+ ${inplace_or_view_method_definitions}
+ } // namespace
+ } // namespace ADInplaceOrView
+ 
+ namespace {
+ 
+ TORCH_LIBRARY_IMPL(aten, ADInplaceOrView, m) {
+   ${inplace_or_view_wrapper_registrations};
+ }
+ 
+ } // namespace
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.cpp ADDED
@@ -0,0 +1,20 @@
+ #include "torch/csrc/autograd/FunctionsManual.h"
+ #include "torch/csrc/dynamo/compiled_autograd.h"
+ 
+ // ${generated_comment}
+ 
+ // The manual function definitions that used to be here are now in torch/csrc/autograd/FunctionsManual.cpp.
+ // This speeds up re-compilation and allows these implementations to be shared,
+ // so that they can also be used for forward-mode AD formulas.
+ 
+ using namespace torch::autograd::generated::details;
+ using at::Tensor;
+ using at::Scalar;
+ using at::IntArrayRef;
+ using at::TensorList;
+ 
+ namespace torch::autograd::generated {
+ 
+ ${autograd_function_definitions}
+ 
+ } // namespace torch::autograd::generated
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/Functions.h ADDED
@@ -0,0 +1,51 @@
+ #pragma once
+ 
+ // ${generated_comment}
+ 
+ #include <ATen/ATen.h>
+ #include <ATen/core/functional.h>
+ #include <ATen/TensorGeometry.h>
+ 
+ #include "torch/csrc/autograd/function.h"
+ #include "torch/csrc/autograd/variable.h"
+ #include "torch/csrc/autograd/saved_variable.h"
+ #include <torch/csrc/Export.h>
+ 
+ #include <c10/core/SymIntArrayRef.h>
+ 
+ namespace torch { namespace autograd { namespace generated {
+ 
+ using at::Scalar;
+ using at::Tensor;
+ using at::IntArrayRef;
+ using at::ArrayRef;
+ using at::Type;
+ using at::TensorGeometry;
+ using at::ScalarType;
+ using c10::optional;
+ using c10::fmap;
+ 
+ inline std::vector<Tensor> unpack_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
+   // NB: we must explicitly do the conversion in the lambda, otherwise template
+   // deduction will give a Tensor of Variable which is not convertible
+   return fmap(xs, [&saved_for](const SavedVariable& x) {
+     // TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring.
+     return static_cast<Tensor>(x.unpack(saved_for));
+   });
+ }
+ 
+ inline c10::List<c10::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
+   torch::List<c10::optional<Tensor>> result;
+   result.reserve(xs.size());
+   for (const SavedVariable& v : xs) {
+     auto var = v.unpack(saved_for);
+     result.push_back(var.defined() ? c10::optional<Tensor>(var) : c10::nullopt);
+   }
+   return result;
+ }
+ 
+ using torch::autograd::TypeAndSize;
+ 
+ ${autograd_function_declarations}
+ 
+ }}} // namespace torch::autograd::generated
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/TraceType.cpp ADDED
@@ -0,0 +1,40 @@
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+ #include "torch/csrc/jit/frontend/tracer.h"
+ 
+ #include <torch/library.h>
+ 
+ #include "torch/csrc/autograd/function.h"
+ 
+ #include "ATen/quantized/Quantizer.h"
+ 
+ // ${generated_comment}
+ 
+ // See the `Tracer` section in `torch/csrc/jit/OVERVIEW.md`.
+ // NOTE See [Sharded File] comment in VariableType
+ 
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Operators.h>
+ #else
+ $ops_headers
+ #endif
+ 
+ using namespace at;
+ 
+ namespace torch {
+ 
+ namespace TraceType {
+ 
+ namespace {
+ ${trace_method_definitions}
+ } // namespace
+ } // namespace TraceType
+ 
+ namespace {
+ 
+ TORCH_LIBRARY_IMPL(aten, Tracer, m) {
+   ${trace_wrapper_registrations};
+ }
+ 
+ } // namespace
+ 
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torchgen/packaged/autograd/templates/VariableType.cpp ADDED
@@ -0,0 +1,65 @@
+ #include "torch/csrc/autograd/VariableTypeUtils.h"
+ #include "torch/csrc/autograd/generated/VariableType.h"
+ #include "torch/csrc/autograd/FunctionsManual.h"
+ 
+ #include <ATen/RedispatchFunctions.h>
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
+ #include <ATen/core/TorchDispatchUtils.h>
+ #include <torch/library.h>
+ 
+ #include <ATen/SparseCsrTensorUtils.h>
+ 
+ 
+ // ${generated_comment}
+ 
+ // NOTE [Sharded File]: on this file's split-into-shards state
+ //
+ // Back in the good old days, VariableType.cpp was generated as one
+ // file with every function in it, and everything was great and
+ // simple.
+ //
+ // However, this file was also very large (over 36,000 lines), and
+ // compiling it was very slow, and in fact was a significant
+ // bottleneck for incremental rebuilds. To address this, we now
+ // generate the file split across multiple shards, named
+ // VariableType_0.cpp and so on, which can be compiled in parallel.
+ //
+ // For ease of inspection and debugging, so that it's not necessary to
+ // go rooting around in multiple files, we also generate all the
+ // functions together in VariableTypeEverything.cpp. This generated
+ // file is only for convenience; it's not actually used in the
+ // build. If the file you're looking at now is one of the shards, you
+ // may want to switch over to the Everything variant to make your
+ // grepping smoother.
+ 
+ using namespace at;
+ using namespace torch::autograd::generated;
+ using namespace torch::autograd::generated::details;
+ 
+ 
+ namespace torch::autograd {
+ 
+ namespace VariableType {
+ namespace{
+ C10_UNUSED void reset_grad_accumulator(Variable & self) {
+   AutogradMeta* meta = torch::autograd::impl::get_autograd_meta(self);
+   if (meta != nullptr) {
+     meta->grad_accumulator_.reset();
+   }
+ }
+ }
+ 
+ namespace {
+ 
+ 
+ ${type_derived_method_definitions}
+ }
+ }
+ 
+ namespace {
+ 
+ ${wrapper_registrations}
+ 
+ }
+ 
+ } // namespace torch::autograd