applied-ai-018 committed
Commit 9f16638 · verified · 1 Parent(s): c6a9a4b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h +631 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h +441 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h +687 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_native.h +25 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h +30 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h +23 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h +24 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h +39 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h +101 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h +24 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h +26 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_native.h +22 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h +22 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h +44 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h +25 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_native.h +22 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h +23 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h +28 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h +22 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view_ops.h +39 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h +28 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_native.h +21 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h +25 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_ops.h +28 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors.h +30 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_ops.h +50 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts.h +39 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h +24 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta_dispatch.h +26 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h +39 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h +23 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta_dispatch.h +26 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_native.h +22 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h +22 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cuda_dispatch.h +26 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta_dispatch.h +26 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h +23 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h +28 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_ops.h +39 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h +23 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h +26 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h +39 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h +27 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h +24 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h +22 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_compositeexplicitautograd_dispatch.h +30 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward_cuda_dispatch.h +28 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h +23 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h ADDED
@@ -0,0 +1,631 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue_to.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <functional>
8
+ #include <initializer_list>
9
+ #include <iterator>
10
+ #include <type_traits>
11
+
12
+ /*
13
+ * [Note: IListRef]
14
+ * Wrapper around different API containers (e.g. boxed and unboxed).
15
+ *
16
+ * What is it?
17
+ * ===========
18
+ * It is a tagged union of both boxed and unboxed API containers.
19
+ * Working implementations:
20
+ *
21
+ * - `IListRef<at::Tensor>`
22
+ * - `IListRef<at::OptionalTensorRef>`
23
+ *
24
+ * Note that `IListRef` is a view type. Meaning that it won't own the
25
+ * tensors it holds. It's intended to be used only as argument parameters.
26
+ * Specifically, where these 2 worlds overlap.
27
+ *
28
+ * What is this for?
29
+ * =================
30
+ * Historically, PyTorch has maintained 2 different APIs: the unboxed
31
+ * (called from C++ API and Python eager mode) and boxed APIs (called
32
+ * from the TorchScript JIT, mobile interpreter, and boxed fallbacks).
33
+ *
34
+ * Calling unboxed kernels from the boxed "world" and vice-versa may
35
+ * result in non-negligible overhead. Lists are one of those types:
36
+ *
37
+ * - Boxed world: `c10::List`
38
+ * - Unboxed world: `c10::ArrayRef`
39
+ *
40
+ * In this context, `c10::IListRef` solves this problem by wrapping those
41
+ * 2 container types, so that we don't need to convert from one to
42
+ * the other.
43
+ *
44
+ * (see https://github.com/pytorch/pytorch/issues/66328)
45
+ *
46
+ * What does it do?
47
+ * ================
48
+ * This container wraps around the different tagged containers
49
+ * (currently, only boxed and unboxed), without incurring in extra
50
+ * overhead for converting from one to another. It does so while
51
+ * exposing usual container methods, which dispatch to corresponding
52
+ * implementations.
53
+ *
54
+ * While it works with different container types, it introduces
55
+ * overhead for repeatedly calling member functions (since those will
56
+ * get dispatched, again). Therefore, you should only use it to iterate
57
+ * through the list up to one time. If you need to do more complex things,
58
+ * call `materialize()` first.
59
+ *
60
+ * Adding support for a new Tag
61
+ * ============================
62
+ * Suppose we want to add a new tag: `Chest`. Here are the steps
63
+ * we would have to go through:
64
+ *
65
+ * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`.
66
+ *
67
+ * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
68
+ * ...
69
+ * _(Chest, ##__VA_ARGS__)
70
+ *
71
+ * 2. Add type aliases, union members, and constructors.
72
+ *
73
+ * template <typename T>
74
+ * class IListRef {
75
+ * ...
76
+ * using chest_type =
77
+ * typename detail::IListRefTagImpl<T, IListRefTag::Chest>::list_type;
78
+ * ...
79
+ * IListRef(...) : tag_(IListRefTag::Chest) {
80
+ * ...
81
+ * }
82
+ * ...
83
+ * union Payload {
84
+ * ...
85
+ * chest_type chest;
86
+ * ...
87
+ * };
88
+ * ...
89
+ * };
90
+ *
91
+ * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's
92
+ * preferable to make the default implementation work for `T = Tensor`
93
+ * (both `Unboxed` and `Boxed` do it).
94
+ *
95
+ * template <typename T, typename ListElemT>
96
+ * class IListRefTagImplBase<IListRefTag::Chest, T, ListElemT> {
97
+ * public:
98
+ * using elem_type = ListElemT;
99
+ * using list_type = ChestContainer<elem_type>;
100
+ *
101
+ * static const list_type& unwrap(const IListRef<T>& ilist) { ... }
102
+ *
103
+ * static typename list_type::const_iterator& unwrap(
104
+ * IListRefIterator<T>& it) { ... }
105
+ *
106
+ * static const typename list_type::const_iterator& unwrap(
107
+ * const IListRefIterator<T>& it) { ... }
108
+ *
109
+ * static IListRefConstRef<T> iterator_get(
110
+ * const typename list_type::const_iterator& it) { ... }
111
+ * }
112
+ *
113
+ * 4. Add a specialization for each of the already supported types.
114
+ * Finally, for consistency, add them to the tracking list.
115
+ * (see [Note: IListRefTagImpl Specializations])
116
+ *
117
+ * template <>
118
+ * class IListRefTagImpl<IListRefTag::Chest, at::Tensor>
119
+ * : public IListRefTagImplBase<IListRefTag::Chest, at::Tensor> {};
120
+ *
121
+ * Adding support for a new Type
122
+ * =============================
123
+ * Suppose we want to add support for a new type: `Matrix`.
124
+ * Here are the steps we would have to go through:
125
+ *
126
+ * 1. Add a specialization for each of the existing tags.
127
+ * For consistency, add them to the tracking list.
128
+ * (see [Note: IListRefTagImpl Specializations])
129
+ *
130
+ * template <>
131
+ * class IListRefTagImpl<IListRefTag::Unboxed, Matrix>
132
+ * : public IListRefTagImplBase<IListRefTag::Unboxed, Matrix> {};
133
+ *
134
+ * template <>
135
+ * class IListRefTagImpl<IListRefTag::Boxed, Matrix>
136
+ * : public IListRefTagImplBase<IListRefTag::Boxed, Matrix> {};
137
+ *
138
+ * Common Problems
139
+ * ===============
140
+ * 1. One of the `IListRef(Iterator)` methods fails to compile.
141
+ *
142
+ * That may be happening because the container type you added
143
+ * is not compatible with the code written for that method. If
144
+ * that's true, then you might have to transform that code into
145
+ * a static method call (see `List::operator[]` method).
146
+ *
147
+ * 2. Can't make `IListRefIterator<T>::operator*` return a const-reference.
148
+ *
149
+ * First, keep in mind that we assume that boxed containers will
150
+ * have to deal with `IValue` (e.g. `c10::List`). In this context,
151
+ * what may be happening is that `IValue` doesn't store internally
152
+ * your type `T`. Instead, it constructs a new `T` every time
153
+ * you try to get `T` for it (see `IListRef<at::OptionalTensorRef>`).
154
+ */
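To make the note above concrete, here is a minimal illustrative sketch (not part of this header) of a kernel-style function that accepts an `IListRef` and traverses it exactly once. The helper name `total_numel` and the calls shown are hypothetical; the point is that both an unboxed (`at::ArrayRef<at::Tensor>`) and a boxed (`c10::List<at::Tensor>`) caller can reach it without converting containers.

#include <ATen/core/IListRef.h>
#include <ATen/core/List.h>
#include <ATen/core/Tensor.h>

// Hypothetical helper: a single pass, so the per-step tag dispatch of
// IListRef is paid only once per element.
int64_t total_numel(c10::IListRef<at::Tensor> tensors) {
  int64_t total = 0;
  for (const at::Tensor& t : tensors) {
    total += t.numel();
  }
  return total;
}

// Both call forms work without any container conversion:
//   total_numel(at::ArrayRef<at::Tensor>(vec));   // unboxed caller
//   total_numel(boxed_list);                      // c10::List<at::Tensor> caller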
155
+
156
+ namespace c10 {
157
+ template <typename T>
158
+ class IListRef;
159
+
160
+ /*
161
+ * Applies arbitrary macros to each `IListRefTag`.
162
+ */
163
+ #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
164
+ _(Unboxed, ##__VA_ARGS__) \
165
+ _(Boxed, ##__VA_ARGS__) \
166
+ _(Materialized, ##__VA_ARGS__)
167
+
168
+ /*
169
+ * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`,
170
+ * while bringing to scope:
171
+ *
172
+ * - `ImplT`: the implementation class for `TAG`
173
+ * - `this_`: the result of unwrapping `this`
174
+ */
175
+ #define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) \
176
+ case c10::IListRefTag::TAG: { \
177
+ using ImplT = c10::detail::IListRefTagImpl<IListRefTag::TAG, T>; \
178
+ auto& this_ = ImplT::unwrap(*this); \
179
+ BODY \
180
+ } break;
181
+
182
+ /*
183
+ * Dispatches the unwrap call, depending on `TAG`, followed by
184
+ * the execution of `BODY`. It aborts if `TAG` is not an `IListRefTag`.
185
+ *
186
+ * This macro is useful because it allows us to handle different
187
+ * types (that correspond to different tags) to be implemented
188
+ * only once. We can do it even when the implementation of the
189
+ * different tags aren't syntactically the same, by dispatching
190
+ * it to a function (e.g. `ImplT::<dispatch-function>(this_)`).
191
+ */
192
+ #define TORCH_ILISTREF_UNWRAP(TAG, BODY) \
193
+ switch (TAG) { \
194
+ TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \
195
+ break; \
196
+ default: \
197
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \
198
+ }
199
+
200
+ enum class IListRefTag {
201
+ #define DEFINE_TAG(tag, ...) tag,
202
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG)
203
+ #undef DEFINE_TAG
204
+ None
205
+ };
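For reference, with the three tags registered in `TORCH_ILISTREF_FORALL_TAGS` plus the trailing `None`, the `DEFINE_TAG` expansion amounts to roughly the following (illustrative, not literal preprocessor output):

enum class IListRefTag {
  Unboxed,
  Boxed,
  Materialized,
  None
};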
206
+
207
+ namespace detail {
208
+ /*
209
+ * Type alias that specifies whether we return a reference or a copy of `T`.
210
+ *
211
+ * What is this for?
212
+ * =================
213
+ * Since values in the boxed world are represented by an `IValue`, we also
214
+ * depend on whether it can be converted to a const-reference (`Tensor`) or
215
+ * has to create a new copy of `T` (`OptionalTensorRef`).
216
+ */
217
+ template <typename T>
218
+ using IListRefConstRef = typename ivalue_to_const_ref_overload_return<T>::type;
219
+
220
+ /*
221
+ * Interface that implements key functions for each `IListRefTag` type.
222
+ *
223
+ * What is this for?
224
+ * =================
225
+ * Given an `IListRef(Iterator)<T>`, some methods have to be implemented
226
+ * differently for each `TAG`. Therefore, the methods inside this class
227
+ * are used as dispatch targets for the different `IListRefTag` values.
228
+ *
229
+ * You should create a specialization of this class for each possible
230
+ * combination of `IListRefTag` type (except `None`) and element types
231
+ * (e.g. `Tensor`).
232
+ *
233
+ * What does it do?
234
+ * ================
235
+ * 1. defines static methods to be used as dispatch targets by both
236
+ * `IListRef<T>` and `IListRefIterator<T>` (see the implementation of
237
+ * `IListRefTagImplBase`).
238
+ *
239
+ * 2. defines the `elem_type` and `list_type` aliases that will be
240
+ * used in the definition of `IListRef<T>`. In general, we should do
241
+ * so by inheriting from `IListRefTagImplBase<TAG, T, ListElemT>`.
242
+ *
243
+ * [Note: IListRefTagImpl Specialization]
244
+ * ======================================
245
+ * For `IListRef(Iterator)<at::Tensor>`:
246
+ * - <IListRefTag::Unboxed, at::Tensor>
247
+ * - <IListRefTag::Boxed, at::Tensor>
248
+ * - <IListRefTag::Materialized, at::Tensor>
249
+ *
250
+ * For `IListRef(Iterator)<at::OptionalTensorRef>`:
251
+ * - <IListRefTag::Unboxed, at::OptionalTensorRef>
252
+ * - <IListRefTag::Boxed, at::OptionalTensorRef>
253
+ * - <IListRefTag::Materialized, at::OptionalTensorRef>
254
+ */
255
+ template <IListRefTag TAG, typename T>
256
+ class IListRefTagImpl {};
257
+
258
+ /*
259
+ * Base implementation of `IListRefTagImpl<TAG, T>` methods.
260
+ *
261
+ * What is this for?
262
+ * =================
263
+ * This should make adding specializations for new types easier. For
264
+ * example, one should be able to add a new type just by making its
265
+ * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`.
266
+ *
267
+ * You should create a partial specialization for this class only if
268
+ * you introduce a new `IListRefTag`. The idea being that there is one
269
+ * default implementation for each possible value of `IListRefTag`.
270
+ *
271
+ * What does it do?
272
+ * ================
273
+ * 1. defines `elem_type` as an alias to `ListElemT`.
274
+ *
275
+ * 2. defines `list_type` as an alias to the default container type
276
+ * that will hold a collection of `elem_type`. The idea being that
277
+ * all types tagged as `TAG` will have `list_type` as its container,
278
+ * with different `elem_type`.
279
+ *
280
+ * 3. defines the default implementation for each of the methods that
281
+ * are supposed to be defined on `IListRefTagImpl` specializations.
282
+ *
283
+ * 4. inheriting from `IListRefTagImplBase<TAG, T, ListElemT>` also means
284
+ * that the payload of the type `IListRef<T>` will be of type `list_type`
285
+ * when it is tagged as `TAG`.
286
+ */
287
+ template <IListRefTag TAG, typename T, typename ListElemT = T>
288
+ class IListRefTagImplBase {};
289
+
290
+ /*
291
+ * Materialized container for `IListRef<T>`.
292
+ *
293
+ * What is this for?
294
+ * =================
295
+ * Container that groups `T` references together. This exchanges the
296
+ * overhead of every method call from `IListRef<T>` for a dynamic allocation.
297
+ *
298
+ * You should use this container instead of `IListRef<T>` if:
299
+ *
300
+ * - You are going to iterate the list more than once
301
+ * - You need to repeatedly access arbitrary elements (using `operator[]`)
302
+ *
303
+ * What does it do?
304
+ * ================
305
+ * Removes the reference (&) from the type, and wraps it into a
306
+ * `std::reference_wrapper`. If `IListRefConstRef<T>` is not a
307
+ * reference type, then it's left unchanged.
308
+ */
309
+ template <typename T>
310
+ using _MaterializedIListRefElem = typename std::conditional<
311
+ std::is_reference<T>::value,
312
+ typename std::reference_wrapper<typename std::remove_reference<T>::type>,
313
+ T>::type;
314
+
315
+ template <typename T>
316
+ using MaterializedIListRefElem = _MaterializedIListRefElem<IListRefConstRef<T>>;
317
+
318
+ template <typename T>
319
+ using MaterializedIListRef = std::vector<MaterializedIListRefElem<T>>;
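As a concrete illustration of the aliases above (following the header's own notes on which element types are references), the instantiations work out roughly as follows:

// Illustrative instantiations:
//   MaterializedIListRef<at::Tensor>
//       -> std::vector<std::reference_wrapper<const at::Tensor>>
//   MaterializedIListRef<at::OptionalTensorRef>
//       -> std::vector<at::OptionalTensorRef>   // stored by value, not by reference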
320
+
321
+ } // namespace detail
322
+
323
+ /*
324
+ * Iterator for `IListRef<T>`.
325
+ *
326
+ * What is it?
327
+ * ===========
328
+ * Currently, a `std::bidirectional_iterator` that wraps the iterator
329
+ * types defined for each of the `IListRefTag`.
330
+ *
331
+ * One should be able to use it, as if it were the unwrapped
332
+ * iterators themselves.
333
+ *
334
+ * What does it do?
335
+ * ================
336
+ * Similarly to `IListRef<T>`, this is a wrapper class. Specifically, it
337
+ * wraps each container's `const_iterator` type alias. So, for example,
338
+ * given that the container for `IListRefTag::Boxed` is `c10::List`, this
339
+ * iterator will wrap a `c10::List::const_iterator`.
340
+ *
341
+ * [Note: MSVC Iterator Debug]
342
+ * ===========================
343
+ * MSVC `vector<T>::iterator` implementation (used in the boxed variant)
344
+ * makes it so this union's destructor, copy-constructor (assignment), and
345
+ * move-constructor (assignment) are implicitly deleted.
346
+ *
347
+ * Therefore, we need to explicitly define them as needed. Follows a list
348
+ * of places where these are needed and their reason:
349
+ *
350
+ * - `Payload` destructor:
351
+ * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2.
352
+ *
353
+ * - `IListRefIterator` destructor:
354
+ * same as above. However, we need to explicitly call the variant
355
+ * destructor explicitly.
356
+ *
357
+ * - `IListRefIterator` copy-constructor:
358
+ * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different
359
+ * than 0.
360
+ */
361
+ template <typename T>
362
+ class IListRefIterator {
363
+ private:
364
+ #define DEFINE_FRIEND_CLASS(TAG, ...) \
365
+ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
366
+ friend class detail::IListRefTagImplBase< \
367
+ IListRefTag::TAG, \
368
+ T, \
369
+ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
370
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
371
+ #undef DEFINE_FRIEND_CLASS
372
+
373
+ public:
374
+ // C++17 friendly std::iterator implementation
375
+ using iterator_category = std::bidirectional_iterator_tag;
376
+ using value_type = T;
377
+ using difference_type = std::ptrdiff_t;
378
+ using pointer = T*;
379
+ using reference = T&;
380
+
381
+ using unboxed_iterator_type = typename detail::
382
+ IListRefTagImpl<IListRefTag::Unboxed, T>::list_type::const_iterator;
383
+ using boxed_iterator_type = typename detail::
384
+ IListRefTagImpl<IListRefTag::Boxed, T>::list_type::const_iterator;
385
+ using materialized_iterator_type =
386
+ typename detail::MaterializedIListRef<T>::const_iterator;
387
+
388
+ IListRefIterator() : tag_(IListRefTag::None) {}
389
+
390
+ #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0
391
+ // See [Note: MSVC Iterator Debug]
392
+ IListRefIterator(const IListRefIterator& iterator)
393
+ : tag_(iterator.tag_) {
394
+ switch (tag_) {
395
+ case IListRefTag::Boxed:
396
+ payload_.boxed_iterator = iterator.payload_.boxed_iterator;
397
+ break;
398
+ case IListRefTag::Unboxed:
399
+ payload_.unboxed_iterator = iterator.payload_.unboxed_iterator;
400
+ break;
401
+ case IListRefTag::Materialized:
402
+ payload_.materialized_iterator = iterator.payload_.materialized_iterator;
403
+ break;
404
+ default:
405
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
406
+ }
407
+ }
408
+ #endif
409
+
410
+ #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2
411
+ // See [Note: MSVC Iterator Debug]
412
+ ~IListRefIterator() noexcept(false) {
413
+ switch (tag_) {
414
+ case IListRefTag::Boxed:
415
+ payload_.boxed_iterator.~boxed_iterator_type();
416
+ break;
417
+ case IListRefTag::Unboxed:
418
+ payload_.unboxed_iterator.~unboxed_iterator_type();
419
+ break;
420
+ case IListRefTag::Materialized:
421
+ payload_.materialized_iterator.~materialized_iterator_type();
422
+ break;
423
+ default:
424
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
425
+ }
426
+ }
427
+ #endif
428
+
429
+ IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) {
430
+ payload_.boxed_iterator = boxed;
431
+ }
432
+
433
+ IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) {
434
+ payload_.unboxed_iterator = unboxed;
435
+ }
436
+
437
+ IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) {
438
+ payload_.materialized_iterator = materialized;
439
+ }
440
+
441
+ detail::IListRefConstRef<T> operator*() const {
442
+ TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); });
443
+ }
444
+
445
+ IListRefIterator& operator++() {
446
+ TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
447
+ return *this;
448
+ }
449
+
450
+ IListRefIterator operator++(int) {
451
+ auto old = *this;
452
+ TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
453
+ return old;
454
+ }
455
+
456
+ IListRefIterator& operator--() {
457
+ TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
458
+ return *this;
459
+ }
460
+
461
+ IListRefIterator operator--(int) {
462
+ auto old = *this;
463
+ TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
464
+ return old;
465
+ }
466
+
467
+ bool operator==(const IListRefIterator& rhs) const {
468
+ if (tag_ != rhs.tag_) {
469
+ return false;
470
+ }
471
+ TORCH_ILISTREF_UNWRAP(tag_, {
472
+ auto& rhs_it = ImplT::unwrap(rhs);
473
+ return this_ == rhs_it;
474
+ });
475
+ }
476
+
477
+ bool operator!=(const IListRefIterator& rhs) const {
478
+ return !(*this == rhs);
479
+ }
480
+
481
+ private:
482
+ union Payload {
483
+ boxed_iterator_type boxed_iterator;
484
+ unboxed_iterator_type unboxed_iterator;
485
+ materialized_iterator_type materialized_iterator;
486
+ void* _init_ptr;
487
+ Payload() : _init_ptr(nullptr) {}
488
+ #if defined(_MSC_VER)
489
+ // See [Note: MSVC Iterator Debug]
490
+ ~Payload() {}
491
+ #endif
492
+ };
493
+
494
+ Payload payload_;
495
+ IListRefTag tag_;
496
+ };
497
+
498
+ /*
499
+ * See [Note: IListRef]
500
+ */
501
+ template <typename T>
502
+ class IListRef {
503
+ private:
504
+ #define DEFINE_FRIEND_CLASS(TAG, ...) \
505
+ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
506
+ friend class detail::IListRefTagImplBase< \
507
+ IListRefTag::TAG, \
508
+ T, \
509
+ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
510
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
511
+ #undef DEFINE_FRIEND_CLASS
512
+
513
+ public:
514
+ using unboxed_type =
515
+ typename detail::IListRefTagImpl<IListRefTag::Unboxed, T>::list_type;
516
+ using boxed_type =
517
+ typename detail::IListRefTagImpl<IListRefTag::Boxed, T>::list_type;
518
+ using materialized_type =
519
+ typename detail::MaterializedIListRef<T>;
520
+
521
+ using iterator = IListRefIterator<T>;
522
+ using const_iterator = IListRefIterator<T>;
523
+ using reverse_iterator = std::reverse_iterator<iterator>;
524
+ using value_type = typename iterator::value_type;
525
+
526
+ IListRef() : tag_(IListRefTag::None) {}
527
+
528
+ IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) {
529
+ payload_.boxed = &boxed;
530
+ }
531
+
532
+ IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) {
533
+ payload_.unboxed = unboxed;
534
+ }
535
+
536
+ IListRef(const std::initializer_list<T>& list) : tag_(IListRefTag::Unboxed) {
537
+ payload_.unboxed = at::ArrayRef<T>(list);
538
+ }
539
+
540
+ template <
541
+ typename... UnboxedConstructorArgs,
542
+ typename = std::enable_if_t<
543
+ std::is_constructible<unboxed_type, UnboxedConstructorArgs...>::value>>
544
+ IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) {
545
+ payload_.unboxed = unboxed_type(std::forward<UnboxedConstructorArgs>(args)...);
546
+ }
547
+
548
+ IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) {
549
+ payload_.materialized = &materialized;
550
+ }
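Taken together, the constructors above let callers hand several container shapes to an `IListRef` parameter without an explicit conversion. A small illustrative sketch (the names `consume`, `a`, `b`, and `boxed` are hypothetical); note that because `IListRef` is a view, every argument must outlive the call:

void consume(c10::IListRef<at::Tensor> xs);   // hypothetical consumer

std::vector<at::Tensor> vec = {a, b};         // a, b: at::Tensor
c10::List<at::Tensor> boxed({a, b});

consume(vec);      // unboxed: ArrayRef is constructible from std::vector
consume({a, b});   // unboxed: initializer_list constructor
consume(boxed);    // boxed: c10::List constructor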
551
+
552
+ size_t size() const {
553
+ TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); });
554
+ }
555
+
556
+ bool empty() const {
557
+ return size() == 0;
558
+ }
559
+
560
+ iterator begin() const {
561
+ TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); });
562
+ }
563
+
564
+ iterator end() const {
565
+ TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); });
566
+ }
567
+
568
+ detail::IListRefConstRef<T> front() const {
569
+ TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); });
570
+ }
571
+
572
+ /*
573
+ * Materializes the `IListRef` into a `std::vector`.
574
+ *
575
+ * This should be used when one wishes to either:
576
+ *
577
+ * - iterate over the list more than once: each `IListRefIterator`
578
+ * member function call has to go through a switch, introducing
579
+ * non-negligible overhead
580
+ *
581
+ * - randomly access an arbitrary element using `operator[]`:
582
+ * same reason as above
583
+ */
584
+ detail::MaterializedIListRef<T> materialize() const {
585
+ if (isMaterialized()) {
586
+ return toMaterialized();
587
+ }
588
+
589
+ detail::MaterializedIListRef<T> materialized;
590
+ materialized.reserve(size());
591
+ for (const auto& t : *this) {
592
+ materialized.emplace_back(t);
593
+ }
594
+ return materialized;
595
+ }
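As a rough illustration of the guidance above (hypothetical function, not part of this header), a caller that needs more than one pass or random access would materialize first:

void two_passes(c10::IListRef<at::Tensor> tensors) {
  const auto vec = tensors.materialize();    // elements are reference wrappers
  int64_t total = 0;
  for (const at::Tensor& t : vec) {          // first pass: no per-step tag dispatch
    total += t.numel();
  }
  for (size_t i = 0; i < vec.size(); ++i) {  // second pass: cheap random access
    const at::Tensor& t = vec[i];
    total -= t.numel();
  }
  TORCH_INTERNAL_ASSERT(total == 0);
}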
596
+
597
+ #define DEFINE_CHECK(TAG, ...) \
598
+ bool is##TAG() const { \
599
+ return tag_ == IListRefTag::TAG; \
600
+ }
601
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK);
602
+ #undef DEFINE_CHECK
603
+
604
+ bool isNone() const {
605
+ return tag_ == IListRefTag::None;
606
+ }
607
+
608
+ #define DEFINE_CASTING(TAG, ...) \
609
+ const typename detail::IListRefTagImpl<IListRefTag::TAG, T>::list_type& \
610
+ to##TAG() const { \
611
+ TORCH_INTERNAL_ASSERT(is##TAG()); \
612
+ return detail::IListRefTagImpl<IListRefTag::TAG, T>::unwrap(*this); \
613
+ }
614
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING);
615
+ #undef DEFINE_CASTING
616
+
617
+ private:
618
+ union Payload {
619
+ const boxed_type* boxed;
620
+ unboxed_type unboxed;
621
+ const materialized_type* materialized;
622
+ Payload() : boxed(nullptr) {}
623
+ };
624
+
625
+ Payload payload_;
626
+ IListRefTag tag_;
627
+ };
628
+
629
+ } // namespace c10
630
+
631
+ #include <ATen/core/IListRef_inl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h ADDED
@@ -0,0 +1,441 @@
1
+ #pragma once
2
+
3
+ #include <memory>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/jit_type_base.h>
7
+ #include <c10/util/Optional.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ struct CompilationUnit;
12
+ struct Function;
13
+ } // namespace jit
14
+ } // namespace torch
15
+
16
+ namespace c10 {
17
+
18
+ struct FunctionSchema;
19
+
20
+ // This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither.
21
+ // This state is mutually exclusive. Buffers and Parameters can only appear on modules.
22
+ enum class AttributeKind {
23
+ BUFFER,
24
+ PARAMETER,
25
+ REGULAR_ATTRIBUTE
26
+ };
27
+
28
+ // This structure represents all notional bookkeeping entities in a class attribute: name, kind (see: AttributeKind), and type (see: TypePtr).
29
+ // Note: This structure does not represent the value of the attribute.
30
+ struct TORCH_API ClassAttribute {
31
+ public:
32
+ ClassAttribute(AttributeKind kind,
33
+ TypePtr attributeType,
34
+ std::string attributeName) :
35
+ kind_(kind),
36
+ attributeType_(std::move(attributeType)),
37
+ attributeName_(std::move(attributeName)) {}
38
+
39
+ AttributeKind getKind() const {
40
+ return kind_;
41
+ }
42
+
43
+ const TypePtr& getType() const {
44
+ return attributeType_;
45
+ }
46
+
47
+ const std::string& getName() const {
48
+ return attributeName_;
49
+ }
50
+
51
+ private:
52
+ AttributeKind kind_;
53
+ TypePtr attributeType_;
54
+ std::string attributeName_;
55
+ };
56
+
57
+ /**
58
+ * User Defined Types
59
+ */
60
+
61
+ struct ClassType;
62
+ using ClassTypePtr = std::shared_ptr<ClassType>;
63
+ using ::torch::jit::CompilationUnit;
64
+
65
+ // This represents a class in TorchScript.
66
+ struct TORCH_API ClassType : public NamedType {
67
+ // This represents an attribute of a class; a name associated with an attribute, and a
68
+ // getter and (optional) setter for that attribute.
69
+ struct Property {
70
+ std::string name;
71
+ torch::jit::Function* getter;
72
+ torch::jit::Function* setter;
73
+ };
74
+
75
+ // Create a class type with name `name` and its methods stored in `cu`.
76
+ static ClassTypePtr create(
77
+ c10::optional<QualifiedName> qualifiedName,
78
+ std::weak_ptr<CompilationUnit> cu,
79
+ bool is_module = false,
80
+ std::string doc_string = "",
81
+ std::vector<std::string> unresolved_class_attributes = {});
82
+
83
+ bool equals(const Type& rhs) const override {
84
+ if (this == &rhs) {
85
+ return true;
86
+ }
87
+ if (auto user_rhs = rhs.castRaw<ClassType>()) {
88
+ const auto& lhs_name = name().value();
89
+ const auto& rhs_name = user_rhs->name().value();
90
+
91
+ return lhs_name == rhs_name &&
92
+ this->compilation_unit() == user_rhs->compilation_unit();
93
+ }
94
+ return false;
95
+ }
96
+
97
+ std::string str() const override {
98
+ return annotation_str();
99
+ }
100
+
101
+ std::string repr_str() const override {
102
+ std::stringstream ss;
103
+ ss << str()
104
+ << " (of Python compilation unit at: " << compilation_unit().get() << ")";
105
+ return ss.str();
106
+ }
107
+
108
+ const std::vector<torch::jit::Function*>& methods() const;
109
+
110
+ TypePtr findAttribute(const std::string& name) const {
111
+ size_t pos = 0;
112
+ for (const auto& attr : attributes_) {
113
+ if (name == attr.getName()) {
114
+ break;
115
+ }
116
+ ++pos;
117
+ }
118
+
119
+ if (pos >= attributes_.size()) {
120
+ return nullptr;
121
+ }
122
+ return attributes_[pos].getType();
123
+ }
124
+
125
+ const TypePtr& getAttribute(const std::string& name) const {
126
+ auto slot = findAttributeSlot(name);
127
+ TORCH_CHECK(
128
+ slot,
129
+ repr_str(),
130
+ " does not have an attribute with name '",
131
+ name,
132
+ "'");
133
+ return attributes_[*slot].getType();
134
+ }
135
+
136
+ size_t numAttributes() const {
137
+ return attributes_.size();
138
+ }
139
+
140
+ const TypePtr& getAttribute(size_t slot) const {
141
+ AT_ASSERT(slot < attributes_.size());
142
+ return attributes_.at(slot).getType();
143
+ }
144
+
145
+ const std::string getAttributeName(size_t slot) const {
146
+ AT_ASSERT(slot < attributes_.size());
147
+ return attributes_[slot].getName();
148
+ }
149
+
150
+ void checkNotExist(const std::string& name, const std::string& what) const;
151
+
152
+ // Attributes are stored in a specific slot at runtime for efficiency.
153
+ // When emitting instructions we specify the slot so that attribute access is
154
+ // a constant lookup
155
+ c10::optional<size_t> findAttributeSlot(const std::string& name) const {
156
+ size_t slot = 0;
157
+ for (const auto& attr : attributes_) {
158
+ if (name == attr.getName()) {
159
+ return slot;
160
+ }
161
+ slot++;
162
+ }
163
+ return c10::nullopt;
164
+ }
165
+ size_t getAttributeSlot(const std::string& name) const {
166
+ if (auto r = findAttributeSlot(name)) {
167
+ return *r;
168
+ }
169
+ TORCH_CHECK(
170
+ false,
171
+ repr_str(),
172
+ " does not have an attribute with name '",
173
+ name,
174
+ "'");
175
+ }
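As a small illustrative sketch (the helper name is hypothetical), the slot APIs above are typically used to resolve a name once and then work with the constant index:

// Hypothetical helper: returns nullptr instead of throwing when the
// attribute is missing.
c10::TypePtr attribute_type_or_null(
    const c10::ClassType& cls, const std::string& name) {
  if (auto slot = cls.findAttributeSlot(name)) {
    return cls.getAttribute(*slot);   // constant-index lookup
  }
  return nullptr;
}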
176
+
177
+ bool hasAttribute(const std::string& name) const {
178
+ return std::find_if(
179
+ attributes_.cbegin(),
180
+ attributes_.cend(),
181
+ [&](const ClassAttribute& attr) { return attr.getName() == name; }) !=
182
+ attributes_.cend();
183
+ }
184
+
185
+ bool isUnresolvedClassAttribute(const std::string& name) const;
186
+
187
+ at::ArrayRef<TypePtr> containedTypes() const override {
188
+ return attributeTypes_;
189
+ }
190
+
191
+ size_t addAttribute(
192
+ const std::string& name,
193
+ TypePtr type,
194
+ bool is_parameter = false,
195
+ bool is_buffer = false);
196
+
197
+ // [Internal Only] Remove attribute from the ClassType,
198
+ // caller is responsible to make sure the modification is safe:
199
+ // it is unsafe to have existing allocations
200
+ // of this object around anymore, and any code that works on
201
+ // the attribute is now invalid. Only newly created code is
202
+ // valid again.
203
+ void unsafeRemoveAttribute(const std::string& name);
204
+
205
+ // [Internal Only] Change the type of an attribute of the ClassType,
206
+ // The caller is responsible to make sure the modification is safe:
207
+ // it is unsafe to maintain uses of the old type of the attribute,
208
+ // and any code that works on the attribute is now invalid.
209
+ // Only newly created code is valid again.
210
+ void unsafeChangeAttributeType(const std::string& name, const TypePtr& new_ty);
211
+
212
+ // Add attribute \p NAME if it doesn't exist or verify that it has a
213
+ // compatible type otherwise.
214
+ size_t addOrCheckAttribute(
215
+ const std::string& name,
216
+ TypePtr ty,
217
+ bool is_parameter = false,
218
+ bool is_buffer = false) {
219
+ auto slot_idx = findAttributeSlot(name);
220
+ if (!slot_idx) {
221
+ return addAttribute(name, std::move(ty), is_parameter, is_buffer);
222
+ }
223
+
224
+ TORCH_CHECK(
225
+ is_parameter == this->is_parameter(*slot_idx),
226
+ "Parameter field mismatch for the field '",
227
+ name,
228
+ "'");
229
+ const TypePtr& atype = getAttribute(*slot_idx);
230
+ TORCH_CHECK(
231
+ ty->isSubtypeOf(*atype),
232
+ ty->repr_str(),
233
+ " is not compatible with the type ",
234
+ atype->repr_str(),
235
+ " for the field '",
236
+ name,
237
+ "'");
238
+ return *slot_idx;
239
+ }
240
+
241
+ // Get the property with the given \p name, if it exists on the class.
242
+ c10::optional<ClassType::Property> getProperty(const std::string& name);
243
+ // Add a property named \p name with \p getter and \p setter as its getter and setter.
244
+ void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter);
245
+ // Get a list of all properties.
246
+ const std::vector<Property>& properties() const {
247
+ return properties_;
248
+ }
249
+
250
+ bool hasConstant(const std::string& name) const {
251
+ return std::find_if(
252
+ constantNames_.cbegin(),
253
+ constantNames_.cend(),
254
+ [&](const std::string& constant) { return constant == name; }) !=
255
+ constantNames_.cend();
256
+ }
257
+
258
+ size_t addConstant(const std::string& name, const IValue& value);
259
+
260
+ c10::optional<size_t> findConstantSlot(const std::string& name) const;
261
+
262
+ size_t getConstantSlot(const std::string& name) const {
263
+ if (auto r = findConstantSlot(name)) {
264
+ return *r;
265
+ }
266
+ TORCH_CHECK(
267
+ false,
268
+ repr_str(),
269
+ " does not have constant field with the name '",
270
+ name,
271
+ "'");
272
+ }
273
+
274
+ const std::string& getConstantName(size_t slot) const;
275
+
276
+ const std::string& doc_string() const {
277
+ return doc_string_;
278
+ }
279
+
280
+ IValue getConstant(const std::string& name) const;
281
+
282
+ IValue getConstant(size_t slot) const;
283
+
284
+ c10::optional<IValue> findConstant(const std::string& name) const;
285
+
286
+ size_t numConstants() const;
287
+
288
+ at::ArrayRef<std::string> constantNames() const {
289
+ return constantNames_;
290
+ }
291
+
292
+ at::ArrayRef<IValue> constantValues() const;
293
+
294
+ // [Internal Only] Remove constant from the ClassType
295
+ // caller is responsible to make sure the modification is safe:
296
+ // it is unsafe to have existing allocations
297
+ // of this object around anymore, and any code that works on
298
+ // the attribute is now invalid. Only newly created code is
299
+ // valid again.
300
+ void unsafeRemoveConstant(const std::string& name);
301
+
302
+ TypePtr createWithContained(std::vector<TypePtr> contained_types) const override {
303
+ auto ptr = ClassType::create(name(), compilation_unit_, is_module());
304
+ AT_ASSERT(numAttributes() == contained_types.size());
305
+ for(size_t i = 0; i < attributes_.size(); ++i) {
306
+ AT_ASSERT(attributes_[i].getType()->isSubtypeOf(*contained_types[i]));
307
+ ptr->addAttribute(attributes_[i].getName(), std::move(contained_types[i]));
308
+ }
309
+ // Copy methods over
310
+ for (const auto& method : methods()) {
311
+ ptr->addMethod(method);
312
+ }
313
+ return ptr;
314
+ }
315
+
316
+ bool is_module() const override {
317
+ return isModule_;
318
+ }
319
+
320
+ const std::vector<ClassAttribute>& getAttributes() const {
321
+ return attributes_;
322
+ }
323
+
324
+ bool is_parameter(size_t slot) const {
325
+ TORCH_INTERNAL_ASSERT(
326
+ is_module(), "asking for parameterSlots of non-Module");
327
+ return attributes_.at(slot).getKind() == AttributeKind::PARAMETER;
328
+ }
329
+
330
+ bool is_buffer(size_t slot) const {
331
+ TORCH_INTERNAL_ASSERT(
332
+ is_module(), "asking for bufferWrittenSlots of non-Module");
333
+ return attributes_.at(slot).getKind() == AttributeKind::BUFFER;
334
+ }
335
+
336
+ void addForwardPreHook(torch::jit::Function* pre_hook_ptr);
337
+ void addForwardHook(torch::jit::Function* hook_ptr);
338
+ torch::jit::Function* findForwardPreHook(const std::string& name) const;
339
+ torch::jit::Function* findForwardHook(const std::string& name) const;
340
+ const std::vector<torch::jit::Function*>& getForwardHooks() const;
341
+ const std::vector<torch::jit::Function*>& getForwardPreHooks() const;
342
+
343
+ void checkForwardPreHookSchema(
344
+ int pre_hook_idx,
345
+ const FunctionSchema& pre_hook_schema) const;
346
+ void checkForwardHookSchema(
347
+ int hook_idx,
348
+ const FunctionSchema& hook_schema) const;
349
+
350
+ void addMethod(torch::jit::Function* method);
351
+ torch::jit::Function* findMethod(const std::string& name) const;
352
+ torch::jit::Function& getMethod(const std::string& name) const;
353
+ torch::jit::Function* findHook(const std::string& name) const;
354
+ torch::jit::Function& getHook(const std::string& name) const;
355
+ bool hasMethod(const std::string& name) const;
356
+
357
+ torch::jit::Function* findStaticMethod(const std::string& name) const;
358
+ void addStaticMethod(torch::jit::Function* method);
359
+
360
+ // [Internal Only] Remove method from the ClassType
361
+ // caller is responsible to make sure the modification is safe:
362
+ // it is unsafe to have existing allocations
363
+ // of this object around anymore, and any code that works on
364
+ // the attribute is now invalid. Only newly created code is
365
+ // valid again.
366
+ // Note this method is intended for freezing only.
367
+ void unsafeRemoveMethod(const std::string& name);
368
+
369
+ std::shared_ptr<CompilationUnit> compilation_unit();
370
+
371
+ std::shared_ptr<const CompilationUnit> compilation_unit() const;
372
+
373
+ // generate a refined version of this class.
374
+ // It has the same name but the slot Types are subtypes of
375
+ // the original slots. It is only valid to refine a class type in a context
376
+ // where it is known that there are no assignments to the object's slots
377
+ // that would invalidate the refinement.
378
+ // These variants are not registered in the global class table.
379
+ ClassTypePtr refine(at::ArrayRef<TypePtr> refined_slots) const;
380
+
381
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
382
+
383
+ static const TypeKind Kind = TypeKind::ClassType;
384
+
385
+ private:
386
+ ClassType(
387
+ c10::optional<QualifiedName> name,
388
+ std::weak_ptr<CompilationUnit> cu,
389
+ bool is_module = false,
390
+ std::string doc_string = "",
391
+ std::vector<std::string> unresolved_class_attributes = {});
392
+
393
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
394
+ const auto& n = name().value();
395
+ return n.qualifiedName();
396
+ }
397
+
398
+ void addAttribute(ClassAttribute classAttribute);
399
+ std::string getForwardPreHookErrorMessage(int pre_hook_idx) const;
400
+ std::string getForwardHookErrorMessage(int hook_idx) const;
401
+
402
+ // Mapping of attribute names -> their type.
403
+ // NOTE: this does not contain methods, which are stored in the module
404
+ // TODO: once modules support arbitrary ivalue attributes, we don't need this
405
+ // anymore.
406
+ // TODO: This is better represented as an OrderedDict, but alas it is not yet
407
+ // available from c10
408
+
409
+ // Mapping of constant names -> their value.
410
+ std::vector<std::string> constantNames_;
411
+ std::vector<IValue> constantValues_;
412
+ // Holds method attributes
413
+ std::weak_ptr<CompilationUnit> compilation_unit_;
414
+
415
+ // Holds all attributes; attribute details are found on ClassAttribute
416
+ std::vector<ClassAttribute> attributes_;
417
+ // Construct mirroring attributes_, only around due to the fact that `containedTypes()` method returns an ArrayRef.
418
+ // Never fill this without using the appropriate provideNewClassAttribute method
419
+ std::vector<TypePtr> attributeTypes_;
420
+
421
+ // List of methods associated with this class.
422
+ std::vector<torch::jit::Function*> methods_;
423
+ std::vector<torch::jit::Function*> staticmethods_;
424
+
425
+ // List of hooks to be run before/after forward.
426
+ std::vector<torch::jit::Function*> forward_hooks_;
427
+ std::vector<torch::jit::Function*> forward_pre_hooks_;
428
+
429
+ // List of properties exposed by this class.
430
+ std::vector<Property> properties_;
431
+
432
+ bool isModule_ = false;
433
+
434
+ // Doc string of class.
435
+ std::string doc_string_ = "";
436
+
437
+ // For error reporting accesses to class level attributes.
438
+ std::vector<std::string> unresolved_class_attributes_;
439
+ };
440
+
441
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h ADDED
@@ -0,0 +1,687 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/StringUtil.h>
4
+ #include <c10/util/string_view.h>
5
+ #include <c10/util/irange.h>
6
+ #include <ATen/core/jit_type.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <ATen/core/alias_info.h>
10
+ #include <ATen/core/operator_name.h>
11
+ #include <ATen/core/dispatch/OperatorOptions.h>
12
+ #include <unordered_map>
13
+ #include <utility>
14
+
15
+ namespace c10 {
16
+
17
+ // schema as used in the compiler for resolving function calls and reporting
18
+ // errors. These objects should be constructed from C10 schema once those
19
+ // are available.
20
+
21
+ struct Argument;
22
+ struct FunctionSchema;
23
+
24
+ using AliasTypeSet = std::vector<TypePtr>;
25
+
26
+ bool operator==(const Argument& lhs, const Argument& rhs);
27
+
28
+ struct Argument {
29
+ Argument(
30
+ std::string name = "",
31
+ const TypePtr& type = nullptr,
32
+ c10::optional<int32_t> N = c10::nullopt,
33
+ c10::optional<IValue> default_value = c10::nullopt,
34
+ bool kwarg_only = false,
35
+ c10::optional<AliasInfo> alias_info = c10::nullopt)
36
+ : Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
37
+
38
+ Argument(
39
+ std::string name,
40
+ TypePtr fake_type,
41
+ TypePtr real_type,
42
+ c10::optional<int32_t> N = c10::nullopt,
43
+ c10::optional<IValue> default_value = c10::nullopt,
44
+ bool kwarg_only = false,
45
+ c10::optional<AliasInfo> alias_info = c10::nullopt)
46
+ : name_(std::move(name)),
47
+ type_(fake_type ? std::move(fake_type) : TensorType::get()),
48
+ real_type_(real_type ? std::move(real_type) : type_),
49
+ N_(N),
50
+ default_value_(std::move(default_value)),
51
+ alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
52
+ kwarg_only_(kwarg_only) {
53
+ // this is a softly-enforced invariant for out arguments.
54
+ bool is_alias = alias_info_ != nullptr && alias_info_->isWrite();
55
+ is_out_ = kwarg_only_ && is_alias;
56
+ }
57
+
58
+ Argument(Argument&& rhs) noexcept = default;
59
+
60
+ Argument(const Argument& rhs)
61
+ : name_(rhs.name_),
62
+ type_(rhs.type_),
63
+ real_type_(rhs.real_type_),
64
+ N_(rhs.N_),
65
+ default_value_(rhs.default_value_),
66
+ alias_info_(rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr),
67
+ kwarg_only_(rhs.kwarg_only_),
68
+ is_out_(rhs.is_out_) {}
69
+
70
+ Argument& operator=(Argument&& rhs) = default;
71
+
72
+ Argument& operator=(const Argument& rhs) {
73
+ if (this != &rhs) {
74
+ name_ = rhs.name_;
75
+ type_ = rhs.type_;
76
+ real_type_ = rhs.real_type_;
77
+ N_ = rhs.N_;
78
+ default_value_ = rhs.default_value_;
79
+ alias_info_ = rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr;
80
+ kwarg_only_ = rhs.kwarg_only_;
81
+ is_out_ = rhs.is_out_;
82
+ }
83
+ return *this;
84
+ }
85
+
86
+ const std::string& name() const {
87
+ return name_;
88
+ }
89
+ const TypePtr& type() const {
90
+ return type_;
91
+ }
92
+ // if type() is non-null, this is guaranteed to be non-null (if no real
93
+ // type was provided, this takes on type()'s value)
94
+ const TypePtr& real_type() const {
95
+ return real_type_;
96
+ }
97
+ c10::optional<int32_t> N() const {
98
+ return N_;
99
+ }
100
+ const c10::optional<IValue>& default_value() const {
101
+ return default_value_;
102
+ }
103
+ bool kwarg_only() const {
104
+ return kwarg_only_;
105
+ }
106
+
107
+ bool is_out() const {
108
+ return is_out_;
109
+ }
110
+
111
+ C10_NODISCARD const AliasInfo* alias_info() const {
112
+ return alias_info_.get();
113
+ }
114
+
115
+ bool is_inferred_type() const {
116
+ bool is_inferred_type = false;
117
+ TORCH_INTERNAL_ASSERT(type_);
118
+ if (auto pt = type_->cast<TensorType>()) {
119
+ if (pt->isInferredType()) {
120
+ is_inferred_type = true;
121
+ }
122
+ }
123
+ return is_inferred_type;
124
+ }
125
+
126
+ std::string formatTypeMismatchMsg(const std::string& actual_type) const {
127
+ std::string inferred_type_hint;
128
+ if (is_inferred_type()) {
129
+ inferred_type_hint = c10::str(
130
+ "Inferred '",
131
+ name(),
132
+ "' to be of type 'Tensor' ",
133
+ "because it was not annotated with an explicit type.\n");
134
+ }
135
+ return c10::str(
136
+ "Expected a value of type '",
137
+ type()->repr_str(),
138
+ "' for argument '",
139
+ name(),
140
+ "' but instead found type '",
141
+ actual_type,
142
+ "'.\n",
143
+ inferred_type_hint);
144
+ }
145
+
146
+ Argument cloneWithType(TypePtr new_type) const {
147
+ return Argument(
148
+ name_,
149
+ std::move(new_type),
150
+ N_,
151
+ default_value_,
152
+ kwarg_only_,
153
+ alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt);
154
+ }
155
+
156
+ // this function checks whether this Argument is backward compatible with
157
+ // the old one. we consider the following cases are backward compatible:
158
+ // 1) two arguments are equal
159
+ // 2) this arg's type should be subtype of old
160
+ // 3) this arg must provide the same default value if old arg has one,
161
+ bool isBackwardCompatibleWith(
162
+ const Argument& old,
163
+ std::ostream* why_not=nullptr) const;
164
+
165
+ // this function checks whether this Argument is forward compatible with
166
+ // the old one. we consider the following cases are forward compatible:
167
+ // 1) two arguments are equal
168
+ // 2) this arg's type should be subtype of old
169
+ // 3) this arg must provide the same default value if old arg has one,
170
+ bool isForwardCompatibleWith(
171
+ const Argument& old,
172
+ std::ostream* why_not = nullptr) const;
173
+
174
+ private:
175
+ std::string name_;
176
+ TypePtr type_;
177
+ TypePtr real_type_; // this is ScalarType, not int, e.g.
178
+ // for list types, an optional statically known length for the list
179
+ // e.g. for int[3]: type = ListType::ofInts(), N = 3
180
+ // If present, this will allow scalars to be broadcast to this length to
181
+ // become a list.
182
+ c10::optional<int32_t> N_;
183
+
184
+ c10::optional<IValue> default_value_;
185
+ // AliasInfo is huge, so let's only allocate memory for it if
186
+ // necessary (which it isn't during schema parsing on startup, to
187
+ // give a pertinent example).
188
+ std::unique_ptr<AliasInfo> alias_info_;
189
+ // is this only specifiable as a keyword argument?
190
+ bool kwarg_only_;
191
+ // marks if the argument is out variant of the schema
192
+ bool is_out_;
193
+ };
194
+
195
+ inline bool operator==(const Argument& lhs, const Argument& rhs) {
196
+ return lhs.name() == rhs.name()
197
+ && *lhs.type() == *rhs.type()
198
+ && lhs.N() == rhs.N()
199
+ && lhs.default_value() == rhs.default_value()
200
+ && lhs.kwarg_only() == rhs.kwarg_only()
201
+ && (lhs.alias_info() == rhs.alias_info()
202
+ || (lhs.alias_info() != nullptr && rhs.alias_info() != nullptr
203
+ && *lhs.alias_info() == *rhs.alias_info()));
204
+ }
205
+
206
+ inline bool operator!=(const Argument& lhs, const Argument& rhs) {
207
+ return !(lhs == rhs);
208
+ }
209
+
210
+ enum struct TORCH_API SchemaArgType { input, output };
211
+
212
+ /**
213
+ * struct SchemaArgument
214
+ *
215
+ * Structure used to represent arguments or returns for a schema.
216
+ */
217
+ struct TORCH_API SchemaArgument {
218
+ SchemaArgType type;
219
+ size_t index;
220
+ SchemaArgument(SchemaArgType tpe, size_t idx) : type(tpe), index(idx) {}
221
+ bool operator==(const SchemaArgument& rhs) const {
222
+ return type == rhs.type && index == rhs.index;
223
+ }
224
+ };
225
+
226
+ bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs);
227
+
228
+ struct TORCH_API FunctionSchema {
229
+ FunctionSchema(
230
+ std::string name,
231
+ std::string overload_name,
232
+ std::vector<Argument> arguments,
233
+ std::vector<Argument> returns,
234
+ bool is_vararg = false,
235
+ bool is_varret = false)
236
+ : name_({std::move(name), std::move(overload_name)}),
237
+ arguments_(std::move(arguments)),
238
+ returns_(std::move(returns)),
239
+ is_vararg_(is_vararg),
240
+ is_varret_(is_varret) {
241
+ checkSchema();
242
+ }
243
+
244
+ FunctionSchema(
245
+ Symbol name,
246
+ std::string overload_name,
247
+ std::vector<Argument> arguments,
248
+ std::vector<Argument> returns,
249
+ bool is_vararg = false,
250
+ bool is_varret = false)
251
+ : FunctionSchema(
252
+ name.toQualString(),
253
+ std::move(overload_name),
254
+ std::move(arguments),
255
+ std::move(returns),
256
+ is_vararg,
257
+ is_varret) {
258
+ checkSchema();
259
+ }
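A minimal illustrative sketch of building a schema by hand with these constructors; the operator name and argument list are made up, and real code usually obtains schemas from the operator registry rather than constructing them directly:

c10::FunctionSchema make_example_schema() {
  std::vector<c10::Argument> args;
  args.emplace_back("self", c10::TensorType::get());
  args.emplace_back("other", c10::TensorType::get());
  // kwarg-only argument with a default value
  args.emplace_back("alpha", c10::IntType::get(), c10::nullopt,
                    c10::IValue(int64_t{1}), /*kwarg_only=*/true);

  std::vector<c10::Argument> returns;
  returns.emplace_back("", c10::TensorType::get());

  // name, overload name, arguments, returns
  return c10::FunctionSchema("myops::scaled_add", "", std::move(args),
                             std::move(returns));
}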
260
+
261
+ // Checks whether this schema is backward compatible with the old one.
262
+ // The following conditions must be true:
263
+ // [Function structure] The new schema's name, overload-name, varargs, and
264
+ // return arity are the same.
265
+ // [Output Narrowing] The new schema's output type must be the same class
266
+ // or inherit from the old schema's output type.
267
+ // [Argument count] The new schema must have at least as many arguments as
268
+ // the old schema (considering the list of positional and kwargs).
269
+ // [Arg Compatibility] Every argument in the old schema has a corresponding
270
+ // argument in the new schema that:
271
+ // * is at the same position.
272
+ // * has the same name.
273
+ // * is either positional, or kwarg and the old argument was kwarg.
274
+ // * has the same type, or the old argument's type inherits from the
275
+ // new argument's type.
276
+ // [Default Values] Every new argument must have a default value.
277
+ // E.g.
278
+ // OK f_new(a, b, c=1) => f_old(a, b)
279
+ // NOK f_new(a, c=1, *, b) => f_old(a, *, b)
280
+ // OK f_new(a, b, *, c) => f_old(a, *, b, c)
281
+ // NOK f_new(a, *, b, c) => f_old(a, b, *, c)
282
+ // NOK f_new(a, *, c, b) => f_old(a, *, b, c)
283
+ // OK f_new(a, *, b, c, d=1) => f_old(a, *, b, c)
284
+ bool isBackwardCompatibleWith(
285
+ const FunctionSchema& old,
286
+ std::ostream* why_not = nullptr) const;
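A hedged sketch of how this check can be exercised, assuming the JIT schema parser torch::jit::parseSchema is available; it illustrates the [Default Values] rule above, and the example:: schemas are invented for illustration:

    #include <torch/csrc/jit/frontend/function_schema_parser.h>
    #include <sstream>

    void backwardCompatExample() {
      auto old_s   = torch::jit::parseSchema("example::f(Tensor a, Tensor b) -> Tensor");
      auto new_ok  = torch::jit::parseSchema("example::f(Tensor a, Tensor b, int c=1) -> Tensor");
      auto new_bad = torch::jit::parseSchema("example::f(Tensor a, Tensor b, int c) -> Tensor");

      std::ostringstream why;
      bool ok  = new_ok.isBackwardCompatibleWith(old_s);         // true: the new 'c' has a default
      bool bad = new_bad.isBackwardCompatibleWith(old_s, &why);  // false: reason written to 'why'
      (void)ok; (void)bad;
    }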
287
+
288
+ // Checks whether this schema is forward compatible with the old one.
289
+ // The following conditions must be true:
290
+ // [Function structure] The new schema's name, overload-name, varargs, and
291
+ // return arity are the same.
292
+ // [Output Narrowing] The new schema's output type must be the same class
293
+ // or inherit from the old schema's output type.
294
+ // [Arg Compatibility] Every argument in the old schema has a corresponding
295
+ // argument in the new schema that:
296
+ // * is at the same position.
297
+ // * has the same name.
298
+ // * is either positional, or kwarg and the old argument was kwarg.
299
+ // * has the same type, or the old argument's type inherits from the
300
+ // new argument's type.
301
+ // [Default Values] Every new argument must have a default value.
302
+ // Each default value type should NOT be a container type.
303
+ // [Positioning] All default arguments MUST go after either old
304
+ // default arguments or the end of positional arguments
305
+ // and right BEFORE all out arguments
306
+ bool isForwardCompatibleWith(
307
+ const FunctionSchema& old,
308
+ std::ostringstream& why_not) const;
309
+
310
+ private:
311
+ OperatorName name_;
312
+ std::vector<Argument> arguments_;
313
+ std::vector<Argument> returns_;
314
+ // if true then this schema takes an arbitrary number of additional arguments
315
+ // after the argument specified in arguments
316
+ // currently this is used primarily to represent 'primitive' operators whose
317
+ // arguments are not checked by schema
318
+ bool is_vararg_;
319
+ bool is_varret_;
320
+
321
+ // if no alias information is directly specified, what kind of "default"
322
+ // alias information should we infer?
323
+ // NB: due to alias analysis kind merging, this may be nullopt. Eventually
324
+ // this should always be set no matter what
325
+ c10::optional<AliasAnalysisKind> alias_kind_;
326
+
327
+ template <typename T>
328
+ void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const;
329
+
330
+ void checkSchema() const {
331
+ bool seen_default_arg = false;
332
+ for (const auto& arg : arguments()) {
333
+ if (arg.default_value()) {
334
+ seen_default_arg = true;
335
+ } else {
336
+ // we have historically serialized broadcasting lists without default values,
337
+ // so, to avoid breaking backward compatibility, allow lists here
338
+ if (arg.type()->kind() == ListType::Kind) {
339
+ continue;
340
+ }
341
+ TORCH_INTERNAL_ASSERT(
342
+ !seen_default_arg || arg.kwarg_only(),
343
+ "Non-default positional argument follows default argument. Parameter ",
344
+ arg.name(),
345
+ " in ",
346
+ *this);
347
+ }
348
+ }
349
+ }
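Two concrete cases of the ordering invariant this function enforces, under the same torch::jit::parseSchema assumption as the previous sketch:

    #include <torch/csrc/jit/frontend/function_schema_parser.h>

    void defaultOrderingExample() {
      // OK: the non-default argument after the defaulted one is kwarg-only.
      torch::jit::parseSchema("example::g(Tensor a, int b=1, *, int c) -> Tensor");
      // Would trip the TORCH_INTERNAL_ASSERT above: positional 'c' has no
      // default but follows the defaulted 'b'.
      // torch::jit::parseSchema("example::g(Tensor a, int b=1, int c) -> Tensor");
    }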
350
+
351
+ public:
352
+
353
+ void dump() const;
354
+
355
+ const OperatorName& operator_name() const {
356
+ return name_;
357
+ }
358
+ const std::string& name() const {
359
+ return name_.name;
360
+ }
361
+ const std::string& overload_name() const {
362
+ return name_.overload_name;
363
+ }
364
+ const std::vector<Argument>& arguments() const {
365
+ return arguments_;
366
+ }
367
+ const std::vector<Argument>& returns() const {
368
+ return returns_;
369
+ }
370
+ bool is_vararg() const {
371
+ return is_vararg_;
372
+ }
373
+ bool is_varret() const {
374
+ return is_varret_;
375
+ }
376
+ bool is_aliasing(const c10::SchemaArgument &argument) const {
377
+ TORCH_INTERNAL_ASSERT(
378
+ argument.index < getCorrectList(argument.type).size(),
379
+ "Invalid index for schema.");
380
+ const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
381
+ return aliasInfo != nullptr;
382
+ }
383
+ bool is_mutable() const {
384
+ return std::any_of(
385
+ arguments_.cbegin(), arguments_.cend(), [](const Argument& arg) {
386
+ const AliasInfo* aliasInfo = arg.alias_info();
387
+ return aliasInfo && aliasInfo->isWrite();
388
+ });
389
+ }
390
+ bool is_mutable(const c10::SchemaArgument &argument) const {
391
+ TORCH_INTERNAL_ASSERT(
392
+ argument.index < getCorrectList(argument.type).size(),
393
+ "Invalid index for schema.");
394
+ const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
395
+ return aliasInfo && aliasInfo->isWrite();
396
+ }
397
+ bool is_mutable(c10::string_view name) const {
398
+ c10::optional<int> index = argumentIndexWithName(name);
399
+ TORCH_INTERNAL_ASSERT(
400
+ index != c10::nullopt, "Schema has no argument named ", name);
401
+
402
+ return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)});
403
+ }
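A small sketch of the mutability queries on an in-place schema, again assuming torch::jit::parseSchema; the (a!) annotation marks an argument the operator writes to:

    #include <torch/csrc/jit/frontend/function_schema_parser.h>

    void mutabilityExample() {
      auto s = torch::jit::parseSchema(
          "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)");
      bool any   = s.is_mutable();                                // true: some input is written to
      bool self  = s.is_mutable("self");                          // true
      bool other = s.is_mutable({c10::SchemaArgType::input, 1});  // false: 'other' is read-only
      (void)any; (void)self; (void)other;
    }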
404
+
405
+ // Returns whether lhs and rhs may alias directly.
406
+ // This does not account for cases where lhs or rhs are a container that
407
+ // may contain elements that alias the other argument.
408
+ // FunctionSchema::may_contain_alias will include that functionality.
409
+ bool may_alias(const SchemaArgument& lhs, const SchemaArgument& rhs) const;
410
+
411
+ // Returns whether lhs and rhs may alias directly or whether lhs/rhs are a container
412
+ // that may contain elements that alias the other argument.
413
+ // bidirectional = false only returns whether lhs may contain an alias of rhs
414
+ // while bidirectional = true returns both directions.
415
+ bool may_contain_alias(const SchemaArgument& lhs, const SchemaArgument& rhs, bool bidirectional = true) const;
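The aliasing queries relate inputs to outputs. A sketch on the same in-place schema as above, under the same parseSchema assumption:

    #include <torch/csrc/jit/frontend/function_schema_parser.h>

    void aliasingExample() {
      auto s = torch::jit::parseSchema(
          "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)");
      c10::SchemaArgument in_self{c10::SchemaArgType::input, 0};
      c10::SchemaArgument ret0{c10::SchemaArgType::output, 0};
      bool direct    = s.may_alias(in_self, ret0);          // true: both carry alias set (a)
      bool contained = s.may_contain_alias(in_self, ret0);  // also true; additionally covers containers
      (void)direct; (void)contained;
    }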
416
+
417
+ // Returns whether the two AliasTypeSets contain any similarities
418
+ // i.e. whether the two type sets can alias.
419
+ bool canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;
420
+
421
+ // Recursively finds all contained types within the AliasTypeSet.
422
+ c10::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;
423
+
424
+ // Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
425
+ // Used to map types to a type such that all types that can alias will be mapped to the same type.
426
+ // For example, calling this method on 'Optional[List[int]]' is the same as calling this method
427
+ // on 'List[int]'.
428
+ c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
429
+
430
+ // Returns either arguments() or returns() depending on the SchemaArgType
431
+ // output => returns(), input => arguments()
432
+ const std::vector<Argument>& getCorrectList(SchemaArgType type) const;
433
+
434
+ c10::optional<int> argumentIndexWithName(c10::string_view name) const {
435
+ for (const auto i : c10::irange(arguments().size())) {
436
+ if(name == arguments()[i].name())
437
+ return i;
438
+ }
439
+ return c10::nullopt;
440
+ }
441
+ FunctionSchema cloneWithName(std::string name, std::string overload_name) const {
442
+ return FunctionSchema(
443
+ std::move(name),
444
+ std::move(overload_name),
445
+ arguments(),
446
+ returns(),
447
+ is_vararg(),
448
+ is_varret()
449
+ );
450
+ }
451
+ FunctionSchema cloneWithArguments(std::vector<Argument> new_arguments) const {
452
+ return FunctionSchema(
453
+ name(),
454
+ overload_name(),
455
+ std::move(new_arguments),
456
+ returns(),
457
+ is_vararg(),
458
+ is_varret());
459
+ }
460
+ FunctionSchema cloneWithReturns(std::vector<Argument> new_returns) const {
461
+ return FunctionSchema(
462
+ name(),
463
+ overload_name(),
464
+ arguments(),
465
+ std::move(new_returns),
466
+ is_vararg(),
467
+ is_varret());
468
+ }
469
+
470
+ std::string formatTypeMismatchMsg(
471
+ const Argument& expected,
472
+ const std::string& actual_type,
473
+ c10::optional<size_t> position = c10::nullopt,
474
+ c10::optional<std::string> value = c10::nullopt) const;
475
+
476
+ FunctionSchema cloneWithRemappedTypes(
477
+ const std::function<TypePtr(TypePtr)> type_map) const;
478
+
479
+ FunctionSchema cloneWithRealTypes(bool with_symint=true) const;
480
+
481
+ // Checks that inputs have the correct types and appends any missing default
482
+ // values.
483
+ template <typename T = c10::PlatformType>
484
+ void checkAndNormalizeInputs(
485
+ std::vector<IValue>& inputs,
486
+ const std::unordered_map<std::string, IValue>& kwargs =
487
+ std::unordered_map<std::string, IValue>{}) const;
488
+
489
+ std::string findErrorInKwargs(const std::vector<std::string>& kwargs) const;
490
+
491
+ bool hasAnyAliasInfo() const {
492
+ for (const auto& arg : arguments_) {
493
+ if (arg.alias_info() != nullptr) {
494
+ return true;
495
+ }
496
+ }
497
+ for (const auto& ret : returns_) {
498
+ if (ret.alias_info() != nullptr) {
499
+ return true;
500
+ }
501
+ }
502
+ return false;
503
+ }
504
+
505
+
506
+ // TODO remove the mutation here
507
+ bool isDefaultAliasAnalysisKind() const {
508
+ return !alias_kind_;
509
+ }
510
+ AliasAnalysisKind aliasAnalysis() const {
511
+ return alias_kind_.value_or(AliasAnalysisKind::CONSERVATIVE);
512
+ }
513
+ void setAliasAnalysis(AliasAnalysisKind v) {
514
+ alias_kind_ = v;
515
+ }
516
+
517
+ c10::optional<c10::string_view> getNamespace() const {
518
+ return name_.getNamespace();
519
+ }
520
+
521
+ // Returns true if we successfully set the namespace (i.e. it was
522
+ // not previously set), and false otherwise.
523
+ bool setNamespaceIfNotSet(const char* ns) {
524
+ return name_.setNamespaceIfNotSet(ns);
525
+ }
526
+
527
+ // can a function with this schema be substituted for a function of rhs's
528
+ // schema and have the program typecheck?
529
+ // as_method - if true, treat this schema as a method and ignore
530
+ // the first argument, which will be the object in both cases
531
+ bool isSubtypeOf(const FunctionSchema& rhs, bool as_method, std::ostream* why_not=nullptr) const;
532
+ };
533
+
534
+ inline bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs) {
535
+ return lhs.name() == rhs.name()
536
+ && lhs.overload_name() == rhs.overload_name()
537
+ && lhs.arguments() == rhs.arguments()
538
+ && lhs.returns() == rhs.returns()
539
+ && lhs.is_vararg() == rhs.is_vararg()
540
+ && lhs.is_varret() == rhs.is_varret();
541
+ }
542
+
543
+ inline bool operator!=(const FunctionSchema& lhs, const FunctionSchema& rhs) {
544
+ return !(lhs == rhs);
545
+ }
546
+
547
+ // print out Argument, which is compatible with FunctionSchema parser
548
+ // full format: Type(alias)? name=default_value
549
+ inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {
550
+
551
+ // adjust where the '?' is printed:
+ // schemas contain both "Tensor?(a!) input" and "t(a!)?", but "t?(a!)" is not
+ // accepted by the schema parser, so we always print the Type(alias)? form.
555
+ // real_type versus fake_type: in order to be compatible with FunctionSchema
556
+ // parser, printing an argument with either MemoryFormat or Layout type should
557
+ // give us the original schema string, hence printing out real_type.
558
+ auto type = arg.real_type();
559
+ bool is_opt = type->kind() == OptionalType::Kind;
560
+ auto unopt_type = is_opt ? type->castRaw<OptionalType>()->getElementType() : type;
561
+
562
+ if (unopt_type->kind() == ListType::Kind) {
563
+ // sized lists get size N from arg, not type
564
+ auto list = unopt_type->cast<c10::ListType>();
565
+ out << list->getElementType()->str();
566
+ if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){
567
+ out << arg.alias_info()->containedTypes()[0];
568
+ }
569
+ std::string N = "";
570
+ if (arg.N()) {
571
+ N = std::to_string(*arg.N());
572
+ }
573
+ out << "[" << N << "]";
574
+ } else {
575
+ out << unopt_type->str();
576
+ }
577
+
578
+ // print alias info if it has beforeSets.
579
+ if (arg.alias_info() && !arg.alias_info()->beforeSets().empty()) {
580
+ out << *arg.alias_info();
581
+ }
582
+
583
+ if (is_opt) {
584
+ out << "?";
585
+ }
586
+
587
+ if (!arg.name().empty()) {
588
+ out << " " << arg.name();
589
+ }
590
+
591
+ if (arg.default_value()) {
592
+ out << "=";
593
+ if ((type->kind() == c10::TypeKind::StringType ||
594
+ unopt_type->kind() == c10::TypeKind::StringType) &&
595
+ arg.default_value().value().isString()) {
596
+ printQuotedString(out, arg.default_value().value().toStringRef());
597
+ } else if (type->kind() == TypeKind::ListType && type->castRaw<ListType>()->getElementType()->kind() == c10::TypeKind::IntType) {
598
+ // We want to faithfully replicate JIT schema.
599
+ // in native_functions.yaml defaults for int arrays with a single value always look like
600
+ // int[2] stride=1
601
+ // instead of
602
+ // int[2] stride=[1, 1]
603
+ auto default_val = arg.default_value().value().toIntList();
604
+ if (default_val.size() > 1) {
605
+ auto all_defaults_the_same = true;
606
+ for (const auto i : c10::irange(1, default_val.size())) {
607
+ if (default_val[0] != default_val[i]) all_defaults_the_same = false;
608
+ }
609
+ if (all_defaults_the_same) {
610
+ out << default_val[0];
611
+ } else {
612
+ out << arg.default_value().value();
613
+ }
614
+ } else {
615
+ out << arg.default_value().value();
616
+ }
617
+ } else {
618
+ out << arg.default_value().value();
619
+ }
620
+ }
621
+
622
+ return out;
623
+ }
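Under these rules, a few representative arguments print as follows (illustrative samples, not an exhaustive list):

    Tensor(a!) self     // alias set printed after the type, before any '?'
    Tensor? weight      // optional types always render as Type?
    int[2] stride=1     // sized list; an all-equal [1, 1] default collapses to a single value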
624
+
625
+ inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema);
626
+
627
+ inline std::string toString(const FunctionSchema& schema) {
628
+ std::ostringstream str;
629
+ str << schema;
630
+ return str.str();
631
+ }
632
+
633
+ } // namespace c10
634
+
635
+ namespace std {
636
+ template<>
637
+ struct hash<c10::SchemaArgument> {
638
+ size_t operator()(const c10::SchemaArgument& arg) const
639
+ {
640
+ return c10::hash_combine(std::hash<size_t>()(arg.index), std::hash<size_t>()(static_cast<std::size_t>(arg.type)));
641
+ }
642
+ };
643
+ template<>
644
+ struct hash<c10::Argument> {
645
+ size_t operator()(const c10::Argument& arg) const
646
+ {
647
+ auto hash = std::hash<std::string>{}(arg.name());
648
+ auto type_hash = std::hash<c10::TypePtr>{}(arg.type());
649
+ auto kwarg_only_hash = std::hash<bool>{}(arg.kwarg_only());
650
+ hash = c10::hash_combine(hash, type_hash);
651
+ hash = c10::hash_combine(hash, kwarg_only_hash);
652
+ // hashing optional fields if they exist
653
+ if (arg.default_value()) {
654
+ auto default_value_hash = c10::hash<c10::IValue>{}(arg.default_value().value());
655
+ hash = c10::hash_combine(hash, default_value_hash);
656
+ }
657
+ if (arg.N()) {
658
+ auto N_hash = std::hash<int64_t>{}(*arg.N());
659
+ hash = c10::hash_combine(hash, N_hash);
660
+ }
661
+ if (arg.alias_info()) {
662
+ auto alias_info_hash = std::hash<c10::AliasInfo>{}(*arg.alias_info());
663
+ hash = c10::hash_combine(hash, alias_info_hash);
664
+ }
665
+ return hash;
666
+ }
667
+ };
668
+ template<>
669
+ struct hash<c10::FunctionSchema> {
670
+ size_t operator()(const c10::FunctionSchema& schema) const
671
+ {
672
+ auto hash = std::hash<c10::OperatorName>{}(schema.operator_name());
673
+ auto args_hash = c10::hash<std::vector<c10::Argument>>{}(schema.arguments());
674
+ auto returns_hash = c10::hash<std::vector<c10::Argument>>{}(schema.returns());
675
+ auto is_vararg_hash = std::hash<bool>{}(schema.is_vararg());
676
+ auto is_varret_hash = std::hash<bool>{}(schema.is_varret());
677
+ hash = c10::hash_combine(hash, args_hash);
678
+ hash = c10::hash_combine(hash, returns_hash);
679
+ hash = c10::hash_combine(hash, is_vararg_hash);
680
+ hash = c10::hash_combine(hash, is_varret_hash);
681
+ return hash;
682
+ }
683
+ };
684
+ } // namespace std
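Together with the equality operators defined earlier, these hash specializations let schemas serve as keys in the standard unordered containers; a minimal sketch, still assuming torch::jit::parseSchema:

    #include <torch/csrc/jit/frontend/function_schema_parser.h>
    #include <unordered_set>

    bool schemaSetExample() {
      std::unordered_set<c10::FunctionSchema> seen;
      seen.insert(torch::jit::parseSchema("example::f(Tensor a) -> Tensor"));
      // an identical schema hashes and compares equal, so this finds the entry
      return seen.count(torch::jit::parseSchema("example::f(Tensor a) -> Tensor")) > 0;
    }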
685
+
686
+
687
+ #include <ATen/core/function_schema_inl.h> // IWYU pragma: keep
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_native.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _adaptive_avg_pool2d_out_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
20
+ TORCH_API at::Tensor adaptive_avg_pool2d_cpu(const at::Tensor & self, at::IntArrayRef output_size);
21
+ TORCH_API at::Tensor adaptive_avg_pool2d_cuda(const at::Tensor & self, at::IntArrayRef output_size);
22
+ TORCH_API at::Tensor adaptive_avg_pool2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size);
23
+ TORCH_API at::Tensor adaptive_avg_pool2d_quantized_cuda(const at::Tensor & self, at::IntArrayRef output_size);
24
+ } // namespace native
25
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
27
+ return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
28
+ }
29
+
30
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor _conj(const at::Tensor & self);
21
+
22
+ } // namespace compositeexplicitautograd
23
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _dirichlet_grad_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
21
+ TORCH_API at::Tensor & _dirichlet_grad_outf(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _embedding_bag_dense_backward {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, bool, int64_t, const c10::optional<at::Tensor> &, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_dense_backward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx);
26
+ };
27
+
28
+ struct TORCH_API _embedding_bag_dense_backward_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, bool, int64_t, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_dense_backward")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h ADDED
@@ -0,0 +1,101 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_foreach_add_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
26
+ inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, const at::Scalar & scalar) {
27
+ return at::_ops::_foreach_add_Scalar::call(self, scalar);
28
+ }
29
+
30
+ // aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
31
+ inline void _foreach_add_(at::TensorList self, const at::Scalar & scalar) {
32
+ return at::_ops::_foreach_add__Scalar::call(self, scalar);
33
+ }
34
+
35
+ // aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
36
+ inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
37
+ return at::_ops::_foreach_add_List::call(self, other, alpha);
38
+ }
39
+
40
+ // aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
41
+ inline void _foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
42
+ return at::_ops::_foreach_add__List::call(self, other, alpha);
43
+ }
44
+
45
+ // aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
46
+ inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
47
+ return at::_ops::_foreach_add_ScalarList::call(self, scalars);
48
+ }
49
+
50
+ // aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
51
+ inline void _foreach_add_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
52
+ return at::_ops::_foreach_add__ScalarList::call(self, scalars);
53
+ }
54
+
55
+ // aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]
56
+ inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
57
+ return at::_ops::_foreach_add_Tensor::call(self, other, alpha);
58
+ }
59
+
60
+ // aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()
61
+ inline void _foreach_add_(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
62
+ return at::_ops::_foreach_add__Tensor::call(self, other, alpha);
63
+ }
64
+
65
+ // aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
66
+ inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
67
+ return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
68
+ }
69
+ // aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
70
+ inline void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
71
+ return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
72
+ }
73
+
74
+ // aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
75
+ inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
76
+ return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
77
+ }
78
+ // aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
79
+ inline void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
80
+ return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
81
+ }
82
+
83
+ // aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
84
+ inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
85
+ return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
86
+ }
87
+ // aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
88
+ inline void _foreach_add_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
89
+ return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
90
+ }
91
+
92
+ // aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
93
+ inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
94
+ return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
95
+ }
96
+ // aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
97
+ inline void _foreach_add_outf(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
98
+ return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
99
+ }
100
+
101
+ }
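These wrappers simply forward to the at::_ops entry points. A brief sketch of the List and Scalar overloads; note that _foreach_add is an underscore-prefixed internal op, so its exact signature may change between releases:

    #include <ATen/ATen.h>
    #include <vector>

    void foreachAddExample() {
      std::vector<at::Tensor> xs{at::ones({2}), at::ones({3})};
      std::vector<at::Tensor> ys{at::ones({2}), at::ones({3})};
      auto sums = at::_foreach_add(xs, ys);  // functional List overload, alpha defaults to 1
      at::_foreach_add_(xs, 2);              // in-place Scalar overload: xs[i] += 2
      (void)sums;
    }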
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API void _foreach_exp_out(at::TensorList out, at::TensorList self);
21
+ TORCH_API void _foreach_exp_outf(at::TensorList self, at::TensorList out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
21
+ TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
22
+ TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
23
+ TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
24
+
25
+ } // namespace cpu
26
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API void _histogramdd_bin_edges_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out);
20
+ TORCH_API ::std::vector<at::Tensor> histogramdd_bin_edges(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
21
+ } // namespace native
22
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _make_dual_copy_out(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
20
+ TORCH_API at::Tensor _make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
21
+ } // namespace native
22
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h ADDED
@@ -0,0 +1,44 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_mkldnn_transpose_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
26
+ inline at::Tensor _mkldnn_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
27
+ return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
28
+ }
29
+
30
+ // aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
31
+ inline at::Tensor & _mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) {
32
+ return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
33
+ }
34
+
35
+ // aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
36
+ inline at::Tensor & _mkldnn_transpose_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
37
+ return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
38
+ }
39
+ // aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
40
+ inline at::Tensor & _mkldnn_transpose_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
41
+ return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
42
+ }
43
+
44
+ }
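The functional / in-place / out-variant naming triple above recurs across these generated headers. Purely as a naming illustration (this particular op expects MKL-DNN opaque tensors, so no runnable snippet is given):

    // functional: at::Tensor r = at::_mkldnn_transpose(x, 0, 1);
    // in-place  : at::_mkldnn_transpose_(x, 0, 1);           // trailing '_' mutates self
    // out       : at::_mkldnn_transpose_out(out, x, 0, 1);   // out-first convenience order
    //             at::_mkldnn_transpose_outf(x, 0, 1, out);  // out-last, matching the schema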
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt);
21
+ TORCH_API at::Tensor & _nested_tensor_from_tensor_list_out(at::Tensor & out, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt);
22
+ TORCH_API at::Tensor & _nested_tensor_from_tensor_list_outf(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out);
23
+
24
+ } // namespace compositeexplicitautograd
25
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt);
20
+ TORCH_API at::Tensor & _nested_tensor_from_tensor_list_out(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out);
21
+ } // namespace native
22
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
21
+
22
+ } // namespace compositeexplicitautogradnonfunctional
23
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _test_string_default {
18
+ using schema = at::Tensor (const at::Tensor &, c10::string_view, c10::string_view);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_string_default")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor")
24
+ static at::Tensor call(const at::Tensor & dummy, c10::string_view a, c10::string_view b);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a, c10::string_view b);
26
+ };
27
+
28
+ }} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_cuda(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias);
21
+ } // namespace native
22
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _unsafe_view {
18
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_view")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_view(Tensor self, SymInt[] size) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size);
26
+ };
27
+
28
+ struct TORCH_API _unsafe_view_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_view")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
21
+ TORCH_API at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
22
+ TORCH_API at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
23
+ TORCH_API at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
24
+ TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
25
+ TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
26
+
27
+ } // namespace meta
28
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim);
20
+ } // namespace native
21
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
21
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size);
22
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
23
+
24
+ } // namespace meta
25
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API alias {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::alias")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "alias(Tensor(a) self) -> Tensor(a)")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ }} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/align_tensors_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::align_tensors(Tensor[] tensors) -> Tensor[]
26
+ inline ::std::vector<at::Tensor> align_tensors(at::TensorList tensors) {
27
+ return at::_ops::align_tensors::call(tensors);
28
+ }
29
+
30
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_ops.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API arccosh {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arccosh")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arccosh(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API arccosh_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arccosh_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arccosh_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API arccosh_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arccosh")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
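Every *_ops.h header follows this pattern: one struct per overload carrying the schema string plus static call/redispatch entry points. A small sketch of the two usual ways such an operator is reached; _ops::...::call is an internal entry point, so treat this as an assumption rather than a stable API:

    #include <ATen/ATen.h>
    #include <ATen/ops/arccosh_ops.h>

    void arccoshExample() {
      at::Tensor t = at::rand({4}) + 1.5;          // arccosh is defined for inputs >= 1
      at::Tensor a = at::arccosh(t);               // public functional API (alias of acosh)
      at::Tensor b = at::_ops::arccosh::call(t);   // direct call into the same operator
      (void)a; (void)b;
    }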
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
27
+ return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
28
+ }
29
+
30
+ // aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
32
+ return at::_ops::batch_norm_gather_stats_with_counts_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
33
+ }
34
+ // aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
36
+ return at::_ops::batch_norm_gather_stats_with_counts_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
37
+ }
38
+
39
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
21
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_outf(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
22
+ TORCH_API at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
23
+ TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other);
24
+
25
+ } // namespace meta
26
+ } // namespace at
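This header only declares the meta-backend (shape and dtype inference) kernels; ordinary code calls the device-agnostic entry points, which redispatch as needed. A minimal sketch (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  // bitwise_or requires integral or boolean tensors.
  at::Tensor a = at::randint(0, 8, {4});   // int64 by default
  at::Tensor b = at::randint(0, 8, {4});

  at::Tensor c = at::bitwise_or(a, b);     // functional
  at::Tensor out = at::empty_like(a);
  at::bitwise_or_out(out, a, b);           // out= variant
  a.bitwise_or_(b);                        // in-place
  return 0;
}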
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/block_diag_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::block_diag(Tensor[] tensors) -> Tensor
26
+ inline at::Tensor block_diag(at::TensorList tensors) {
27
+ return at::_ops::block_diag::call(tensors);
28
+ }
29
+
30
+ // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors) {
32
+ return at::_ops::block_diag_out::call(tensors, out);
33
+ }
34
+ // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor & out) {
36
+ return at::_ops::block_diag_out::call(tensors, out);
37
+ }
38
+
39
+ }
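A minimal usage sketch for the wrappers declared above (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({2, 2});
  at::Tensor b = at::ones({3, 1});

  // Functional form: the result here is a 5x3 block-diagonal matrix.
  at::Tensor c = at::block_diag({a, b});

  // out= form writes into (and resizes) a preallocated tensor.
  at::Tensor out = at::empty({0});
  at::block_diag_out(out, {a, b});
  return 0;
}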
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor coalesce(const at::Tensor & self);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
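coalesce is registered here as a CompositeImplicitAutograd kernel and is mainly relevant for sparse tensors. A brief sketch of typical usage (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  // Adding two sparse tensors generally produces an uncoalesced result
  // whose index list may contain duplicates.
  at::Tensor sp = at::rand({4, 4}).to_sparse();
  at::Tensor sum = sp + sp;            // typically uncoalesced

  at::Tensor merged = sum.coalesce();  // duplicate indices are summed
  bool ok = merged.is_coalesced();     // true
  (void)ok;
  return 0;
}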
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor copysign(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
22
+ TORCH_API at::Tensor & copysign_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
23
+ TORCH_API at::Tensor & copysign_(at::Tensor & self, const at::Tensor & other);
24
+
25
+ } // namespace meta
26
+ } // namespace at
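As with the other meta-dispatch headers, user code calls the generic at::copysign entry points. A minimal sketch (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor magnitude = at::rand({4});    // non-negative values
  at::Tensor sign = at::randn({4});        // mixed signs

  // Each result element has the magnitude of `magnitude` and the sign of `sign`.
  at::Tensor r = at::copysign(magnitude, sign);

  at::Tensor out = at::empty_like(r);
  at::copysign_out(out, magnitude, sign);  // out= variant
  return 0;
}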
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections);
20
+ TORCH_API ::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices);
21
+ } // namespace native
22
+ } // namespace at
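A minimal usage sketch for the two dsplit overloads declared above (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>
#include <vector>

int main() {
  // dsplit requires at least 3 dimensions; it splits along dim 2.
  at::Tensor t = at::arange(24).reshape({2, 3, 4});

  // Split into 2 equal chunks along depth (4 must be divisible by 2).
  std::vector<at::Tensor> halves = at::dsplit(t, 2);

  // Split at explicit depth indices instead of a chunk count.
  std::vector<at::Tensor> parts = at::dsplit(t, {1, 3});
  return 0;
}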
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & empty_quantized_out(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
20
+ TORCH_API at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
21
+ } // namespace native
22
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor exp2(const at::Tensor & self);
21
+ TORCH_API at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & exp2_(at::Tensor & self);
24
+
25
+ } // namespace cuda
26
+ } // namespace at
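This header declares only the CUDA-backend kernels; portable code calls the device-agnostic at::exp2, which redispatches based on the input's device. A minimal sketch (illustrative only, assuming a standard libtorch build, and a CUDA-enabled build for the second branch; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({4});
  at::Tensor y = at::exp2(x);                  // 2**x, elementwise, on CPU

  // The same call reaches the CUDA kernels declared above when the
  // input lives on a CUDA device.
  if (at::hasCUDA()) {
    at::Tensor y_cuda = at::exp2(x.to(at::kCUDA));
  }
  return 0;
}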
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor expm1(const at::Tensor & self);
21
+ TORCH_API at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & expm1_(at::Tensor & self);
24
+
25
+ } // namespace meta
26
+ } // namespace at
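A minimal sketch of the corresponding public API (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({4}) * 1e-6;

  // expm1 computes exp(x) - 1 with better precision than the naive
  // expression when x is close to zero.
  at::Tensor y = at::expm1(x);
  at::Tensor out = at::empty_like(x);
  at::expm1_out(out, x);                // out= variant
  return 0;
}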
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API fbgemm_linear_fp16_weight {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_linear_fp16_weight")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
26
+ };
27
+
28
+ }} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API fft_irfft2 {
18
+ using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional<c10::string_view>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_irfft2")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm);
26
+ };
27
+
28
+ struct TORCH_API fft_irfft2_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional<c10::string_view>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_irfft2")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
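A minimal round-trip sketch using the corresponding public functions (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor signal = at::rand({8, 8});

  // Forward real 2-D FFT, then invert it; passing the original spatial
  // size as `s` recovers the input shape exactly.
  at::Tensor freq = at::fft_rfft2(signal);
  at::Tensor roundtrip = at::fft_irfft2(freq, {8, 8});
  return 0;
}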
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
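geometric_ is an in-place RNG op; a minimal sketch of the public Tensor method (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  // Fill the tensor in place with draws from a geometric distribution
  // with success probability p = 0.5.
  at::Tensor t = at::empty({5});
  t.geometric_(0.5);

  // A c10::optional<at::Generator> can be passed for reproducibility;
  // omitting it (as above) uses the default generator.
  return 0;
}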
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor i0(const at::Tensor & self);
21
+ TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & i0_(at::Tensor & self);
24
+
25
+ } // namespace cpu
26
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
21
+ TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
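A minimal sketch of the functional and in-place index_copy forms (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({4, 3});
  at::Tensor source = at::ones({2, 3});
  at::Tensor index = at::arange(2);      // rows 0 and 1, int64

  // Copies rows of `source` into positions `index` along dim 0;
  // the functional form leaves `self` untouched.
  at::Tensor result = at::index_copy(self, /*dim=*/0, index, source);

  // In-place variant.
  self.index_copy_(0, index, source);
  return 0;
}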
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API linalg_inv {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv(Tensor A) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & A);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A);
26
+ };
27
+
28
+ struct TORCH_API linalg_inv_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & A, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
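A minimal usage sketch via the public at::linalg_inv entry points (illustrative only, assuming a standard libtorch build and a well-conditioned input; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  // Identity plus small noise keeps the matrix comfortably invertible.
  at::Tensor A = at::eye(3) + at::rand({3, 3}) * 0.1;

  at::Tensor A_inv = at::linalg_inv(A);    // aten::linalg_inv

  at::Tensor out = at::empty_like(A);
  at::linalg_inv_out(out, A);              // aten::linalg_inv.out
  return 0;
}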
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_linalg_lu_solve : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
24
+ };
25
+
26
+ } // namespace meta
27
+ } // namespace at
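The struct above only declares the shape/dtype inference (meta) step of the structured kernel; end users call at::linalg_lu_solve, usually on factors produced by at::linalg_lu_factor. A minimal sketch (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  // A diagonally dominant matrix keeps the system well conditioned.
  at::Tensor A = at::rand({3, 3}) + at::eye(3) * 3.0;
  at::Tensor B = at::rand({3, 2});

  // Factor once, then solve A X = B reusing the LU factors and pivots.
  auto [LU, pivots] = at::linalg_lu_factor(A);
  at::Tensor X = at::linalg_lu_solve(LU, pivots, B);
  return 0;
}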
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
21
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & mkldnn_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
20
+ TORCH_API at::Tensor mkldnn_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
21
+ } // namespace native
22
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={});
21
+ TORCH_API at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
22
+ TORCH_API at::Tensor new_empty_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={});
23
+ TORCH_API at::Tensor new_empty_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
24
+ TORCH_API at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size);
25
+ TORCH_API at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out);
26
+ TORCH_API at::Tensor & new_empty_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size);
27
+ TORCH_API at::Tensor & new_empty_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
28
+
29
+ } // namespace compositeexplicitautograd
30
+ } // namespace at
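These are the CompositeExplicitAutograd kernels behind Tensor::new_empty; a minimal sketch of the public method (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  at::Tensor base = at::rand({2, 2});

  // new_empty returns an uninitialized tensor that inherits dtype and
  // device from `base` unless options are overridden.
  at::Tensor a = base.new_empty({4, 5});
  at::Tensor b = base.new_empty({4, 5}, at::kDouble);   // override dtype
  return 0;
}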
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
21
+ TORCH_API at::Tensor nll_loss2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
22
+ TORCH_API at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
23
+ TORCH_API at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
24
+ TORCH_API at::Tensor & nll_loss2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
25
+ TORCH_API at::Tensor & nll_loss2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
26
+
27
+ } // namespace cuda
28
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor poisson(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
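A minimal sketch of the public at::poisson entry point behind this CPU kernel (illustrative only, assuming a standard libtorch build; not part of the uploaded file):

#include <ATen/ATen.h>

int main() {
  // Each element of `rates` is the lambda of an independent Poisson draw.
  at::Tensor rates = at::rand({4}) * 5.0;
  at::Tensor samples = at::poisson(rates);

  // A c10::optional<at::Generator> may be passed as a second argument;
  // by default the global generator is used.
  return 0;
}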