applied-ai-018 committed
Commit ff642f6 · verified · 1 Parent(s): a761284

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h +3 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h +46 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h +337 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h +25 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h +39 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/List.h +490 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h +360 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h +186 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h +28 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h +23 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Range.h +25 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h +1 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h +276 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h +0 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h +173 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h +1 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h +96 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/blob.h +208 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h +39 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h +387 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h +28 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h +65 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h +242 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h +795 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h +17 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h +313 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h +30 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h +36 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h +20 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h +101 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/function.h +111 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h +10 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h +358 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type.h +2425 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/operator_name.h +92 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/stack.h +200 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/symbol.h +147 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h +9 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh +149 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh +537 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h +19 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h +208 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h +290 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h +174 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h +11 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh +53 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/AcceleratorHooksInterface.h +21 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h +201 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h +102 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h +70 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h ADDED
@@ -0,0 +1,3 @@
+ #pragma once
+
+ #include <c10/macros/Macros.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+ #include <c10/core/QScheme.h>
+
+ // Forward declarations of core ATen types used in dispatch functions
+ namespace c10 {
+
+ template<typename T>
+ class List;
+ template<typename T>
+ class IListRef;
+ class Stream;
+ class Scalar;
+ class SymInt;
+ class SymIntList;
+ struct Storage;
+ struct TensorOptions;
+ template <typename T>
+ class ArrayRef;
+ template <typename T>
+ class OptionalArrayRef;
+
+ } // namespace c10
+
+ namespace at {
+
+ class Tensor;
+ class OptionalTensorRef;
+ struct Dimname;
+ struct Generator;
+ using TensorList = c10::ArrayRef<Tensor>;
+ using ITensorListRef = c10::IListRef<Tensor>;
+ using IOptTensorListRef = c10::IListRef<OptionalTensorRef>;
+ using DimnameList = c10::ArrayRef<Dimname>;
+ using IntArrayRef = c10::ArrayRef<int64_t>;
+ using OptionalIntArrayRef = c10::OptionalArrayRef<int64_t>;
+ using OptionalSymIntArrayRef = c10::OptionalArrayRef<c10::SymInt>;
+
+ using c10::Stream;
+ using c10::Storage;
+ using c10::QScheme;
+ using c10::Scalar;
+ using c10::SymInt;
+ using c10::SymIntList;
+ using c10::TensorOptions;
+
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h ADDED
@@ -0,0 +1,337 @@
+ #pragma once
+
+ #include <ATen/core/Array.h>
+ #include <ATen/core/TransformationHelper.h>
+ #include <c10/util/Half.h>
+ #include <c10/util/BFloat16.h>
+ #include <c10/util/MathConstants.h>
+ #include <c10/util/Optional.h>
+ #include <c10/macros/Macros.h>
+
+ #include <type_traits>
+ #include <limits>
+ #include <cmath>
+
+ /**
+ * Distributions kernel adapted from THRandom.cpp
+ * The kernels try to follow std::random distributions signature
+ * For instance: in ATen
+ * auto gen = at::detail::createCPUGenerator();
+ * at::uniform_real_distribution<double> uniform(0, 1);
+ * auto sample = uniform(gen.get());
+ *
+ * vs std::random
+ *
+ * std::mt19937 gen;
+ * std::uniform_real_distribution uniform(0, 1);
+ * auto sample = uniform(gen);
+ */
+
+
+ namespace at {
+ namespace {
+
+ /**
+ * Samples a discrete uniform distribution in the range [base, base+range) of type T
+ */
+ template <typename T>
+ struct uniform_int_from_to_distribution {
+
+ C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {}
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ if ((
+ std::is_same<T, int64_t>::value ||
+ std::is_same<T, double>::value ||
+ std::is_same<T, float>::value ||
+ std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32)
+ {
+ return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_);
+ } else {
+ return transformation::uniform_int_from_to<T>(generator->random(), range_, base_);
+ }
+ }
+
+ private:
+ uint64_t range_;
+ int64_t base_;
+ };
+
+ /**
+ * Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)]
+ */
+ template <typename T>
+ struct uniform_int_full_range_distribution {
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ return transformation::uniform_int_full_range<T>(generator->random64());
+ }
+
+ };
+
+ /**
+ * Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types
+ * and [0, 2^mantissa] for floating-point types.
+ */
+ template <typename T>
+ struct uniform_int_distribution {
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ if constexpr (std::is_same_v<T, double> || std::is_same_v<T, int64_t>) {
+ return transformation::uniform_int<T>(generator->random64());
+ } else {
+ return transformation::uniform_int<T>(generator->random());
+ }
+ }
+
+ };
+
+ /**
+ * Samples a uniform distribution in the range [from, to) of type T
+ */
+ template <typename T>
+ struct uniform_real_distribution {
+
+ C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
+ TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
+ TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
+ from_ = from;
+ to_ = to;
+ }
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
+ if constexpr (std::is_same_v<T, double>) {
+ return transformation::uniform_real<T>(generator->random64(), from_, to_);
+ } else {
+ return transformation::uniform_real<T>(generator->random(), from_, to_);
+ }
+ }
+
+ private:
+ T from_;
+ T to_;
+ };
+
+ // The SFINAE checks introduced in #39816 looks overcomplicated and must revisited
+ // https://github.com/pytorch/pytorch/issues/40052
+ #define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \
+ template <typename T> \
+ struct has_member_##member \
+ { \
+ typedef char yes; \
+ typedef long no; \
+ template <typename U> static yes test(decltype(&U::member)); \
+ template <typename U> static no test(...); \
+ static constexpr bool value = sizeof(test<T>(0)) == sizeof(yes); \
+ }
+
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample);
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample);
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample);
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample);
+
+ #define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \
+ \
+ template <typename RNG, typename ret_type, \
+ typename std::enable_if_t<( \
+ has_member_next_##TYPE##_normal_sample<RNG>::value && \
+ has_member_set_next_##TYPE##_normal_sample<RNG>::value \
+ ), int> = 0> \
+ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \
+ if (generator->next_##TYPE##_normal_sample()) { \
+ *ret = *(generator->next_##TYPE##_normal_sample()); \
+ generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>()); \
+ return true; \
+ } \
+ return false; \
+ } \
+ \
+ template <typename RNG, typename ret_type, \
+ typename std::enable_if_t<( \
+ !has_member_next_##TYPE##_normal_sample<RNG>::value || \
+ !has_member_set_next_##TYPE##_normal_sample<RNG>::value \
+ ), int> = 0> \
+ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \
+ return false; \
+ } \
+ \
+ template <typename RNG, typename ret_type, \
+ typename std::enable_if_t<( \
+ has_member_set_next_##TYPE##_normal_sample<RNG>::value \
+ ), int> = 0> \
+ C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \
+ generator->set_next_##TYPE##_normal_sample(cache); \
+ } \
+ \
+ template <typename RNG, typename ret_type, \
+ typename std::enable_if_t<( \
+ !has_member_set_next_##TYPE##_normal_sample<RNG>::value \
+ ), int> = 0> \
+ C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \
+ }
+
+ DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double);
+ DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
+
+ /**
+ * Samples a normal distribution using the Box-Muller method
+ * Takes mean and standard deviation as inputs
+ * Note that Box-muller method returns two samples at a time.
+ * Hence, we cache the "next" sample in the CPUGeneratorImpl class.
+ */
+ template <typename T>
+ struct normal_distribution {
+
+ C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
+ TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
+ mean = mean_in;
+ stdv = stdv_in;
+ }
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
+ dist_acctype<T> ret;
+ // return cached values if available
+ if constexpr (std::is_same_v<T, double>) {
+ if (maybe_get_next_double_normal_sample(generator, &ret)) {
+ return transformation::normal(ret, mean, stdv);
+ }
+ } else {
+ if (maybe_get_next_float_normal_sample(generator, &ret)) {
+ return transformation::normal(ret, mean, stdv);
+ }
+ }
+ // otherwise generate new normal values
+ uniform_real_distribution<T> uniform(0.0, 1.0);
+ const dist_acctype<T> u1 = uniform(generator);
+ const dist_acctype<T> u2 = uniform(generator);
+ const dist_acctype<T> r = ::sqrt(static_cast<T>(-2.0) * ::log1p(-u2));
+ const dist_acctype<T> theta = static_cast<T>(2.0) * c10::pi<T> * u1;
+ if constexpr (std::is_same_v<T, double>) {
+ maybe_set_next_double_normal_sample(generator, r * ::sin(theta));
+ } else {
+ maybe_set_next_float_normal_sample(generator, r * ::sin(theta));
+ }
+ ret = r * ::cos(theta);
+ return transformation::normal(ret, mean, stdv);
+ }
+
+ private:
+ T mean;
+ T stdv;
+ };
+
+ template <typename T>
+ struct DiscreteDistributionType { using type = float; };
+
+ template <> struct DiscreteDistributionType<double> { using type = double; };
+
+ /**
+ * Samples a bernoulli distribution given a probability input
+ */
+ template <typename T>
+ struct bernoulli_distribution {
+
+ C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
+ TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
+ p = p_in;
+ }
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ uniform_real_distribution<T> uniform(0.0, 1.0);
+ return transformation::bernoulli<T>(uniform(generator), p);
+ }
+
+ private:
+ T p;
+ };
+
+ /**
+ * Samples a geometric distribution given a probability input
+ */
+ template <typename T>
+ struct geometric_distribution {
+
+ C10_HOST_DEVICE inline geometric_distribution(T p_in) {
+ TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
+ p = p_in;
+ }
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ uniform_real_distribution<T> uniform(0.0, 1.0);
+ return transformation::geometric<T>(uniform(generator), p);
+ }
+
+ private:
+ T p;
+ };
+
+ /**
+ * Samples an exponential distribution given a lambda input
+ */
+ template <typename T>
+ struct exponential_distribution {
+
+ C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {}
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ uniform_real_distribution<T> uniform(0.0, 1.0);
+ return transformation::exponential<T>(uniform(generator), lambda);
+ }
+
+ private:
+ T lambda;
+ };
+
+ /**
+ * Samples a cauchy distribution given median and sigma as inputs
+ */
+ template <typename T>
+ struct cauchy_distribution {
+
+ C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {}
+
+ template <typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
+ uniform_real_distribution<T> uniform(0.0, 1.0);
+ return transformation::cauchy<T>(uniform(generator), median, sigma);
+ }
+
+ private:
+ T median;
+ T sigma;
+ };
+
+ /**
+ * Samples a lognormal distribution
+ * Takes mean and standard deviation as inputs
+ * Outputs two samples at a time
+ */
+ template <typename T>
+ struct lognormal_distribution {
+
+ C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
+ TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
+ mean = mean_in;
+ stdv = stdv_in;
+ }
+
+ template<typename RNG>
+ C10_HOST_DEVICE inline T operator()(RNG generator){
+ normal_distribution<T> normal(mean, stdv);
+ return transformation::log_normal<T>(normal(generator));
+ }
+
+ private:
+ T mean;
+ T stdv;
+ };
+ }
+ } // namespace at
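For orientation, the header comment above sketches how these distribution structs mirror the std::random interface. The lines below are an editorial sketch only (not part of the commit), assuming a CPU generator type that exposes random()/random64() such as at::CPUGeneratorImpl; the helper names createCPUGenerator and check_generator come from ATen's generator headers, not from this file.

// Hedged sketch: mirrors the usage shown in the DistributionsHelper.h comment.
#include <mutex>
#include <ATen/CPUGeneratorImpl.h>
#include <ATen/core/DistributionsHelper.h>

double sample_uniform() {
  auto gen = at::detail::createCPUGenerator();                  // owns a CPUGeneratorImpl
  auto* impl = at::check_generator<at::CPUGeneratorImpl>(gen);  // typed access to random()/random64()
  std::lock_guard<std::mutex> lock(impl->mutex_);               // generators are not thread-safe
  at::uniform_real_distribution<double> uniform(0, 1);          // samples in [0, 1)
  return uniform(impl);                                         // same call shape as std::uniform_real_distribution
}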
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+
+ #include <ostream>
+ #include <string>
+
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Tensor.h>
+
+ namespace c10 {
+ TORCH_API std::ostream& operator<<(std::ostream& out, Backend b);
+ TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s);
+ TORCH_API std::string toString(const Scalar& s);
+ }
+ namespace at {
+
+ TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
+ TORCH_API std::ostream& print(
+ std::ostream& stream,
+ const Tensor& tensor,
+ int64_t linesize);
+ static inline std::ostream& operator<<(std::ostream & out, const Tensor & t) {
+ return print(out,t,80);
+ }
+ TORCH_API void print(const Tensor & t, int64_t linesize=80);
+ }
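The operator<< declared just above is what makes streaming a tensor work; a minimal sketch using only the declarations in this header (editorial, not part of the commit):

#include <iostream>
#include <ATen/ATen.h>

void dump(const at::Tensor& t) {
  std::cout << t << std::endl;    // forwards to print(out, t, /*linesize=*/80)
  at::print(t, /*linesize=*/120); // or call print directly with a wider line length
}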
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace at {
+
+ using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>;
+
+ c10::optional<GeneratorFuncType>& GetGeneratorPrivate();
+
+ class TORCH_API _GeneratorRegister {
+ public:
+ explicit _GeneratorRegister(const GeneratorFuncType& func);
+ };
+
+ TORCH_API at::Generator GetGeneratorForPrivateuse1(
+ c10::DeviceIndex device_index);
+
+ /**
+ * This is used to register Generator to PyTorch for `privateuse1` key.
+ *
+ * Usage: REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1)
+ *
+ * class CustomGeneratorImpl : public c10::GeneratorImpl {
+ * CustomGeneratorImpl(DeviceIndex device_index = -1);
+ * explicit ~CustomGeneratorImpl() override = default;
+ * ...
+ * };
+ *
+ * at::Generator MakeGeneratorForPrivateuse1(c10::DeviceIndex id) {
+ * return at::make_generator<CustomGeneratorImpl>(id);
+ * }
+ */
+
+ #define REGISTER_GENERATOR_PRIVATEUSE1(GeneratorPrivate) \
+ static auto temp##GeneratorPrivate = at::_GeneratorRegister(GeneratorPrivate);
+
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/List.h ADDED
@@ -0,0 +1,490 @@
+ #pragma once
+
+ #include <ATen/core/ivalue_to.h>
+ #include <ATen/core/jit_type_base.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/TypeTraits.h>
+ #include <c10/util/TypeList.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Optional.h>
+ #include <vector>
+
+ namespace at {
+ class Tensor;
+ }
+ namespace c10 {
+ struct IValue;
+ template<class T> class List;
+ struct Type;
+
+ namespace detail {
+
+ struct ListImpl final : public c10::intrusive_ptr_target {
+ using list_type = std::vector<IValue>;
+
+ explicit TORCH_API ListImpl(list_type list_, TypePtr elementType_);
+
+ list_type list;
+
+ TypePtr elementType;
+
+ intrusive_ptr<ListImpl> copy() const {
+ return make_intrusive<ListImpl>(list, elementType);
+ }
+ friend TORCH_API bool operator==(const ListImpl& lhs, const ListImpl& rhs);
+ };
+ }
+
+ namespace impl {
+
+ template<class T, class Iterator> class ListIterator;
+
+ template<class T, class Iterator> class ListElementReference;
+
+ template<class T, class Iterator>
+ void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs);
+
+ template<class T, class Iterator>
+ bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs);
+
+ template<class T, class Iterator>
+ bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs);
+
+ template<class T>
+ struct ListElementConstReferenceTraits {
+ // In the general case, we use IValue::to().
+ using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return<T>::type;
+ };
+
+ // There is no to() overload for c10::optional<std::string>.
+ template<>
+ struct ListElementConstReferenceTraits<c10::optional<std::string>> {
+ using const_reference = c10::optional<std::reference_wrapper<const std::string>>;
+ };
+
+ template<class T, class Iterator>
+ class ListElementReference final {
+ public:
+ operator std::conditional_t<
+ std::is_reference<typename c10::detail::
+ ivalue_to_const_ref_overload_return<T>::type>::value,
+ const T&,
+ T>() const;
+
+ ListElementReference& operator=(T&& new_value) &&;
+
+ ListElementReference& operator=(const T& new_value) &&;
+
+ // assigning another ref to this assigns the underlying value
+ ListElementReference& operator=(ListElementReference&& rhs) && noexcept;
+
+ const IValue& get() const& {
+ return *iterator_;
+ }
+
+ friend void swap<T, Iterator>(ListElementReference&& lhs, ListElementReference&& rhs);
+
+ ListElementReference(const ListElementReference&) = delete;
+ ListElementReference& operator=(const ListElementReference&) = delete;
+
+ private:
+ ListElementReference(Iterator iter)
+ : iterator_(iter) {}
+
+ // allow moving, but only our friends (i.e. the List class) can move us
+ ListElementReference(ListElementReference&&) noexcept = default;
+ ListElementReference& operator=(ListElementReference&& rhs) & noexcept {
+ iterator_ = std::move(rhs.iterator_);
+ return *this;
+ }
+
+ friend class List<T>;
+ friend class ListIterator<T, Iterator>;
+
+ Iterator iterator_;
+ };
+
+ // this wraps vector::iterator to make sure user code can't rely
+ // on it being the type of the underlying vector.
+ template <class T, class Iterator>
+ class ListIterator final {
+ public:
+ // C++17 friendly std::iterator implementation
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = T;
+ using difference_type = std::ptrdiff_t;
+ using pointer = T*;
+ using reference = ListElementReference<T, Iterator>;
+
+ explicit ListIterator() = default;
+ ~ListIterator() = default;
+
+ ListIterator(const ListIterator&) = default;
+ ListIterator(ListIterator&&) noexcept = default;
+ ListIterator& operator=(const ListIterator&) = default;
+ ListIterator& operator=(ListIterator&&) noexcept = default;
+
+ ListIterator& operator++() {
+ ++iterator_;
+ return *this;
+ }
+
+ ListIterator operator++(int) {
+ ListIterator copy(*this);
+ ++*this;
+ return copy;
+ }
+
+ ListIterator& operator--() {
+ --iterator_;
+ return *this;
+ }
+
+ ListIterator operator--(int) {
+ ListIterator copy(*this);
+ --*this;
+ return copy;
+ }
+
+ ListIterator& operator+=(typename List<T>::size_type offset) {
+ iterator_ += offset;
+ return *this;
+ }
+
+ ListIterator& operator-=(typename List<T>::size_type offset) {
+ iterator_ -= offset;
+ return *this;
+ }
+
+ ListIterator operator+(typename List<T>::size_type offset) const {
+ return ListIterator{iterator_ + offset};
+ }
+
+ ListIterator operator-(typename List<T>::size_type offset) const {
+ return ListIterator{iterator_ - offset};
+ }
+
+ friend difference_type operator-(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.iterator_ - rhs.iterator_;
+ }
+
+ ListElementReference<T, Iterator> operator*() const {
+ return {iterator_};
+ }
+
+ ListElementReference<T, Iterator> operator[](typename List<T>::size_type offset) const {
+ return {iterator_ + offset};
+ }
+
+ private:
+ explicit ListIterator(Iterator iterator): iterator_(std::move(iterator)) {}
+
+ Iterator iterator_;
+
+ friend bool operator==(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.iterator_ == rhs.iterator_;
+ }
+
+ friend bool operator!=(const ListIterator& lhs, const ListIterator& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend bool operator<(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.iterator_ < rhs.iterator_;
+ }
+
+ friend bool operator<=(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.iterator_ <= rhs.iterator_;
+ }
+
+ friend bool operator>(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.iterator_ > rhs.iterator_;
+ }
+
+ friend bool operator>=(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.iterator_ >= rhs.iterator_;
+ }
+
+ friend class ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
+ friend class List<T>;
+ };
+
+ template<class T> List<T> toTypedList(List<IValue> list);
+ template<class T> List<IValue> toList(List<T>&& list);
+ template<class T> List<IValue> toList(const List<T>& list);
+ const IValue* ptr_to_first_element(const List<IValue>& list);
+ }
+
+ /**
+ * An object of this class stores a list of values of type T.
+ *
+ * This is a pointer type. After a copy, both Lists
+ * will share the same storage:
+ *
+ * > List<int> a;
+ * > List<int> b = a;
+ * > b.push_back("three");
+ * > ASSERT("three" == a.get(0));
+ *
+ * We use this class in the PyTorch kernel API instead of
+ * std::vector<T>, because that allows us to do optimizations
+ * and switch out the underlying list implementation without
+ * breaking backwards compatibility for the kernel API.
+ */
+ template<class T>
+ class List final {
+ private:
+ // This is an intrusive_ptr because List is a pointer type.
+ // Invariant: This will never be a nullptr, there will always be a valid
+ // ListImpl.
+ c10::intrusive_ptr<c10::detail::ListImpl> impl_;
+
+ using internal_reference_type = impl::ListElementReference<T, typename c10::detail::ListImpl::list_type::iterator>;
+ using internal_const_reference_type = typename impl::ListElementConstReferenceTraits<T>::const_reference;
+
+ public:
+ using value_type = T;
+ using size_type = typename c10::detail::ListImpl::list_type::size_type;
+ using iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
+ using const_iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
+ using reverse_iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::reverse_iterator>;
+
+ /**
+ * Constructs an empty list.
+ */
+ explicit List();
+
+ /**
+ * Constructs a list with some initial values.
+ * Example:
+ * List<int> a({2, 3, 4});
+ */
+ List(std::initializer_list<T> initial_values);
+ explicit List(ArrayRef<T> initial_values);
+
+ /**
+ * Create a generic list with runtime type information.
+ * This only works for c10::impl::GenericList and is not part of the public API
+ * but only supposed to be used internally by PyTorch.
+ */
+ explicit List(TypePtr elementType);
+
+ List(const List&) = default;
+ List& operator=(const List&) = default;
+
+ /**
+ * Create a new List pointing to a deep copy of the same data.
+ * The List returned is a new list with separate storage.
+ * Changes in it are not reflected in the original list or vice versa.
+ */
+ List copy() const;
+
+ /**
+ * Returns the element at specified location pos, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ */
+ internal_const_reference_type get(size_type pos) const;
+
+ /**
+ * Moves out the element at the specified location pos and returns it, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ * The list contains an invalid element at position pos afterwards. Any operations
+ * on it before re-setting it are invalid.
+ */
+ value_type extract(size_type pos) const;
+
+ /**
+ * Returns a reference to the element at specified location pos, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ *
+ * You cannot store the reference, but you can read it and assign new values to it:
+ *
+ * List<int64_t> list = ...;
+ * list[2] = 5;
+ * int64_t v = list[1];
+ */
+ internal_const_reference_type operator[](size_type pos) const;
+
+ internal_reference_type operator[](size_type pos);
+
+ /**
+ * Assigns a new value to the element at location pos.
+ */
+ void set(size_type pos, const value_type& value) const;
+
+ /**
+ * Assigns a new value to the element at location pos.
+ */
+ void set(size_type pos, value_type&& value) const;
+
+ /**
+ * Returns an iterator to the first element of the container.
+ * If the container is empty, the returned iterator will be equal to end().
+ */
+ iterator begin() const;
+
+ /**
+ * Returns an iterator to the element following the last element of the container.
+ * This element acts as a placeholder; attempting to access it results in undefined behavior.
+ */
+ iterator end() const;
+
+ /**
+ * Checks if the container has no elements.
+ */
+ bool empty() const;
+
+ /**
+ * Returns the number of elements in the container
+ */
+ size_type size() const;
+
+ /**
+ * Increase the capacity of the vector to a value that's greater or equal to new_cap.
+ */
+ void reserve(size_type new_cap) const;
+
+ /**
+ * Erases all elements from the container. After this call, size() returns zero.
+ * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated.
+ */
+ void clear() const;
+
+ /**
+ * Inserts value before pos.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ iterator insert(iterator pos, const T& value) const;
+
+ /**
+ * Inserts value before pos.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ iterator insert(iterator pos, T&& value) const;
+
+ /**
+ * Inserts a new element into the container directly before pos.
+ * The new element is constructed with the given arguments.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ template<class... Args>
+ iterator emplace(iterator pos, Args&&... value) const;
+
+ /**
+ * Appends the given element value to the end of the container.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ void push_back(const T& value) const;
+
+ /**
+ * Appends the given element value to the end of the container.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ void push_back(T&& value) const;
+
+ /**
+ * Appends the given list to the end of the container. Uses at most one memory allocation.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ void append(List<T> lst) const;
+
+ /**
+ * Appends the given element value to the end of the container.
+ * The new element is constructed with the given arguments.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ template<class... Args>
+ void emplace_back(Args&&... args) const;
+
+ /**
+ * Removes the element at pos.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ iterator erase(iterator pos) const;
+
+ /**
+ * Removes the elements in the range [first, last).
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ iterator erase(iterator first, iterator last) const;
+
+ /**
+ * Removes the last element of the container.
+ * Calling pop_back on an empty container is undefined.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ void pop_back() const;
+
+ /**
+ * Resizes the container to contain count elements.
+ * If the current size is less than count, additional default-inserted elements are appended.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ void resize(size_type count) const;
+
+ /**
+ * Resizes the container to contain count elements.
+ * If the current size is less than count, additional copies of value are appended.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ void resize(size_type count, const T& value) const;
+
+ /**
+ * Value equality comparison. This function implements Python-like semantics for
+ * equality: two lists with the same identity (e.g. same pointer) trivially
+ * compare equal, otherwise each element is compared for equality.
+ */
+ template <class T_>
+ friend bool operator==(const List<T_>& lhs, const List<T_>& rhs);
+
+ template <class T_>
+ friend bool operator!=(const List<T_>& lhs, const List<T_>& rhs);
+
+ /**
+ * Identity comparison. Returns true if and only if `rhs` represents the same
+ * List object as `this`.
+ */
+ bool is(const List<T>& rhs) const;
+
+ std::vector<T> vec() const;
+
+ /**
+ * Returns the number of Lists currently pointing to this same list.
+ * If this is the only instance pointing to this list, returns 1.
+ */
+ // TODO Test use_count
+ size_t use_count() const;
+
+ TypePtr elementType() const;
+
+ // See [unsafe set type] for why this exists.
+ void unsafeSetElementType(TypePtr t);
+
+ private:
+ explicit List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements);
+ explicit List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements);
+ friend struct IValue;
+ template<class T_> friend List<T_> impl::toTypedList(List<IValue>);
+ template<class T_> friend List<IValue> impl::toList(List<T_>&&);
+ template<class T_> friend List<IValue> impl::toList(const List<T_>&);
+ friend const IValue* impl::ptr_to_first_element(const List<IValue>& list);
+ };
+
+ namespace impl {
+ // GenericList is how IValue stores lists. It is, however, not part of the
+ // public API. Kernels should use Lists with concrete types instead
+ // (maybe except for some internal prim ops).
+ using GenericList = List<IValue>;
+
+ const IValue* ptr_to_first_element(const GenericList& list);
+
+ }
+ }
+
+ namespace torch {
+ template<class T> using List = c10::List<T>;
+ }
+
+ #include <ATen/core/List_inl.h> // IWYU pragma: keep
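To make the pointer-type semantics described in the class comment concrete, here is a short editorial sketch (not part of the commit) that uses only members declared in this header:

#include <ATen/core/List.h>

void list_semantics() {
  c10::List<int64_t> a({2, 3, 4});
  c10::List<int64_t> b = a;        // shares storage with a (pointer semantics)
  b.push_back(5);                  // visible through a as well: both point at the same ListImpl
  c10::List<int64_t> c = a.copy(); // deep copy: separate storage
  c[0] = 7;                        // does not affect a or b
  int64_t first = a[0];            // bounds-checked element access
}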
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h ADDED
@@ -0,0 +1,360 @@
+ #pragma once
+
+ #include <ATen/core/jit_type_base.h>
+ #include <ATen/core/ivalue.h>
+
+ namespace c10 {
+
+ template<class T> decltype(auto) getTypePtr();
+ std::string toString(const Type& type);
+
+ template<class T>
+ List<T>::List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements)
+ : impl_(std::move(elements)) {}
+
+ template<class T>
+ List<T>::List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements)
+ : impl_(elements) {}
+
+ template<class T>
+ List<T>::List()
+ : List(make_intrusive<c10::detail::ListImpl>(
+ typename c10::detail::ListImpl::list_type(),
+ getTypePtr<T>())) {
+ static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType) instead.");
+ }
+
+ template<class T>
+ List<T>::List(ArrayRef<T> values)
+ : List(make_intrusive<c10::detail::ListImpl>(
+ typename c10::detail::ListImpl::list_type(),
+ getTypePtr<T>())) {
+ static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
+ impl_->list.reserve(values.size());
+ for (const T& element : values) {
+ impl_->list.push_back(element);
+ }
+ }
+
+ template<class T>
+ List<T>::List(std::initializer_list<T> initial_values)
+ : List(ArrayRef<T>(initial_values)) {
+ static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
+ }
+
+ template<class T>
+ List<T>::List(TypePtr elementType)
+ : List(make_intrusive<c10::detail::ListImpl>(
+ typename c10::detail::ListImpl::list_type(),
+ std::move(elementType))) {
+ static_assert(std::is_same<T, IValue>::value || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
+ "This constructor is only valid for c10::impl::GenericList or List<Future>.");
+ }
+
+ namespace impl {
+ template<class T>
+ List<T> toTypedList(impl::GenericList list) {
+ // If there's other instances of the list (i.e. list.use_count() > 1), then we have to be invariant
+ // because upcasting would allow people to add types into the new list that would break the old list.
+ // However, if there aren't any other instances of this list (i.e. list.use_count() == 1), then we can
+ // allow upcasting. This can be a perf improvement since we can cast List<T> to List<optional<T>>
+ // without having to copy it. This is also used to provide backwards compatibility with some old models
+ // that serialized the index arguments to aten::index, aten::index_put, aten::index_put_ and aten::index_put_impl_
+ // as List<Tensor> before we changed that argument to be List<optional<Tensor>>. When deserializing, we
+ // have list.use_count() == 1 and can deserialize the List<Tensor> directly as List<optional<Tensor>>.
+ TORCH_CHECK(*list.impl_->elementType == *getTypePtr<T>()
+ || (list.use_count() == 1 && list.impl_->elementType->isSubtypeOf(*getTypePtr<T>()))
+ , "Tried to cast a List<", toString(*list.impl_->elementType), "> to a List<", toString(*getTypePtr<T>()), ">. Types mismatch.");
+ return List<T>(std::move(list.impl_));
+ }
+
+ template<class T>
+ impl::GenericList toList(List<T>&& list) {
+ return GenericList(std::move(list.impl_));
+ }
+ template<class T>
+ impl::GenericList toList(const List<T>& list) {
+ return GenericList(list.impl_);
+ }
+ }
+
+ template<class T>
+ List<T> List<T>::copy() const {
+ return List<T>(impl_->copy());
+ }
+
+ namespace detail {
+ template<class T>
+ T list_element_to(T element) {
+ return element;
+ }
+ template<class T>
+ T list_element_to(const IValue& element) {
+ return element.template to<T>();
+ }
+ template<class T>
+ T list_element_to(IValue&& element) {
+ return std::move(element).template to<T>();
+ }
+ template<class T>
+ struct ListElementFrom {
+ static IValue from(const T& element) {
+ return element;
+ }
+ static IValue from(T&& element) {
+ return std::move(element);
+ }
+ };
+ template<>
+ struct ListElementFrom<IValue> {
+ static const IValue& from(const IValue& element) {
+ return element;
+ }
+ static IValue&& from(IValue&& element) {
+ return std::move(element);
+ }
+ };
+ }
+
+ namespace impl {
+
+ template <class T, class Iterator>
+ ListElementReference<T, Iterator>::operator std::conditional_t<
+ std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<
+ T>::type>::value,
+ const T&,
+ T>() const {
+ return iterator_->template to<T>();
+ }
+
+ template<class T, class Iterator>
+ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(T&& new_value) && {
+ *iterator_ = c10::detail::ListElementFrom<T>::from(std::move(new_value));
+ return *this;
+ }
+
+ template<class T, class Iterator>
+ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(const T& new_value) && {
+ *iterator_ = c10::detail::ListElementFrom<T>::from(new_value);
+ return *this;
+ }
+
+ template<class T, class Iterator>
+ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference<T, Iterator>&& rhs) && noexcept {
+ *iterator_ = *rhs.iterator_;
+ return *this;
+ }
+
+ template<class T, class Iterator>
+ void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs) {
+ std::swap(*lhs.iterator_, *rhs.iterator_);
+ }
+
+ template<class T, class Iterator>
+ bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs) {
+ const T& lhs_tmp = lhs;
+ return lhs_tmp == rhs;
+ }
+
+ template<class T, class Iterator>
+ inline bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs) {
+ return rhs == lhs;
+ }
+
+ template<class T>
+ inline typename ListElementConstReferenceTraits<T>::const_reference
+ list_element_to_const_ref(const IValue& element) {
+ return element.template to<T>();
+ }
+
+ template<>
+ inline typename ListElementConstReferenceTraits<c10::optional<std::string>>::const_reference
+ list_element_to_const_ref<c10::optional<std::string>>(const IValue& element) {
+ return element.toOptionalStringRef();
+ }
+
+ } // namespace impl
+
+ template<class T>
+ void List<T>::set(size_type pos, const value_type& value) const {
+ impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(value);
+ }
+
+ template<class T>
+ void List<T>::set(size_type pos, value_type&& value) const {
+ impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(std::move(value));
+ }
+
+ template<class T>
+ typename List<T>::internal_const_reference_type List<T>::get(size_type pos) const {
+ return operator[](pos);
+ }
+
+ template<class T>
+ typename List<T>::internal_const_reference_type List<T>::operator[](size_type pos) const {
+ return c10::impl::list_element_to_const_ref<T>(impl_->list.at(pos));
+ }
+
+ template<class T>
+ typename List<T>::internal_reference_type List<T>::operator[](size_type pos) {
+ static_cast<void>(impl_->list.at(pos)); // Throw the exception if it is out of range.
+ return {impl_->list.begin() + static_cast<typename decltype(impl_->list)::difference_type>(pos)};
+ }
+
+ template<class T>
+ typename List<T>::value_type List<T>::extract(size_type pos) const {
+ auto& elem = impl_->list.at(pos);
+ auto result = c10::detail::list_element_to<T>(std::move(elem));
+ // Reset the list element to a T() instead of None to keep it correctly typed
+ elem = c10::detail::ListElementFrom<T>::from(T{});
+ return result;
+ }
+
+ template<class T>
+ typename List<T>::iterator List<T>::begin() const {
+ return iterator(impl_->list.begin());
+ }
+
+ template<class T>
+ typename List<T>::iterator List<T>::end() const {
+ return iterator(impl_->list.end());
+ }
+
+ template<class T>
+ bool List<T>::empty() const {
+ return impl_->list.empty();
+ }
+
+ template<class T>
+ typename List<T>::size_type List<T>::size() const {
+ return impl_->list.size();
+ }
+
+ template<class T>
+ void List<T>::reserve(size_type new_cap) const {
+ impl_->list.reserve(new_cap);
+ }
+
+ template<class T>
+ void List<T>::clear() const {
+ impl_->list.clear();
+ }
+
+ template<class T>
+ typename List<T>::iterator List<T>::insert(iterator pos, const T& value) const {
+ return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(value)) };
+ }
+
+ template<class T>
+ typename List<T>::iterator List<T>::insert(iterator pos, T&& value) const {
+ return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(std::move(value))) };
+ }
+
+ template<class T>
+ template<class... Args>
+ typename List<T>::iterator List<T>::emplace(iterator pos, Args&&... value) const {
+ // TODO Use list_element_from?
+ return iterator { impl_->list.emplace(pos.iterator_, std::forward<Args>(value)...) };
+ }
+
+ template<class T>
+ void List<T>::push_back(const T& value) const {
+ impl_->list.push_back(c10::detail::ListElementFrom<T>::from(value));
+ }
+
+ template<class T>
+ void List<T>::push_back(T&& value) const {
+ impl_->list.push_back(c10::detail::ListElementFrom<T>::from(std::move(value)));
+ }
+
+ template<class T>
+ void List<T>::append(List<T> b) const {
+ if (b.use_count() == 1) {
+ impl_->list.insert(impl_->list.end(), make_move_iterator(b.impl_->list.begin()), make_move_iterator(b.impl_->list.end()));
+ } else {
+ impl_->list.insert(impl_->list.end(), b.impl_->list.begin(), b.impl_->list.end());
+ }
+ }
+
+ template<class T>
+ template<class... Args>
+ void List<T>::emplace_back(Args&&... args) const {
+ // TODO Use list_element_from?
+ impl_->list.push_back(T(std::forward<Args>(args)...));
+ }
+
+ template<class T>
+ typename List<T>::iterator List<T>::erase(iterator pos) const {
+ return iterator { impl_->list.erase(pos.iterator_) };
+ }
+
+ template<class T>
+ typename List<T>::iterator List<T>::erase(iterator first, iterator last) const {
+ return iterator { impl_->list.erase(first.iterator_, last.iterator_) };
+ }
+
+ template<class T>
+ void List<T>::pop_back() const {
+ impl_->list.pop_back();
+ }
+
+ template<class T>
+ void List<T>::resize(size_type count) const {
+ impl_->list.resize(count, T{});
+ }
+
+ template<class T>
+ void List<T>::resize(size_type count, const T& value) const {
+ impl_->list.resize(count, value);
+ }
+
+ template<class T>
+ bool operator==(const List<T>& lhs, const List<T>& rhs) {
+ // Lists with the same identity trivially compare equal.
+ if (lhs.impl_ == rhs.impl_) {
+ return true;
+ }
+
+ // Otherwise, just compare values directly.
+ return *lhs.impl_ == *rhs.impl_;
+ }
+
+ template<class T>
+ bool operator!=(const List<T>& lhs, const List<T>& rhs) {
+ return !(lhs == rhs);
+ }
+
+ template<class T>
+ bool List<T>::is(const List<T>& rhs) const {
+ return this->impl_ == rhs.impl_;
+ }
+
+ template<class T>
+ std::vector<T> List<T>::vec() const {
+ std::vector<T> result(begin(), end());
+ return result;
+ }
+
+ template<class T>
+ size_t List<T>::use_count() const {
+ return impl_.use_count();
+ }
+
+ template <class T>
+ TypePtr List<T>::elementType() const {
+ return impl_->elementType;
+ }
+
+ template <class T>
+ void List<T>::unsafeSetElementType(TypePtr t) {
+ impl_->elementType = std::move(t);
+ }
+
+ namespace impl {
+
+ inline const IValue* ptr_to_first_element(const GenericList& list) {
+ return &list.impl_->list[0];
+ }
+
+ }
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/ConstantSymNodeImpl.h>
4
+ #include <c10/core/SymNodeImpl.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/Optional.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+ #include <cstdint>
10
+ #include <string>
11
+
12
+ namespace c10 {
13
+
14
+ // The motivating usecase for this is to represent the ragged size structure
15
+ // of a jagged tensor [B, [s_0, s_1, s_2], D] as a single integer j0. This
16
+ // allows us to simply return [B, j0, D] if someone queries for the size of our
17
+ // tensor.
18
+ //
19
+ // Morally we define comparison between two nested ints to return true if
20
+ // that comparison holds for all corresponding elements of the arrays they
21
+ // represent. Comparison between a nested int and a plain int is defined
22
+ // similarly.
23
+ //
24
+ // To simulate this desired behavior but also avoid the O(N) cost of checking,
25
+ // we associate each raggedness pattern with an integer "id" that can be used as
26
+ // a proxy to evaluate equality. We also constrain the range of values for this
27
+ // as to enable inequality checks.
28
+ //
29
+ // We also support a positive integer scalar "coeff" that is used for computing
30
+ // strides. For example given, a [B, j0, D] tensor, it can be strided in two
31
+ // different ways: [D * j0, D, 1] and [j0, 1, sum(j0)]. The coeff is used to
32
+ // differentiate the two cases.
33
+ //
34
+ // During tracing the strides of the outputs need to be a function of the size
35
+ // and strides of the inputs so it is important that NestedIntSymNode itself is
36
+ // able to express this.
37
+ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
38
+ public:
39
+ // CAUTION: you should probably not be constructing these directly; please
40
+ // the higher-level API in python instead (TODO: actually introduce that).
41
+ explicit NestedIntSymNodeImpl(int64_t val, int64_t coeff)
42
+ : val_(val), coeff_(coeff) {}
43
+
44
+ bool bool_() override {
45
+ return false;
46
+ }
47
+
48
+ bool is_int() override {
49
+ return true;
50
+ }
51
+
52
+ bool is_float() override {
53
+ return false;
54
+ }
55
+
56
+ bool is_bool() override {
57
+ return false;
58
+ }
59
+
60
+ bool is_nested_int() const override {
61
+ return true;
62
+ }
63
+
64
+ bool has_hint() override {
65
+ return true;
66
+ }
67
+
68
+ c10::SymNode wrap_int(int64_t num) override {
69
+ return SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(num));
70
+ };
71
+
72
+ int64_t guard_int(const char* file, int64_t line) override {
73
+ TORCH_CHECK(false);
74
+ }
75
+
76
+ double guard_float(const char* file, int64_t line) override {
77
+ TORCH_CHECK(false, "not a float");
78
+ }
79
+
80
+ bool guard_bool(const char* file, int64_t line) override {
81
+ TORCH_CHECK(false, "not a bool");
82
+ }
83
+
84
+ int64_t int_() override {
85
+ TORCH_CHECK(false);
86
+ }
87
+
88
+ std::string str() override {
89
+ if (coeff_ == 1) {
90
+ return "j" + std::to_string(val_);
91
+ }
92
+ return std::to_string(coeff_) + "*j" + std::to_string(val_);
93
+ }
94
+
95
+ // NOTE [ Inequalities with nested int ]
96
+ //
97
+ // The semantics of nested int when it comes to relations is that it is
98
+ // treated as an integer known to be within a certain range,
99
+ //
100
+ // j0 \in [2, int64_t::max]
101
+ //
102
+ // allowing us to answer queries like j0 >= 1 (True), and j0 == 0 (False).
103
+ // This is a useful default range for the raggedness pattern of a jagged
104
+ // tensor (1) since sizes are non-negative, and (2) we need to get past 0/1
105
+ // specialization checks.
106
+ //
107
+ // [ Indeterminate inequalities error out ]
108
+ //
109
+ // Given the semantic defined above, certain relations like j0 < 3 are thus
110
+ // indeterminable. In our impl today, evaluating such relations errors out.
111
+ //
112
+ // It may seem convenient to just define indeterminate relations to return
113
+ // False, but the implementation we maintain in parallel using sympy does not
114
+ // allow this.
115
+ //
116
+ // Sympy only allows overriding of Ge. The other relations (Lt, Gt, Le) are,
117
+ // by consequence, all derived from Ge e.g., Lt(a, b) := !Ge(a, b). This
118
+ // would mean that if we define the indeterminate j0 >= 3 to be
119
+ // False, the (also indeterminate) j0 < 3 would evaluate to True!
120
+ //
121
+ // [ Coefficients are assumed positive ]
122
+ //
123
+ // For the purpose of computing inequalities, we consider the coefficient of
124
+ // the nested int to be a positive integer.
125
+ //
126
+ // Thus, no modifications are needed to the logic since
127
+ // j0 >= k implies coeff * j0 >= k
128
+ //
129
+ c10::SymNode eq(const c10::SymNode& other) override;
130
+ c10::SymNode ne(const c10::SymNode& other) override;
131
+ c10::SymNode ge(const c10::SymNode& other) override;
132
+ c10::SymNode gt(const c10::SymNode& other) override;
133
+ c10::SymNode lt(const c10::SymNode& other) override;
134
+ c10::SymNode le(const c10::SymNode& other) override;
135
+ c10::SymNode mul(const c10::SymNode& other) override;
136
+
137
+ c10::optional<int64_t> nested_int() override {
138
+ return val_;
139
+ }
140
+
141
+ c10::optional<int64_t> nested_int_coeff() override {
142
+ return coeff_;
143
+ }
144
+
145
+ bool is_symbolic() override {
146
+ return false;
147
+ }
148
+
149
+ #define DEFINE_BINARY_NOT_SUPPORTED(name) \
150
+ c10::SymNode name(const c10::SymNode& other) override { \
151
+ TORCH_CHECK(false, #name " not supported by NestedIntSymNode"); \
152
+ }
153
+
154
+ DEFINE_BINARY_NOT_SUPPORTED(add)
155
+ DEFINE_BINARY_NOT_SUPPORTED(sub)
156
+ DEFINE_BINARY_NOT_SUPPORTED(truediv)
157
+ DEFINE_BINARY_NOT_SUPPORTED(pow)
158
+ DEFINE_BINARY_NOT_SUPPORTED(floordiv)
159
+ DEFINE_BINARY_NOT_SUPPORTED(mod)
160
+ DEFINE_BINARY_NOT_SUPPORTED(sym_min)
161
+ DEFINE_BINARY_NOT_SUPPORTED(sym_max)
162
+ DEFINE_BINARY_NOT_SUPPORTED(sym_and)
163
+ DEFINE_BINARY_NOT_SUPPORTED(sym_or)
164
+
165
+ #undef DEFINE_BINARY_NOT_SUPPORTED
166
+
167
+ #define DEFINE_NOT_SUPPORTED(name) \
168
+ c10::SymNode name() override { \
169
+ TORCH_CHECK(false, #name " is not supported by NestedIntSymNode"); \
170
+ }
171
+
172
+ DEFINE_NOT_SUPPORTED(sym_not)
173
+ DEFINE_NOT_SUPPORTED(ceil)
174
+ DEFINE_NOT_SUPPORTED(floor)
175
+ DEFINE_NOT_SUPPORTED(neg)
176
+ DEFINE_NOT_SUPPORTED(clone)
177
+ DEFINE_NOT_SUPPORTED(sym_float)
178
+
179
+ #undef DEFINE_NOT_SUPPORTED
180
+
181
+ private:
182
+ int64_t val_;
183
+ int64_t coeff_;
184
+ };
185
+
186
+ } // namespace c10
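A minimal sketch (not part of this commit) of the comparison semantics described in the comments above. It constructs the node directly, which the CAUTION note discourages, purely for illustration; the function and variable names are made up.

#include <ATen/core/NestedIntSymNodeImpl.h>

void nested_int_sketch() {
  // j0 with coeff 1; `val` is the raggedness-pattern id.
  c10::SymNode j0 =
      c10::make_intrusive<c10::NestedIntSymNodeImpl>(/*val=*/0, /*coeff=*/1);
  c10::SymNode one = j0->wrap_int(1);
  // Determinate per the range note (j0 is treated as lying in [2, int64_t::max]):
  c10::SymNode always_true = j0->ge(one);
  // Indeterminate relations such as j0 < 3 are expected to throw rather than
  // silently return false (see the note on inequalities above).
  (void)always_true;
}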
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ #include <ATen/core/TorchDispatchUtils.h>
3
+
4
+ namespace at {
5
+ namespace impl {
6
+
7
+ struct TORCH_API RestorePythonTLSSnapshot {
8
+ RestorePythonTLSSnapshot();
9
+ ~RestorePythonTLSSnapshot();
10
+
11
+ private:
12
+ c10::impl::LocalDispatchKeySet saved_;
13
+ c10::impl::ForceDispatchKeyGuard guard_;
14
+ };
15
+
16
+
17
+ // RAII guard to make working with the above TLS safer.
18
+ struct TORCH_API MaybeSetTLSOnEntryGuard {
19
+ public:
20
+ MaybeSetTLSOnEntryGuard();
21
+ ~MaybeSetTLSOnEntryGuard();
22
+
23
+ private:
24
+ bool value_set_;
25
+ };
26
+
27
+ } // namespace impl
28
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/dispatch/Dispatcher.h>
4
+
5
+ // TODO: this can probably live in c10
6
+
7
+ namespace at {
8
+ namespace impl {
9
+
10
+ class TORCH_API PythonOpRegistrationTrampoline final {
11
+ static std::atomic<c10::impl::PyInterpreter*> interpreter_;
12
+
13
+ public:
14
+ // Returns true if you successfully registered yourself (that means
15
+ // you are in the hot seat for doing the operator registrations!)
16
+ static bool registerInterpreter(c10::impl::PyInterpreter*);
17
+
18
+ // Returns nullptr if no interpreter has been registered yet.
19
+ static c10::impl::PyInterpreter* getInterpreter();
20
+ };
21
+
22
+ } // namespace impl
23
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Range.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <iosfwd>
5
+
6
+ namespace at {
7
+
8
+ struct Range {
9
+ Range(int64_t begin, int64_t end)
10
+ : begin(begin)
11
+ , end(end) {}
12
+
13
+ int64_t size() const { return end - begin; }
14
+
15
+ Range operator/(int64_t divisor) {
16
+ return Range(begin / divisor, end / divisor);
17
+ }
18
+
19
+ int64_t begin;
20
+ int64_t end;
21
+ };
22
+
23
+ std::ostream& operator<<(std::ostream& out, const Range& range);
24
+
25
+ } // namespace at
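A small usage sketch (not part of this commit) showing how the struct above behaves; the function name is made up.

#include <ATen/core/Range.h>
#include <cstdint>

void range_sketch() {
  at::Range r(0, 128);        // half-open interval [0, 128)
  int64_t n = r.size();       // 128
  at::Range halved = r / 2;   // divides both endpoints: [0, 64)
  (void)n;
  (void)halved;
}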
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h ADDED
@@ -0,0 +1 @@
1
+ #include <c10/core/ScalarType.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h ADDED
@@ -0,0 +1,276 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Deprecated.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/irange.h>
8
+ #include <cstddef>
9
+ #include <cstdint>
10
+
11
+ namespace at {
12
+
13
+ // The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor
14
+ // is used to enable the __restrict__ keyword/modifier for the data
15
+ // passed to cuda.
16
+ template <typename T>
17
+ struct DefaultPtrTraits {
18
+ typedef T* PtrType;
19
+ };
20
+
21
+ #if defined(__CUDACC__) || defined(__HIPCC__)
22
+ template <typename T>
23
+ struct RestrictPtrTraits {
24
+ typedef T* __restrict__ PtrType;
25
+ };
26
+ #endif
27
+
28
+ // TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors.
29
+ // For CUDA tensors they are used in device code (only). This means that we restrict ourselves
30
+ // to functions and types available there (e.g. IntArrayRef isn't).
31
+
32
+ // The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers.
33
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
34
+ class TensorAccessorBase {
35
+ public:
36
+ typedef typename PtrTraits<T>::PtrType PtrType;
37
+
38
+ C10_HOST_DEVICE TensorAccessorBase(
39
+ PtrType data_,
40
+ const index_t* sizes_,
41
+ const index_t* strides_)
42
+ : data_(data_), sizes_(sizes_), strides_(strides_) {}
43
+ C10_HOST IntArrayRef sizes() const {
44
+ return IntArrayRef(sizes_,N);
45
+ }
46
+ C10_HOST IntArrayRef strides() const {
47
+ return IntArrayRef(strides_,N);
48
+ }
49
+ C10_HOST_DEVICE index_t stride(index_t i) const {
50
+ return strides_[i];
51
+ }
52
+ C10_HOST_DEVICE index_t size(index_t i) const {
53
+ return sizes_[i];
54
+ }
55
+ C10_HOST_DEVICE PtrType data() {
56
+ return data_;
57
+ }
58
+ C10_HOST_DEVICE const PtrType data() const {
59
+ return data_;
60
+ }
61
+ protected:
62
+ PtrType data_;
63
+ const index_t* sizes_;
64
+ const index_t* strides_;
65
+ };
66
+
67
+ // The `TensorAccessor` is typically instantiated for CPU `Tensor`s using
68
+ // `Tensor.accessor<T, N>()`.
69
+ // For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only
70
+ // indexing on the device uses `TensorAccessor`s.
71
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
72
+ class TensorAccessor : public TensorAccessorBase<T,N,PtrTraits,index_t> {
73
+ public:
74
+ typedef typename PtrTraits<T>::PtrType PtrType;
75
+
76
+ C10_HOST_DEVICE TensorAccessor(
77
+ PtrType data_,
78
+ const index_t* sizes_,
79
+ const index_t* strides_)
80
+ : TensorAccessorBase<T, N, PtrTraits, index_t>(data_,sizes_,strides_) {}
81
+
82
+ C10_HOST_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
83
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
84
+ }
85
+
86
+ C10_HOST_DEVICE const TensorAccessor<T, N-1, PtrTraits, index_t> operator[](index_t i) const {
87
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
88
+ }
89
+ };
90
+
91
+ template<typename T, template <typename U> class PtrTraits, typename index_t>
92
+ class TensorAccessor<T,1,PtrTraits,index_t> : public TensorAccessorBase<T,1,PtrTraits,index_t> {
93
+ public:
94
+ typedef typename PtrTraits<T>::PtrType PtrType;
95
+
96
+ C10_HOST_DEVICE TensorAccessor(
97
+ PtrType data_,
98
+ const index_t* sizes_,
99
+ const index_t* strides_)
100
+ : TensorAccessorBase<T, 1, PtrTraits, index_t>(data_,sizes_,strides_) {}
101
+ C10_HOST_DEVICE T & operator[](index_t i) {
102
+ // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
103
+ return this->data_[this->strides_[0]*i];
104
+ }
105
+ C10_HOST_DEVICE const T & operator[](index_t i) const {
106
+ return this->data_[this->strides_[0]*i];
107
+ }
108
+ };
109
+
110
+
111
+ // GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used for CUDA `Tensor`s on the host
112
+ // and passed by value to kernels.
113
+ // In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host)
114
+ // in order to transfer them to the device when calling kernels.
115
+ // On the device, indexing of multidimensional tensors yields `TensorAccessor`s.
116
+ // Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__.
117
+ // Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available
118
+ // on the device, so those functions are host only.
119
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
120
+ class GenericPackedTensorAccessorBase {
121
+ public:
122
+ typedef typename PtrTraits<T>::PtrType PtrType;
123
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
124
+ C10_HOST GenericPackedTensorAccessorBase(
125
+ PtrType data_,
126
+ const index_t* sizes_,
127
+ const index_t* strides_)
128
+ : data_(data_) {
129
+ std::copy(sizes_, sizes_ + N, std::begin(this->sizes_));
130
+ std::copy(strides_, strides_ + N, std::begin(this->strides_));
131
+ }
132
+
133
+ // if index_t is not int64_t, we want to have an int64_t constructor
134
+ template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
135
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
136
+ C10_HOST GenericPackedTensorAccessorBase(
137
+ PtrType data_,
138
+ const source_index_t* sizes_,
139
+ const source_index_t* strides_)
140
+ : data_(data_) {
141
+ for (const auto i : c10::irange(N)) {
142
+ this->sizes_[i] = sizes_[i];
143
+ this->strides_[i] = strides_[i];
144
+ }
145
+ }
146
+
147
+ C10_HOST_DEVICE index_t stride(index_t i) const {
148
+ return strides_[i];
149
+ }
150
+ C10_HOST_DEVICE index_t size(index_t i) const {
151
+ return sizes_[i];
152
+ }
153
+ C10_HOST_DEVICE PtrType data() {
154
+ return data_;
155
+ }
156
+ C10_HOST_DEVICE const PtrType data() const {
157
+ return data_;
158
+ }
159
+ protected:
160
+ PtrType data_;
161
+ // NOLINTNEXTLINE(*c-arrays*)
162
+ index_t sizes_[N];
163
+ // NOLINTNEXTLINE(*c-arrays*)
164
+ index_t strides_[N];
165
+ C10_HOST void bounds_check_(index_t i) const {
166
+ TORCH_CHECK_INDEX(
167
+ 0 <= i && i < index_t{N},
168
+ "Index ",
169
+ i,
170
+ " is not within bounds of a tensor of dimension ",
171
+ N);
172
+ }
173
+ };
174
+
175
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
176
+ class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase<T,N,PtrTraits,index_t> {
177
+ public:
178
+ typedef typename PtrTraits<T>::PtrType PtrType;
179
+
180
+ C10_HOST GenericPackedTensorAccessor(
181
+ PtrType data_,
182
+ const index_t* sizes_,
183
+ const index_t* strides_)
184
+ : GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
185
+
186
+ // if index_t is not int64_t, we want to have an int64_t constructor
187
+ template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
188
+ C10_HOST GenericPackedTensorAccessor(
189
+ PtrType data_,
190
+ const source_index_t* sizes_,
191
+ const source_index_t* strides_)
192
+ : GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
193
+
194
+ C10_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
195
+ index_t* new_sizes = this->sizes_ + 1;
196
+ index_t* new_strides = this->strides_ + 1;
197
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
198
+ }
199
+
200
+ C10_DEVICE const TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) const {
201
+ const index_t* new_sizes = this->sizes_ + 1;
202
+ const index_t* new_strides = this->strides_ + 1;
203
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
204
+ }
205
+
206
+ /// Returns a PackedTensorAccessor of the same dimension after transposing the
207
+ /// two dimensions given. Does not actually move elements; transposition is
208
+ /// made by permuting the size/stride arrays. If the dimensions are not valid,
209
+ /// asserts.
210
+ C10_HOST GenericPackedTensorAccessor<T, N, PtrTraits, index_t> transpose(
211
+ index_t dim1,
212
+ index_t dim2) const {
213
+ this->bounds_check_(dim1);
214
+ this->bounds_check_(dim2);
215
+ GenericPackedTensorAccessor<T, N, PtrTraits, index_t> result(
216
+ this->data_, this->sizes_, this->strides_);
217
+ std::swap(result.strides_[dim1], result.strides_[dim2]);
218
+ std::swap(result.sizes_[dim1], result.sizes_[dim2]);
219
+ return result;
220
+ }
221
+ };
222
+
223
+ template<typename T, template <typename U> class PtrTraits, typename index_t>
224
+ class GenericPackedTensorAccessor<T,1,PtrTraits,index_t> : public GenericPackedTensorAccessorBase<T,1,PtrTraits,index_t> {
225
+ public:
226
+ typedef typename PtrTraits<T>::PtrType PtrType;
227
+ C10_HOST GenericPackedTensorAccessor(
228
+ PtrType data_,
229
+ const index_t* sizes_,
230
+ const index_t* strides_)
231
+ : GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
232
+
233
+ // if index_t is not int64_t, we want to have an int64_t constructor
234
+ template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
235
+ C10_HOST GenericPackedTensorAccessor(
236
+ PtrType data_,
237
+ const source_index_t* sizes_,
238
+ const source_index_t* strides_)
239
+ : GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
240
+
241
+ C10_DEVICE T & operator[](index_t i) {
242
+ return this->data_[this->strides_[0] * i];
243
+ }
244
+ C10_DEVICE const T& operator[](index_t i) const {
245
+ return this->data_[this->strides_[0]*i];
246
+ }
247
+
248
+ // Same as in the general N-dimensional case, but note that in the
249
+ // 1-dimensional case the returned PackedTensorAccessor will always be an
250
+ // identical copy of the original
251
+ C10_HOST GenericPackedTensorAccessor<T, 1, PtrTraits, index_t> transpose(
252
+ index_t dim1,
253
+ index_t dim2) const {
254
+ this->bounds_check_(dim1);
255
+ this->bounds_check_(dim2);
256
+ return GenericPackedTensorAccessor<T, 1, PtrTraits, index_t>(
257
+ this->data_, this->sizes_, this->strides_);
258
+ }
259
+ };
260
+
261
+
262
+ // Can't put this directly into the macro function args because of commas
263
+ #define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t>
264
+
265
+ // Old name for `GenericPackedTensorAccessor`
266
+ template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
267
+ C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X)
268
+
269
+ #undef AT_X
270
+
271
+ template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
272
+ using PackedTensorAccessor32 = GenericPackedTensorAccessor<T, N, PtrTraits, int32_t>;
273
+
274
+ template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
275
+ using PackedTensorAccessor64 = GenericPackedTensorAccessor<T, N, PtrTraits, int64_t>;
276
+ } // namespace at
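A hedged usage sketch (not part of this commit): `Tensor::accessor` and `Tensor::packed_accessor32` are the usual entry points for the classes above; the kernel name in the comment is hypothetical.

#include <ATen/ATen.h>

void accessor_sketch() {
  at::Tensor t = at::zeros({4, 5});      // float CPU tensor
  auto acc = t.accessor<float, 2>();     // TensorAccessor<float, 2>
  for (int64_t i = 0; i < acc.size(0); ++i) {
    for (int64_t j = 0; j < acc.size(1); ++j) {
      acc[i][j] = static_cast<float>(i * acc.size(1) + j);
    }
  }
  // For CUDA, a packed accessor is built on the host and passed by value to a
  // kernel, e.g. (my_kernel is hypothetical):
  //   auto pa = t.cuda().packed_accessor32<float, 2, at::RestrictPtrTraits>();
  //   my_kernel<<<grid, block>>>(pa);
}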
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h ADDED
@@ -0,0 +1,173 @@
1
+ #include <c10/macros/Macros.h>
2
+ #include <c10/util/Half.h>
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/MathConstants.h>
5
+ #include <ATen/NumericUtils.h>
6
+ #include <limits>
7
+ #include <cstdint>
8
+ #include <cassert>
9
+
10
+ namespace at {
11
+
12
+ // Using DistAccumType in accumulate types for distributions.
13
+ // Note: Ideally we'd be using ATen/AccumulateType.h but looks
14
+ // like the there is some inconsistency in how accumulate types
15
+ // are mapped currently, e.g. for the cpu side, float is mapped
16
+ // to double.
17
+ template <typename T>
18
+ struct DistAccumType { };
19
+
20
+ #if defined(__CUDACC__) || defined(__HIPCC__)
21
+ template <> struct DistAccumType<half> { using type = float; };
22
+ #endif
23
+ template <> struct DistAccumType<BFloat16> { using type = float; };
24
+ template <> struct DistAccumType<Half> { using type = float; };
25
+ template <> struct DistAccumType<float> { using type = float; };
26
+ template <> struct DistAccumType<double> { using type = double; };
27
+
28
+ template <typename T>
29
+ using dist_acctype = typename DistAccumType<T>::type;
30
+
31
+ namespace transformation {
32
+
33
+ /**
34
+ * A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified.
35
+ * `range` is `to - from`
36
+ * `base` is `from`
37
+ */
38
+ template <typename T, typename V>
39
+ C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) {
40
+ return static_cast<T>(static_cast<int64_t>((val % range) + base));
41
+ }
42
+
43
+ /**
44
+ * A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None
45
+ */
46
+ template <typename T, typename V>
47
+ C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
48
+ return static_cast<T>(static_cast<int64_t>(val));
49
+ }
50
+
51
+ /**
52
+ * A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`.
53
+ * In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double
54
+ * in this overloaded version
55
+ */
56
+ template <typename T, typename V>
57
+ C10_HOST_DEVICE inline typename std::enable_if<!(std::is_floating_point<T>::value), T>::type uniform_int(V val) {
58
+ if constexpr (std::is_same_v<T, bool>) {
59
+ return static_cast<bool>(val & 1);
60
+ } else if constexpr (std::is_same_v<T, int64_t>) {
61
+ return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
62
+ } else if constexpr (std::is_same_v<T, at::Half> || std::is_same<T, at::BFloat16>::value) {
63
+ return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
64
+ } else if constexpr (std::is_integral_v<T>) {
65
+ return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
66
+ } else {
67
+ assert(false);
68
+ return 0;
69
+ }
70
+ }
71
+
72
+ /**
73
+ * An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`,
74
+ * added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version.
75
+ */
76
+ template<typename T, typename V>
77
+ C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type uniform_int(V val) {
78
+ return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
79
+ }
80
+
81
+ template <typename T, typename V>
82
+ C10_HOST_DEVICE inline dist_acctype<T> uniform_real(V val, T from, T to) {
83
+ constexpr auto MASK = static_cast<V>((static_cast<uint64_t>(1) << std::numeric_limits<T>::digits) - 1);
84
+ constexpr auto DIVISOR = static_cast<dist_acctype<T>>(1) / (static_cast<uint64_t>(1) << std::numeric_limits<T>::digits);
85
+ dist_acctype<T> x = (val & MASK) * DIVISOR;
86
+ return (x * (to - from) + from);
87
+ }
88
+
89
+ /**
90
+ * Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to
91
+ * normally distributed with `mean` and standard deviation `std`.
92
+ */
93
+ template <typename T>
94
+ C10_HOST_DEVICE inline T normal(T val, T mean, T std) {
95
+ return val * std + mean;
96
+ }
97
+
98
+ /**
99
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
100
+ * Cauchy distribution with location parameter `median` and scale parameter `sigma`.
101
+ */
102
+ template <typename T>
103
+ C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) {
104
+ // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
105
+ // __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps),
106
+ // thus we clip those values.
107
+ constexpr T eps = std::numeric_limits<T>::epsilon();
108
+ constexpr T one_minus_eps = 1 - eps;
109
+ constexpr T zero_plus_eps = 0 + eps;
110
+ val = (val > one_minus_eps ? one_minus_eps : val);
111
+ val = (val < zero_plus_eps ? zero_plus_eps : val);
112
+ return median + sigma * at::tan(c10::pi<T> * (val - static_cast<T>(0.5)));
113
+ }
114
+
115
+ template <>
116
+ C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) {
117
+ // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
118
+ return median + sigma * at::tan(c10::pi<double> * (val - static_cast<double>(0.5)));
119
+ }
120
+
121
+ /**
122
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
123
+ * exponentially distributed with `lambda` parameter of the distribution.
124
+ */
125
+ template <typename T>
126
+ C10_HOST_DEVICE inline T exponential(T val, T lambda) {
127
+ // https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates
128
+ // Different implementations for CUDA and CPU to preserve original logic
129
+ // TODO: must be investigated and unified!!!
130
+ // https://github.com/pytorch/pytorch/issues/38662
131
+ #if defined(__CUDACC__) || defined(__HIPCC__)
132
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
133
+ // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
134
+ // we need log to be not 0, and not underflow when converted to half
135
+ // fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args
136
+ auto log = val >= static_cast<T>(1.) - std::numeric_limits<T>::epsilon() / 2
137
+ ? -std::numeric_limits<T>::epsilon() / 2
138
+ : at::log(val);
139
+ return static_cast<T>(-1.0) / lambda * log;
140
+ #else
141
+ return static_cast<T>(-1.0) / lambda * at::log1p(-val);
142
+ #endif
143
+ }
144
+
145
+ /**
146
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
147
+ * geometrically distributed with success probability `p`.
148
+ */
149
+ template <typename T>
150
+ C10_HOST_DEVICE inline T geometric(T val, T p) {
151
+ // https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions
152
+ return static_cast<T>(::ceil(at::log(val) / at::log1p(-p)));
153
+ }
154
+
155
+ /**
156
+ * Transforms normally distributed `val` to log-normally distributed.
157
+ */
158
+ template <typename T>
159
+ C10_HOST_DEVICE inline T log_normal(T val) {
160
+ // https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles
161
+ return at::exp(val);
162
+ }
163
+
164
+ /**
165
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
166
+ * bernoulli distributed with success probability `p`.
167
+ */
168
+ template <typename T>
169
+ C10_HOST_DEVICE inline T bernoulli(T val, T p) {
170
+ return val < p;
171
+ }
172
+
173
+ }} // namespace at::transformation
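A hedged sketch (not part of this commit) of how the transformations above compose with generator output; the inputs are assumed to already be uniform/normal samples, and the values are illustrative.

#include <ATen/core/TransformationHelper.h>

void transformation_sketch() {
  double u = 0.25;  // assumed: uniform sample in (0, 1)
  double z = -0.3;  // assumed: standard normal sample
  double n = at::transformation::normal(z, /*mean=*/1.0, /*std=*/2.0);     // z * std + mean
  double e = at::transformation::exponential(u, /*lambda=*/0.5);           // CPU path: -1/lambda * log1p(-u)
  double c = at::transformation::cauchy(u, /*median=*/0.0, /*sigma=*/1.0);
  double b = at::transformation::bernoulli(u, /*p=*/0.5);                  // 1.0 iff u < p
  (void)n; (void)e; (void)c; (void)b;
}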
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h ADDED
@@ -0,0 +1 @@
1
+ #include <c10/core/UndefinedTensorImpl.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h ADDED
@@ -0,0 +1,96 @@
1
+ #pragma once
2
+ #include <cstring>
3
+ #include <map>
4
+ #include <memory>
5
+ #include <ostream>
6
+ #include <sstream>
7
+ #include <unordered_map>
8
+
9
+ #include <c10/core/impl/LocalDispatchKeySet.h>
10
+
11
+ namespace at {
12
+ namespace vitals {
13
+
14
+ TORCH_API bool torchVitalEnabled();
15
+
16
+ struct TORCH_API TorchVitalAttr {
17
+ // always initialized to empty
18
+ std::string value = "";
19
+ template <typename T>
20
+ TorchVitalAttr& operator<<(const T& t) {
21
+ if (torchVitalEnabled()) {
22
+ std::stringstream ss;
23
+ ss << t;
24
+ value += ss.str();
25
+ }
26
+ return *this;
27
+ }
28
+
29
+ template <typename T>
30
+ void write(const T& t, bool force) {
31
+ if (force || torchVitalEnabled()) {
32
+ std::stringstream ss;
33
+ ss << t;
34
+ value = ss.str();
35
+ }
36
+ }
37
+ };
38
+
39
+ struct TORCH_API TorchVital {
40
+ std::string name;
41
+ std::unordered_map<std::string, TorchVitalAttr> attrs;
42
+
43
+ explicit TorchVital(std::string n) : name(std::move(n)) {}
44
+ TorchVital(const TorchVital&) = default;
45
+ TorchVital(TorchVital&&) = default;
46
+ TorchVital() = delete;
47
+
48
+ TorchVitalAttr& create(const std::string& attr);
49
+ TorchVitalAttr& create(const std::string& attr, bool force);
50
+ friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt);
51
+
52
+ ~TorchVital();
53
+ };
54
+
55
+ std::ostream& operator<<(std::ostream& os, TorchVital const& tv);
56
+
57
+ // A way to access vitals by string names instead of by global reference.
58
+ // This enables access to vitals from the PythonAPI.
59
+ class TORCH_API APIVitals {
60
+ public:
61
+ bool vitals_enabled;
62
+
63
+ // Set any vital sign that was added to the map.
64
+ bool setVital(
65
+ const std::string& vital_name,
66
+ const std::string& attr_name,
67
+ const std::string& value,
68
+ bool force = false);
69
+ std::string readVitals();
70
+
71
+ APIVitals();
72
+
73
+ // Ensure this stays a singleton
74
+ APIVitals(APIVitals const& other) = delete;
75
+ APIVitals(APIVitals&& other) = delete;
76
+ APIVitals& operator=(const APIVitals&) = delete;
77
+ APIVitals& operator=(APIVitals&&) = delete;
78
+
79
+ private:
80
+ std::unordered_map<std::string, TorchVital> name_map_;
81
+ };
82
+
83
+ extern TORCH_API APIVitals VitalsAPI;
84
+
85
+ } // namespace vitals
86
+ } // namespace at
87
+
88
+ #define TORCH_VITAL_DECLARE(name) \
89
+ TORCH_API at::vitals::TorchVital TorchVital_##name;
90
+
91
+ #define TORCH_VITAL_DEFINE(name) \
92
+ TORCH_API at::vitals::TorchVital TorchVital_##name(#name);
93
+
94
+ #define TORCH_VITAL_BASE(name) TorchVital_##name
95
+
96
+ #define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr)
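A usage sketch (not part of this commit) of the classes above; `ExampleSubsystem` and the attribute names are made up, and streaming only takes effect when vitals are enabled.

#include <ATen/core/Vitals.h>

void vitals_sketch() {
  // The TORCH_VITAL_* macros wrap this pattern for library-defined globals.
  at::vitals::TorchVital vital("ExampleSubsystem");
  vital.create("startup") << "initialized";  // gated by torchVitalEnabled()
  // String-keyed access, e.g. from bindings; force=true writes unconditionally.
  at::vitals::VitalsAPI.setVital("ExampleSubsystem", "attempts", "1", /*force=*/true);
}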
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/blob.h ADDED
@@ -0,0 +1,208 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <sstream>
5
+ #include <type_traits>
6
+ #include <typeinfo>
7
+ #include <vector>
8
+
9
+ #include <c10/util/intrusive_ptr.h>
10
+ #include <c10/util/typeid.h>
11
+ #include <c10/macros/Macros.h>
12
+
13
+ namespace caffe2 {
14
+
15
+ class Tensor;
16
+
17
+ /**
18
+ * @brief Blob is a general container that hosts a typed pointer.
19
+ *
20
+ * A Blob hosts a pointer as well as its type, and takes charge of deleting it
21
+ * properly when the blob is deallocated or re-allocated with a new type. A blob
22
+ * could contain anything, although the most common case is to contain a Tensor.
23
+ */
24
+ class TORCH_API Blob final : public c10::intrusive_ptr_target {
25
+ public:
26
+ /**
27
+ * Initializes an empty Blob.
28
+ */
29
+ Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {}
30
+ ~Blob() override {
31
+ Reset();
32
+ }
33
+
34
+ Blob(Blob&& other) noexcept : Blob() {
35
+ swap(other);
36
+ }
37
+
38
+ Blob& operator=(Blob&& other) noexcept {
39
+ Blob(std::move(other)).swap(*this);
40
+ return *this;
41
+ }
42
+
43
+ /**
44
+ * Checks if the content stored in the blob is of type T.
45
+ */
46
+ template <class T>
47
+ bool IsType() const noexcept {
48
+ return meta_.Match<T>();
49
+ }
50
+
51
+ /**
52
+ * Returns the meta info of the blob.
53
+ */
54
+ const TypeMeta meta() const noexcept {
55
+ return meta_;
56
+ }
57
+
58
+ /**
59
+ * Returns a printable typename of the blob.
60
+ */
61
+ c10::string_view TypeName() const noexcept {
62
+ return meta_.name();
63
+ }
64
+
65
+ /**
66
+ * @brief Gets the const reference of the stored object. The code checks if
67
+ * the stored object is of the desired type.
68
+ */
69
+ // TODO(jerryzh): add a Get(c10::DeviceType) function?
70
+ template <class T>
71
+ const T& Get() const {
72
+ TORCH_INTERNAL_ASSERT(
73
+ IsType<T>(),
74
+ "wrong type for the Blob instance. Blob contains ",
75
+ meta_.name(),
76
+ " while caller expects ",
77
+ TypeMeta::TypeName<T>());
78
+ // TODO: after we add Get<Tensor>(c10::DeviceType)
79
+ // and changed all the callsites, we can add
80
+ // a static assert here to enforce T != Tensor
81
+ return *static_cast<const T*>(pointer_);
82
+ }
83
+
84
+ const void* GetRaw() const noexcept {
85
+ return pointer_;
86
+ }
87
+ void* GetRaw() noexcept {
88
+ return pointer_;
89
+ }
90
+
91
+ /**
92
+ * @brief Gets a mutable pointer to the stored object.
93
+ *
94
+ * If the current object is not of the right type, a new object is created
95
+ * and the old object is freed. Note that type T should have a default
96
+ * constructor. Otherwise, create the object yourself first, and use
97
+ * Reset().
98
+ */
99
+ template <class T>
100
+ T* GetMutable() {
101
+ static_assert(
102
+ std::is_default_constructible<T>::value,
103
+ "GetMutable can't be called with non-default-constructible types. "
104
+ "Try using specialized methods");
105
+ if (IsType<T>()) {
106
+ return static_cast<T*>(pointer_);
107
+ } else {
108
+ // TODO Re-enable logging
109
+ // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>();
110
+ return Reset<T>(new T());
111
+ }
112
+ }
113
+
114
+ template <class T>
115
+ T* GetMutableOrNull() {
116
+ if (IsType<T>()) {
117
+ return static_cast<T*>(pointer_);
118
+ } else {
119
+ return nullptr;
120
+ }
121
+ }
122
+
123
+ /**
124
+ * Sets the underlying object to the allocated one. The Blob then takes over
125
+ * the ownership of the passed in pointer. If there is already an object in
126
+ * the Blob, the old object is freed.
127
+ *
128
+ * This is used when the underlying class T does not have a default ctor, or
129
+ * complex initializations needs to be done outside the blob.
130
+ */
131
+ template <class T>
132
+ T* Reset(T* allocated) {
133
+ free_();
134
+ meta_ = TypeMeta::Make<T>();
135
+ pointer_ = static_cast<void*>(allocated);
136
+ has_ownership_ = true;
137
+ return allocated;
138
+ }
139
+
140
+ /**
141
+ * Sets the underlying object to the allocated one, but does not take over
142
+ * the ownership of the passed in pointer. If there is already an object in
143
+ * the Blob, the old object is freed.
144
+ *
145
+ * Unlike Reset, this does not take over the ownership of the pointer and the
146
+ * caller is responsible for making sure that the lifetime of the allocated
147
+ * blob outlasts the lifetime of any access to this blob, until another Reset
148
+ * call is made or the blob is destructed.
149
+ */
150
+ template <class T>
151
+ typename std::remove_const<T>::type* ShareExternal(
152
+ typename std::remove_const<T>::type* allocated) {
153
+ return static_cast<T*>(ShareExternal(
154
+ static_cast<void*>(allocated),
155
+ TypeMeta::Make<typename std::remove_const<T>::type>()));
156
+ }
157
+
158
+ void* ShareExternal(void* allocated, const TypeMeta meta) {
159
+ free_();
160
+ meta_ = meta;
161
+ pointer_ = allocated;
162
+ has_ownership_ = false;
163
+ return allocated;
164
+ }
165
+
166
+ /**
167
+ * Resets the Blob to an empty one.
168
+ */
169
+ void Reset() {
170
+ free_();
171
+ pointer_ = nullptr;
172
+ meta_ = TypeMeta();
173
+ has_ownership_ = false;
174
+ }
175
+
176
+ /**
177
+ * @brief Swaps the underlying storage of two blobs.
178
+ */
179
+ void swap(Blob& rhs) {
180
+ using std::swap;
181
+ swap(meta_, rhs.meta_);
182
+ swap(pointer_, rhs.pointer_);
183
+ swap(has_ownership_, rhs.has_ownership_);
184
+ }
185
+
186
+ private:
187
+ void free_() {
188
+ if (has_ownership_ && pointer_ != nullptr) {
189
+ (*meta_.deleteFn())(pointer_);
190
+ }
191
+ }
192
+
193
+ TypeMeta meta_;
194
+ void* pointer_;
195
+ bool has_ownership_;
196
+
197
+ C10_DISABLE_COPY_AND_ASSIGN(Blob);
198
+ };
199
+
200
+ inline void swap(Blob& lhs, Blob& rhs) {
201
+ lhs.swap(rhs);
202
+ }
203
+
204
+ inline std::ostream& operator<<(std::ostream& out, const Blob& v) {
205
+ return out << "Blob[" << v.TypeName() << "]";
206
+ }
207
+
208
+ } // namespace caffe2
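A short usage sketch (not part of this commit) exercising the ownership semantics documented above; the payload type is arbitrary.

#include <ATen/core/blob.h>
#include <vector>

void blob_sketch() {
  caffe2::Blob blob;
  // Default-constructs a std::vector<int> inside the blob; the blob owns it.
  auto* vec = blob.GetMutable<std::vector<int>>();
  vec->push_back(42);
  const auto& read = blob.Get<std::vector<int>>();  // type-checked access
  bool ok = blob.IsType<std::vector<int>>() && read.size() == 1;
  blob.Reset();  // frees the owned vector
  (void)ok;
}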
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/TypeTraits.h>
4
+
5
+ namespace c10 {
6
+
7
+ namespace impl {
8
+ namespace detail {
9
+ template<class FuncType, class ReturnType, class ParameterList> class WrapFunctionIntoRuntimeFunctor_ {};
10
+ template<class FuncType, class ReturnType, class... Parameters>
11
+ class WrapFunctionIntoRuntimeFunctor_<FuncType, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
12
+ public:
13
+ template<class FuncType_>
14
+ explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func)
15
+ : kernel_func_(std::forward<FuncType_>(kernel_func)) {}
16
+
17
+ decltype(auto) operator()(Parameters... args) {
18
+ return kernel_func_(std::forward<Parameters>(args)...);
19
+ }
20
+
21
+ private:
22
+ FuncType kernel_func_;
23
+ };
24
+ }
25
+
26
+ // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that
27
+ // inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
28
+ // This can, for example, be used for lambdas, functors or even function pointers.
29
+ // In the case of function pointers, since it is a runtime function pointer,
30
+ // there is an overhead for calling it whenever the kernel is invoked.
31
+ template<class FuncType>
32
+ using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_<
33
+ FuncType,
34
+ typename guts::infer_function_traits_t<FuncType>::return_type,
35
+ typename guts::infer_function_traits_t<FuncType>::parameter_types
36
+ >;
37
+ }
38
+
39
+ }
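A hedged sketch (not part of this commit) wrapping a lambda with the alias above; the extra KernelFunction.h include is assumed to be what provides c10::OperatorKernel when this header is used on its own.

#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h>
#include <cstdint>

void wrap_functor_sketch() {
  auto lambda = [](int64_t a, int64_t b) { return a + b; };
  c10::impl::WrapFunctionIntoRuntimeFunctor<decltype(lambda)> kernel(lambda);
  int64_t sum = kernel(2, 3);  // forwards to the stored lambda
  (void)sum;
}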
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h ADDED
@@ -0,0 +1,387 @@
1
+ #pragma once
2
+
3
+ // This file contains boxing (not unboxing) logic,
4
+ // i.e. how to make a vector<IValue> from a set of concrete arguments.
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <ATen/core/stack.h>
8
+ #include <c10/core/TensorOptions.h>
9
+
10
+ #include <ATen/core/boxing/BoxedKernel.h>
11
+
12
+ #include <c10/util/Metaprogramming.h>
13
+ #include <type_traits>
14
+
15
+ namespace c10 {
16
+ namespace impl {
17
+
18
+ //
19
+ // utils
20
+ //
21
+
22
+ // is_mutable_tensor_ref
23
+ template <class T> struct is_mutable_tensor_ref : std::false_type {};
24
+ template <> struct is_mutable_tensor_ref<at::Tensor&> : std::true_type {};
25
+
26
+ // is_tuple_of_mutable_tensor_refs
27
+ //
28
+ template <class T, class Enable = void>
29
+ struct is_tuple_of_mutable_tensor_refs : std::false_type {};
30
+
31
+ template <class T>
32
+ struct is_tuple_of_mutable_tensor_refs<T, std::enable_if_t<guts::is_instantiation_of<std::tuple, T>::value, void>>
33
+ : guts::typelist::all<is_mutable_tensor_ref, guts::typelist::from_tuple_t<T>>
34
+ {};
35
+
36
+ // has_ivalue_to<T> tests the presence/absence of instance method IValue::to<T>()
37
+ //
38
+ template <class T, class Enable = void>
39
+ struct has_ivalue_to : std::false_type {};
40
+
41
+ template <class T>
42
+ struct has_ivalue_to<T, std::void_t<decltype(std::declval<IValue>().to<T>())>>
43
+ : std::true_type
44
+ {};
45
+
46
+ //
47
+ // boxing predicates
48
+ //
49
+
50
+ // A boxable arg type is one that IValue has a constructor for.
51
+ template <typename T>
52
+ using can_box =
53
+ std::disjunction<
54
+ std::is_constructible<IValue, std::decay_t<T>>,
55
+ // TensorOptions are not directly constructible into IValue,
56
+ // but torch::jit::push knows how to handle them
57
+ std::is_same<TensorOptions, std::decay_t<T>>
58
+ >;
59
+
60
+ template <typename... Ts>
61
+ using can_box_all = std::conjunction<can_box<Ts>...>;
62
+
63
+ // an unboxable result is one that can be extracted from an IValue
64
+ template <typename T>
65
+ using can_unbox =
66
+ std::conjunction<
67
+ std::disjunction<
68
+ has_ivalue_to<T>,
69
+ // void returns are ok
70
+ std::is_same<void, T>
71
+ >,
72
+ std::negation<std::is_lvalue_reference<T>>
73
+ >;
74
+
75
+ //
76
+ // boxArgs - utility for pushing unboxed args onto IValue stack
77
+ //
78
+ template <class... Args>
79
+ torch::jit::Stack boxArgs(Args... args) {
80
+ // TODO Reuse stack vector instead of allocating?
81
+ torch::jit::Stack stack;
82
+ stack.reserve(sizeof...(Args));
83
+ torch::jit::push(stack, std::forward<Args>(args)...);
84
+ return stack;
85
+ }
86
+
87
+ template <class T>
88
+ static inline constexpr size_t boxed_size_one() {
89
+ static_assert(!std::is_same<std::decay_t<T>, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference");
90
+ return 1;
91
+ }
92
+
93
+ // torch::jit::push pushes 4 values for a TensorOptions; this needs to
94
+ // be kept in sync.
95
+ template <>
96
+ inline constexpr size_t boxed_size_one<c10::TensorOptions>() {
97
+ return 4;
98
+ }
99
+
100
+ // NOTE: this could probably be simplified with C++17 fold expressions.
101
+ template <typename...>
102
+ struct BoxedSize : std::integral_constant<size_t, 0> {};
103
+ template <class T, class... Args>
104
+ struct BoxedSize<T, Args...> : std::integral_constant<size_t, boxed_size_one<T>() + BoxedSize<Args...>::value> {};
105
+
106
+ template <class... Args>
107
+ static inline constexpr size_t boxed_size() {
108
+ return BoxedSize<Args...>::value;
109
+ }
110
+
111
+ using IValueAlignedStorage = std::aligned_storage_t<sizeof(IValue), alignof(IValue)>;
112
+
113
+ template <typename T>
114
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) {
115
+ new (&dest[lastIdx]) IValue(arg);
116
+ lastIdx++;
117
+ }
118
+
119
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) {
120
+ new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype()));
121
+ new (&dest[lastIdx++]) IValue(options.layout());
122
+ new (&dest[lastIdx++]) IValue(options.device());
123
+ new (&dest[lastIdx++]) IValue(options.pinned_memory());
124
+ }
125
+
126
+ inline void boxArgsToStack(IValueAlignedStorage*, int&) {}
127
+
128
+ template<typename T, typename... Args>
129
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) {
130
+ boxToStack(dest, arg, lastIdx);
131
+ boxArgsToStack(dest, lastIdx, args...);
132
+ }
133
+
134
+ //
135
+ // PopResult is a helper class whose specializations handle popping single and
136
+ // multiple return values, respectively.
137
+ //
138
+ template <class Result>
139
+ struct PopResult final {
140
+ static Result call(Stack& stack) {
141
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
142
+ stack.size() == 1,
143
+ "Boxed kernel was expected to return one value on the stack, ",
144
+ "but instead pushed ", stack.size(), " values."
145
+ );
146
+ return std::move(stack[0]).to<Result>();
147
+ }
148
+ };
149
+
150
+ template <class... Types>
151
+ struct PopResult<std::tuple<Types...>> final {
152
+ using Result = std::tuple<Types...>;
153
+
154
+ static Result call(Stack& stack) {
155
+ // for tuple return types, boxed kernel has pushed multiple values onto the stack
156
+ constexpr int RetCount = sizeof...(Types);
157
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
158
+ stack.size() == RetCount,
159
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
160
+ "but instead pushed ", stack.size(), " values."
161
+ );
162
+ return pop_to_tuple_impl(stack, std::make_index_sequence<RetCount>());
163
+ }
164
+ private:
165
+ // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise.
166
+ // I'm sure there's an incantation that slips it past the parser but eh
167
+ template <size_t... indices>
168
+ static Result pop_to_tuple_impl(Stack& stack, std::index_sequence<indices...>) {
169
+ return std::make_tuple((std::move(stack[indices]).to<Types>())...);
170
+ }
171
+ };
172
+
173
+ //
174
+ // BoxedKernelWrapper
175
+ //
176
+ // For a given function type FT, BoxedKernelWrapper<FT> implements
177
+ // a `call` method that
178
+ // - takes a boxed kernel and unboxed arguments as specified by FT,
179
+ // - calls `boxArgs` to box the arguments
180
+ // - calls the boxed kernel
181
+ // - unboxes and returns the result
182
+ //
183
+ // The partial specializations below handle various cases: in
184
+ // particular, not all types appearing in op signatures are supported,
185
+ // and ops returning references have nonstandard wrapper implementations.
186
+ //
187
+
188
+ // 1. The base specialization of BoxedKernelWrapper should never be instantiated.
189
+ // A "no call method defined on BoxedKernelWrapper" compile error means that
190
+ // an op signature has failed to trigger any of the partial specializations
191
+ // that follow this one.
192
+ //
193
+ template <class FuncType, class Enable = void>
194
+ struct BoxedKernelWrapper {
195
+ // The reason we're not just doing straight up static_assert(false, ...) here:
196
+ // Basically, the way to make sure a static_assert only fires if a template
197
+ // is actually instantiated (rather than every time the file is parsed) is to use
198
+ // template parameters in the expression, e.g. FuncType here. However, since
199
+ // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same
200
+ // effect.
201
+ static_assert(sizeof(FuncType) != sizeof(FuncType),
202
+ "Function signature contains one or more unsupported parameter and/or return types. "
203
+ "Look for a nearby error like "
204
+ "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" "
205
+ "- (your function type) is the unsupported signature.");
206
+ };
207
+
208
+ //
209
+ // 2. Supported signatures, other than those involving non-const Tensor refs -
210
+ // i.e., "functional" ops.
211
+ //
212
+
213
+ template <class Result, class... Args>
214
+ struct BoxedKernelWrapper<
215
+ Result(Args...),
216
+ std::enable_if_t<
217
+ can_box_all<Args...>::value && can_unbox<Result>::value && !is_tuple_of_mutable_tensor_refs<Result>::value,
218
+ void
219
+ >
220
+ > {
221
+ static Result call(
222
+ const BoxedKernel& boxed_kernel_func,
223
+ const OperatorHandle& opHandle,
224
+ DispatchKeySet dispatchKeySet,
225
+ Args... args
226
+ ) {
227
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
228
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
229
+
230
+ if constexpr (!std::is_same_v<void, Result>) {
231
+ // op has pushed one or more values onto the stack.
232
+ return PopResult<Result>::call(stack);
233
+ } else {
234
+ // op returns void, boxed kernel has pushed nothing onto stack.
235
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
236
+ stack.empty(),
237
+ "Boxed kernel was expected to return no values on the stack, ",
238
+ "but instead returned ", stack.size(), " values."
239
+ );
240
+ }
241
+ }
242
+ };
243
+
244
+ //
245
+ // 3. in-place ops take a single non-const Tensor reference
246
+ // as their first argument, and return it.
247
+ //
248
+ // Note: all signatures matching this pattern are assumed to be for such ops.
249
+ // Because of this, the generated BoxedKernelWrapper specializations simply
250
+ // return the in-place argument.
251
+ //
252
+
253
+ template <class... OtherArgs>
254
+ struct BoxedKernelWrapper<
255
+ at::Tensor&(at::Tensor&, OtherArgs...),
256
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
257
+ > {
258
+ static at::Tensor& call(
259
+ const BoxedKernel& boxed_kernel_func,
260
+ const OperatorHandle& opHandle,
261
+ DispatchKeySet dispatchKeySet,
262
+ at::Tensor& outArg, OtherArgs... otherArgs
263
+ ) {
264
+ torch::jit::Stack stack = boxArgs<at::Tensor&, OtherArgs...>(outArg, std::forward<OtherArgs>(otherArgs)...);
265
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
266
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
267
+ stack.size() == 1,
268
+ "Boxed kernel was expected to return a single value on the stack, ",
269
+ "but instead returned ", stack.size(), " values."
270
+ );
271
+
272
+ return outArg;
273
+ }
274
+ };
275
+
276
+ //
277
+ // 3.5. In-process migration to make in-place ops take and return
278
+ // const references instead.
279
+ template <class... OtherArgs>
280
+ struct BoxedKernelWrapper<
281
+ const at::Tensor&(const at::Tensor&, OtherArgs...),
282
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
283
+ > {
284
+ static const at::Tensor& call(
285
+ const BoxedKernel& boxed_kernel_func,
286
+ const OperatorHandle& opHandle,
287
+ DispatchKeySet dispatchKeySet,
288
+ const at::Tensor& outArg, OtherArgs... otherArgs
289
+ ) {
290
+ torch::jit::Stack stack = boxArgs(outArg, otherArgs...);
291
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
292
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
293
+ stack.size() == 1,
294
+ "Boxed kernel was expected to return a single value on the stack, ",
295
+ "but instead returned ", stack.size(), " values."
296
+ );
297
+
298
+ return outArg;
299
+ }
300
+ };
301
+
302
+ //
303
+ // 4. out of place ops that take a single non-const Tensor reference as their
304
+ // final argument, and also return it.
305
+ //
306
+ // Note: all signatures matching this pattern are assumed to be for such ops.
307
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
308
+ // return out arguments.
309
+ //
310
+ template <class FirstArg, class... RestArgs>
311
+ struct BoxedKernelWrapper<
312
+ at::Tensor&(FirstArg, RestArgs...),
313
+ std::enable_if_t<
314
+ can_box_all<FirstArg, RestArgs...>::value
315
+ // this skips over in-place kernels with a non-const Tensor
316
+ // arg at the front, so those can unambiguously trigger the preceding specialization.
317
+ && !is_mutable_tensor_ref<FirstArg>::value,
318
+ void
319
+ >
320
+ > {
321
+ static at::Tensor& call(
322
+ const BoxedKernel& boxed_kernel_func,
323
+ const OperatorHandle& opHandle,
324
+ DispatchKeySet dispatchKeySet,
325
+ FirstArg firstArg, RestArgs... restArgs
326
+ ) {
327
+ torch::jit::Stack stack = boxArgs<FirstArg, RestArgs...>(std::forward<FirstArg>(firstArg), std::forward<RestArgs>(restArgs)...);
328
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
329
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
330
+ stack.size() == 1,
331
+ "Boxed kernel was expected to return a single value on the stack, ",
332
+ "but instead returned ", stack.size(), " values."
333
+ );
334
+
335
+ // reusing restArgs after it has been forwarded here is ok because we know
336
+ // that the last element is of type `Tensor&`.
337
+ return std::get<sizeof...(RestArgs) - 1>(std::tuple<RestArgs...>{restArgs...});
338
+ }
339
+ };
340
+
341
+ //
342
+ // 5. out of place ops that take multiple non-const Tensor references as their
343
+ // final arguments, and return them in a std::tuple.
344
+ //
345
+ // Note: all signatures matching this pattern are assumed to be for such ops.
346
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
347
+ // return the out arguments.
348
+ //
349
+ template <class Result, class... Args>
350
+ struct BoxedKernelWrapper<
351
+ Result(Args...),
352
+ std::enable_if_t<
353
+ can_box_all<Args...>::value && is_tuple_of_mutable_tensor_refs<Result>::value,
354
+ void
355
+ >
356
+ > {
357
+ static Result call(
358
+ const BoxedKernel& boxed_kernel_func,
359
+ const OperatorHandle& opHandle,
360
+ DispatchKeySet dispatchKeySet,
361
+ Args... args
362
+ ) {
363
+ using ArgTuple = std::tuple<Args...>;
364
+ constexpr int RetCount = std::tuple_size<Result>();
365
+
366
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
367
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
368
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
369
+ stack.size() == RetCount,
370
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
371
+ "but instead returned ", stack.size(), " values."
372
+ );
373
+
374
+ // reusing args after it has been forwarded here is ok because we know
375
+ // that the last RetCount elements are of type `Tensor&`.
376
+ auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
377
+ static_assert(
378
+ std::is_same<Result, decltype(result)>::value,
379
+ "The parameter list of an op returning a tuple of Tensor references "
380
+ "must end with an equal number of Tensor reference parameters."
381
+ );
382
+ return result;
383
+ }
384
+ };
385
+
386
+ } // impl
387
+ } // c10
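A hedged sketch (not part of this commit) of the host-side helpers above; it only shows boxing arguments onto a stack and the size bookkeeping for TensorOptions.

#include <ATen/ATen.h>
#include <ATen/core/boxing/impl/boxing.h>

void boxing_sketch() {
  at::Tensor t = at::ones({2, 2});
  // Pushes three IValues: a Tensor, a double, and an int.
  torch::jit::Stack stack = c10::impl::boxArgs(t, 2.5, int64_t(3));
  // TensorOptions expands to dtype/layout/device/pin_memory, hence 1 + 4.
  static_assert(c10::impl::boxed_size<at::Tensor, c10::TensorOptions>() == 5,
                "a TensorOptions boxes to four IValues");
  (void)stack;
}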
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <memory>
5
+
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/Exception.h>
9
+
10
+ namespace c10 {
11
+
12
+ struct ClassType;
13
+ using ClassTypePtr = std::shared_ptr<ClassType>;
14
+
15
+ TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex);
16
+
17
+ template <typename T>
18
+ const c10::ClassTypePtr& getCustomClassType() {
19
+ // Classes are never unregistered from getCustomClassTypeMap and the
20
+ // hash lookup can be a hot path, so just cache.
21
+ // For the same reason, it's fine If this ends up getting duplicated across
22
+ // DSO boundaries for whatever reason.
23
+ static c10::ClassTypePtr cache = getCustomClassTypeImpl(
24
+ std::type_index(typeid(T)));
25
+ return cache;
26
+ }
27
+
28
+ }
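A hedged sketch (not part of this commit); `MyStackClass` is a made-up class assumed to have been registered elsewhere via the custom-class API, and the lookup throws at runtime if it was not.

#include <ATen/core/custom_class.h>
#include <c10/util/intrusive_ptr.h>

struct MyStackClass : c10::intrusive_ptr_target {};  // hypothetical registered class

void custom_class_sketch() {
  // Cached per template instantiation, as the comment above notes.
  const c10::ClassTypePtr& type =
      c10::getCustomClassType<c10::intrusive_ptr<MyStackClass>>();
  (void)type;
}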
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Metaprogramming.h>
7
+ #include <c10/util/Type.h>
8
+
9
+ namespace c10 {
10
+ namespace impl {
11
+
12
+ // A CppSignature object holds RTTI information about a C++ function signature at runtime
13
+ // and can compare them or get a debug-printable name.
14
+ class TORCH_API CppSignature final {
15
+ public:
16
+ CppSignature(const CppSignature&) = default;
17
+ CppSignature(CppSignature&&) noexcept = default;
18
+ CppSignature& operator=(const CppSignature&) = default;
19
+ CppSignature& operator=(CppSignature&&) noexcept = default;
20
+
21
+ template<class FuncType>
22
+ static CppSignature make() {
23
+ // Normalize functors, lambdas, function pointers, etc. into the plain function type
24
+ // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
25
+ // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered
26
+ // with different calling conventions.
27
+ // See Note [Plumbing Keys Through The Dispatcher]
28
+ using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type;
29
+
30
+ return CppSignature(std::type_index(typeid(decayed_function_type)));
31
+ }
32
+
33
+ std::string name() const {
34
+ return c10::demangle(signature_.name());
35
+ }
36
+
37
+ friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) {
38
+ if (lhs.signature_ == rhs.signature_) {
39
+ return true;
40
+ }
41
+ // Without RTLD_GLOBAL, the type_index comparison could yield false because
42
+ // they point to different instances of the RTTI data, but the types would
43
+ // still be the same. Let's check for that case too.
44
+ // Note that there still is a case where this might not work, i.e. when
45
+ // linking libraries of different compilers together, they might have
46
+ // different ways to serialize a type name. That, together with a missing
47
+ // RTLD_GLOBAL, would still fail this.
48
+ if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) {
49
+ return true;
50
+ }
51
+
52
+ return false;
53
+ }
54
+
55
+ private:
56
+ explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {}
57
+ std::type_index signature_;
58
+ };
59
+
60
+ inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) {
61
+ return !(lhs == rhs);
62
+ }
63
+
64
+ }
65
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h ADDED
@@ -0,0 +1,242 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <ATen/core/function_schema.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <c10/util/Bitset.h>
7
+ #include <c10/core/DispatchKeySet.h>
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/core/Variadic.h>
10
+ #include <ATen/core/stack.h>
11
+
12
+ namespace c10 {
13
+
14
+ namespace impl {
15
+
16
+ // Take a DispatchKeySet for a Tensor and determine what the actual dispatch
17
+ // DispatchKey should be, taking into account TLS, and skipping backends which
18
+ // fall through.
19
+ //
20
+ // Unlike Tensor::key_set(), the value of this on a tensor can change depending
21
+ // on TLS.
22
+ //
23
+ // NB: If there is no valid dispatch key, this will return Undefined
24
+ static inline DispatchKeySet computeDispatchKeySet(
25
+ DispatchKeySet ks,
26
+ // The key mask lets us eliminate (by zero entries) keys which should not
27
+ // be considered for dispatch. There are two cases when we use this:
28
+ //
29
+ // - If an operator's dispatch table contains a fallthrough entry, we
30
+ // should bypass it entirely when finding the key
31
+ // - If a user invokes with redispatch, the mask lets us
32
+ // zero out the key the user asked us to stop.
33
+ //
34
+ // These excluded backends are NOT tracked in the TLS, but must be applied
35
+ // AFTER TLS (since the backend may have been introduced for consideration
36
+ // by the included TLS), which is why you have to pass them in to this
37
+ // function (as opposed to just applying it to the input 'ks').
38
+ DispatchKeySet key_mask
39
+ ) {
40
+ c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
41
+ // TODO: It's a bit irritating that we have to do logical ORs here, it would
42
+ // be nice to only do one. Can always_included be folded into the TLS? Well,
43
+ // it's a bit troublesome, because fastpath TLS access requires the type of
44
+ // the TLS in question to be zero-initialized, so you don't actually win
45
+ // anything in that case.
46
+ return (((ks | local.included_) - local.excluded_) & key_mask);
47
+ }
48
+
49
+ }
50
+
51
+ namespace detail {
52
+ // A small gadget to extract the DispatchKeySet from types which are known
53
+ // to have it. Used to extract dispatch keys from unboxed calls.
54
+ struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
55
+ DispatchKeySet ts;
56
+ void operator()(const at::Tensor& x) {
57
+ ts = ts | x.key_set();
58
+ }
59
+ void operator()(const c10::optional<at::Tensor>& x) {
60
+ if (x.has_value()) {
61
+ ts = ts | x->key_set();
62
+ }
63
+ }
64
+ void operator()(at::ArrayRef<at::Tensor> xs) {
65
+ for (const auto& x : xs) {
66
+ ts = ts | x.key_set();
67
+ }
68
+ }
69
+ // Tensor?[] translates to this case.
70
+ void operator()(const c10::List<c10::optional<at::Tensor>>& xs) {
71
+ for (c10::optional<at::Tensor> x : xs) {
72
+ if (x.has_value()) {
73
+ ts = ts | x.value().key_set();
74
+ }
75
+ }
76
+ }
77
+ // Structured Tensor[] translates to this case
78
+ void operator()(const at::ITensorListRef& xs) {
79
+ for (const auto& x : xs) {
80
+ ts = ts | x.key_set();
81
+ }
82
+ }
83
+ [[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) {
84
+ // Just checking that the handling of Tensor?[] didn't change.
85
+ TORCH_INTERNAL_ASSERT(false);
86
+ }
87
+ void operator()(const at::Generator& gen) {
88
+ if (gen.defined()) {
89
+ ts = ts | gen.key_set();
90
+ }
91
+ }
92
+ void operator()(const c10::optional<at::Generator>& gen) {
93
+ if (gen.has_value() && gen->defined()) {
94
+ ts = ts | gen->key_set();
95
+ }
96
+ }
97
+ template <typename T>
98
+ void operator()(const T&) {
99
+ // do nothing
100
+ }
101
+ };
102
+
103
+ // NB: take by const reference (Don't do universal forwarding here! You
104
+ // don't want to move into this function!)
105
+ template <typename... Args>
106
+ DispatchKeySet multi_dispatch_key_set(const Args&... args) {
107
+ return MultiDispatchKeySet().apply(args...).ts;
108
+ }
109
+ }
110
+
111
+ /**
112
+ * An instance of DispatchKeyExtractor knows how to get a dispatch key given
113
+ * a list of arguments for an operator call.
114
+ *
115
+ * The instance is specific for a certain operator as:
116
+ * - In boxed dispatch, different operators have different ways to extract
117
+ * the dispatch key (e.g. different numbers of arguments), and we precompute
118
+ * the stack locations we should look at; and
119
+ * - In all dispatch, some backends should be excluded from dispatch because
120
+ * they have been registered as fallthrough. The set of excluded backends
121
+ * varies from operator to operator, as some operators may have overridden the
122
+ * fallthrough with custom behavior.
123
+ *
124
+ * Note - this should maintain identical impl to the py dispatcher key extraction logic
125
+ * at pytorch/torch/dispatcher.py
126
+ */
127
+ struct TORCH_API DispatchKeyExtractor final {
128
+ public:
129
+ static DispatchKeyExtractor make(const FunctionSchema& schema) {
130
+ return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
131
+ }
132
+
133
+ static DispatchKeyExtractor makeUninitialized() {
134
+ return DispatchKeyExtractor(c10::utils::bitset());
135
+ }
136
+
137
+ void registerSchema(const FunctionSchema& schema) {
138
+ TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
139
+ dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
140
+ }
141
+ void deregisterSchema() {
142
+ dispatch_arg_indices_reverse_ = c10::utils::bitset();
143
+ }
144
+
145
+ DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
146
+ DispatchKeySet ks;
147
+ dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
148
+ const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
149
+ if (C10_LIKELY(ivalue.isTensor())) {
150
+ // NB: Take care not to introduce a refcount bump (there's
151
+ // no safe toTensorRef method, alas)
152
+ ks = ks | ivalue.unsafeToTensorImpl()->key_set();
153
+ } else if (C10_UNLIKELY(ivalue.isTensorList())) {
154
+ for (const at::Tensor& tensor : ivalue.toTensorList()) {
155
+ ks = ks | tensor.key_set();
156
+ }
157
+ }
158
+ // Tensor?[] translates to a c10::List<IValue> so we need to peek inside
159
+ else if (C10_UNLIKELY(ivalue.isList())) {
160
+ for (const auto& elt : ivalue.toListRef()) {
161
+ if (elt.isTensor()) {
162
+ ks = ks | elt.toTensor().key_set();
163
+ }
164
+ }
165
+ }
166
+ });
167
+ // Keys that are fallthrough should be skipped
168
+ if (requiresBitsetPerBackend_) {
169
+ auto backend_idx = ks.getBackendIndex();
170
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
171
+ } else {
172
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
173
+ }
174
+ }
175
+
176
+ template<class... Args>
177
+ DispatchKeySet getDispatchKeySetUnboxed(const Args&... args) const {
178
+ auto ks = detail::multi_dispatch_key_set(args...);
179
+ // Keys that are fallthrough should be skipped
180
+ if (requiresBitsetPerBackend_) {
181
+ auto backend_idx = ks.getBackendIndex();
182
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
183
+ } else {
184
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
185
+ }
186
+ }
187
+
188
+ void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);
189
+
190
+ std::string dumpState() const;
191
+ void checkInvariants(const FunctionSchema& schema) const;
192
+
193
+ private:
194
+ static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
195
+ TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
196
+ "The function schema has ", schema.arguments().size(),
197
+ " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
198
+ c10::utils::bitset dispatch_arg_indices_reverse;
199
+ for (const auto index : c10::irange(schema.arguments().size())) {
200
+ if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
201
+ schema.arguments()[index].type()->isSubtypeOf(
202
+ *ListType::ofTensors()) ||
203
+ schema.arguments()[index].type()->isSubtypeOf(
204
+ *ListType::ofOptionalTensors()) ||
205
+ schema.arguments()[index].type()->isSubtypeOf(
206
+ *OptionalType::ofTensor())) {
207
+ dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
208
+ }
209
+ }
210
+ return dispatch_arg_indices_reverse;
211
+ }
212
+
213
+ explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
214
+ : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
215
+ , nonFallthroughKeys_(DispatchKeySet::FULL)
216
+ , requiresBitsetPerBackend_(false) {
217
+ for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
218
+ nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
219
+ }
220
+ }
221
+
222
+ // this is a bitset that has ones for each argument index which has to be
223
+ // considered for dispatch. This avoids having to iterate over the stack
224
+ // to find all the tensors. The bits are stored in reverse order, i.e.
225
+ // if dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
226
+ // the top of the stack (i.e. the i-th last argument of the function)
227
+ // is relevant for dispatch.
228
+ // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
229
+ // fallthrough
230
+ c10::utils::bitset dispatch_arg_indices_reverse_;
231
+
232
+ // Set of functionality keys for which the operator does NOT have fallthrough kernel.
233
+ DispatchKeySet nonFallthroughKeys_;
234
+ // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND.
235
+ // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
236
+ std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_;
237
+ // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path),
238
+ // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_
239
+ bool requiresBitsetPerBackend_;
240
+ };
241
+
242
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h ADDED
@@ -0,0 +1,795 @@
1
+ #pragma once
2
+
3
+ #include <ATen/SequenceNumber.h>
4
+ #include <ATen/core/boxing/KernelFunction.h>
5
+ #include <ATen/core/boxing/impl/boxing.h>
6
+ #include <ATen/core/dispatch/OperatorEntry.h>
7
+ #include <ATen/core/dispatch/CppSignature.h>
8
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
9
+ #include <ATen/record_function.h>
10
+ #include <c10/util/Exception.h>
11
+ #include <c10/util/LeftRight.h>
12
+ #include <list>
13
+ #include <mutex>
14
+ #include <condition_variable>
15
+ #include <type_traits>
16
+ #include <c10/core/SafePyObject.h>
17
+
18
+ #include <ATen/core/grad_mode.h>
19
+ #include <ATen/core/enum_tag.h>
20
+
21
+ #ifndef NDEBUG
22
+ #include <iostream>
23
+ #endif
24
+
25
+ namespace c10 {
26
+
27
+ TORCH_API bool show_dispatch_trace();
28
+ TORCH_API void dispatch_trace_nesting_incr();
29
+ TORCH_API void dispatch_trace_nesting_decr();
30
+ TORCH_API int64_t dispatch_trace_nesting_value();
31
+
32
+ struct DispatchTraceNestingGuard {
33
+ DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); }
34
+ ~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); }
35
+ };
36
+
37
+ class TORCH_API OperatorHandle;
38
+ template<class FuncType> class TypedOperatorHandle;
39
+
40
+ /**
41
+ * Implement this interface and register your instance with the dispatcher
42
+ * to get notified when operators are registered or deregistered with
43
+ * the dispatcher.
44
+ *
45
+ * NB: registration events only occur when a 'def' occurs; we don't trigger
46
+ * on 'impl' or 'fallback' calls.
47
+ */
48
+ class TORCH_API OpRegistrationListener {
49
+ public:
50
+ virtual ~OpRegistrationListener();
51
+
52
+ virtual void onOperatorRegistered(const OperatorHandle& op) = 0;
53
+ virtual void onOperatorDeregistered(const OperatorHandle& op) = 0;
54
+ };
55
+
56
+ namespace detail {
57
+ class RegistrationListenerList;
58
+ }
59
+ class SchemaRegistrationHandleRAII;
60
+
61
+ /**
62
+ * Top-level dispatch interface for dispatching via the dynamic dispatcher.
63
+ * Most end users shouldn't use this directly; if you're trying to register
64
+ * ops, look in op_registration.
65
+ */
66
+ class TORCH_API Dispatcher final {
67
+ private:
68
+ // For direct access to backend fallback information
69
+ friend class impl::OperatorEntry;
70
+
71
+ struct OperatorDef final {
72
+ explicit OperatorDef(OperatorName&& op_name)
73
+ : op(std::move(op_name)) {}
74
+
75
+ impl::OperatorEntry op;
76
+
77
+ // These refer to the number of outstanding RegistrationHandleRAII
78
+ // for this operator. def_count reflects only def() registrations
79
+ // (in the new world, this should only ever be 1, but old style
80
+ // registrations may register the schema multiple times, which
81
+ // will increase this count). def_and_impl_count reflects the number
82
+ // of combined def() and impl() registrations. When the last def() gets
83
+ // unregistered, we must immediately call the Deregistered listeners, but we
84
+ // must not actually delete the handle as there are other outstanding RAII
85
+ // destructors which will try to destruct and they had better still have a
86
+ // working operator handle in this case
87
+ size_t def_count = 0;
88
+ size_t def_and_impl_count = 0;
89
+ };
90
+ friend class OperatorHandle;
91
+ template<class> friend class TypedOperatorHandle;
92
+
93
+ struct Guard final {
94
+ Guard() : alive(true), mutex() {}
95
+ std::atomic<bool> alive;
96
+ std::mutex mutex;
97
+ };
98
+
99
+ public:
100
+ ~Dispatcher();
101
+
102
+ // Implementation note: this class abstracts over the fact that we have per-operator
103
+ // dispatch tables. This could be easily adjusted to have a single global hash
104
+ // table.
105
+ static Dispatcher& realSingleton();
106
+
107
+ C10_ALWAYS_INLINE static Dispatcher& singleton() {
108
+ #if !defined C10_MOBILE
109
+ // Implemented inline so that steady-state code needn't incur
110
+ // function-call overhead. We can't just inline `realSingleton`
111
+ // because the function-local static would get duplicated across
112
+ // all DSOs that include & use this header, leading to multiple
113
+ // singleton instances.
114
+ static Dispatcher& s = realSingleton();
115
+ return s;
116
+ #else
117
+ // For C10_MOBILE, we should never inline a static function that
118
+ // has a static member, since the generated code calls
119
+ // __cxa_guard_acquire and __cxa_guard_release which help
120
+ // implement exactly once semantics for the initialization of the
121
+ // static Dispatcher& s above (for the non-mobile case). That
122
+ // additional code when duplicated across all operator stubs
123
+ // for every backend results in a lot of additional code
124
+ // being generated by the compiler.
125
+ return realSingleton();
126
+ #endif
127
+ }
128
+
129
+ // ------------------------------------------------------------------------
130
+ //
131
+ // Accessing operators by schema
132
+ //
133
+ // ------------------------------------------------------------------------
134
+
135
+ /**
136
+ * Looks for an operator schema with the given name and overload name
137
+ * and returns it if it is registered WITH A SCHEMA.
138
+ * Returns nullopt otherwise.
139
+ */
140
+ c10::optional<OperatorHandle> findSchema(const OperatorName& operator_name);
141
+
142
+ /**
143
+ * Variant of findSchema that results in less code generated at the call site.
144
+ * It (1) takes const char* pointer rather than OperatorName (so we skip
145
+ * generating std::string constructor calls at the call site), and (2)
146
+ * it raises an exception if the operator is not found (so we skip
147
+ * generating exception raising code at the call site)
148
+ *
149
+ * Irritatingly, we still have to generate the handful of instructions
150
+ * for dealing with an exception being thrown during static initialization
151
+ * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
152
+ * could avoid this code too, but as the name of the function suggests,
153
+ * it does throw exceptions.
154
+ */
155
+ OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);
156
+
157
+ // Like findSchema, but also returns OperatorHandle even if there is no schema
158
+ c10::optional<OperatorHandle> findOp(const OperatorName& operator_name);
159
+
160
+ // Returns a list of all operator names present in the operatorLookupTable_
161
+ const std::vector<OperatorName> getAllOpNames();
162
+
163
+ // ------------------------------------------------------------------------
164
+ //
165
+ // Invoking operators
166
+ //
167
+ // ------------------------------------------------------------------------
168
+
169
+ template<class Return, class... Args>
170
+ Return call(const TypedOperatorHandle<Return (Args...)>& op, Args... args) const;
171
+
172
+
173
+ template<class Return, class... Args>
174
+ static Return callWithDispatchKeySlowPath(const TypedOperatorHandle<Return (Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);
175
+
176
+ // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculation.
177
+ // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set.
178
+ // Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key.
179
+ // See Note [Plumbing Keys Through The Dispatcher]
180
+ template<class Return, class... Args>
181
+ Return redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const;
182
+
183
+ // Invoke an operator via the boxed calling convention using an IValue stack
184
+ void callBoxed(const OperatorHandle& op, Stack* stack) const;
185
+ void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const;
186
+
187
+ // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
188
+ // See Note [Plumbing Keys Through The Dispatcher]
189
+ void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const;
190
+
191
+ bool hasBackendFallbackForDispatchKey(DispatchKey dk) {
192
+ auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk);
193
+ if (dispatch_ix < 0) return false;
194
+ return backendFallbackKernels_[dispatch_ix].kernel.isValid();
195
+ }
196
+
197
+ // Used by torchdeploy/multipy for multiple interpreters racing.
198
+ void waitForDef(const FunctionSchema& schema);
199
+ void waitForImpl(const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key);
200
+
201
+ // ------------------------------------------------------------------------
202
+ //
203
+ // Performing registrations (NON user public; use op_registration)
204
+ //
205
+ // ------------------------------------------------------------------------
206
+
207
+ /**
208
+ * Register a new operator schema.
209
+ *
210
+ * If a schema with the same operator name and overload name already exists,
211
+ * this function will check that both schemas are exactly identical.
212
+ */
213
+ RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector<at::Tag> tags = {});
214
+
215
+ /**
216
+ * Register a kernel to the dispatch table for an operator.
217
+ * If dispatch_key is nullopt, then this registers a fallback kernel.
218
+ *
219
+ * @return A RAII object that manages the lifetime of the registration.
220
+ * Once that object is destructed, the kernel will be deregistered.
221
+ */
222
+ // NB: steals the inferred function schema, as we may need to hold on to
223
+ // it for a bit until the real schema turns up
224
+ RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
225
+
226
+ /**
227
+ * Given an operator, tells the Dispatcher that we have implemented an abstract impl
228
+ * for this op in the given Python module. Call this a "pystub".
229
+ */
230
+ RegistrationHandleRAII registerAbstractImplPyStub(const OperatorName& op_name, const char* pymodule, const char* context);
231
+
232
+ /**
233
+ * Given an operator, throws if we have an abstract impl pystub.
234
+ */
235
+ void throwIfHasAbstractImplPyStub(OperatorName op_name);
236
+
237
+ c10::optional<std::pair<const char*, const char*>> getAbstractImplPyStub(OperatorName op_name);
238
+
239
+ /**
240
+ * Register a new operator by name.
241
+ */
242
+ RegistrationHandleRAII registerName(OperatorName op_name);
243
+
244
+ /**
245
+ * Register a fallback kernel for a backend.
246
+ * If an operator is called but there is no concrete kernel for the dispatch
247
+ * key of the given operator arguments, it will check if there is such a
248
+ * fallback kernel for the given dispatch key and, if yes, call that one.
249
+ */
250
+ RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug);
251
+
252
+ /**
253
+ * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend
254
+ * API. These invocations are only permitted once per program, so we raise
255
+ * an error if this is called again for the same namespace.
256
+ */
257
+ RegistrationHandleRAII registerLibrary(std::string ns, std::string debug);
258
+
259
+ // ------------------------------------------------------------------------
260
+ //
261
+ // Listeners on registrations
262
+ //
263
+ // ------------------------------------------------------------------------
264
+
265
+ /**
266
+ * Add a listener that gets called whenever a new op is registered or an existing
267
+ * op is deregistered. Immediately after registering, this listener gets called
268
+ * for all previously registered ops, so it can be used to keep track of ops
269
+ * registered with this dispatcher.
270
+ */
271
+ RegistrationHandleRAII addRegistrationListener(std::unique_ptr<OpRegistrationListener> listener);
272
+
273
+ void checkInvariants() const;
274
+
275
+ //
276
+ // ------------------------------------------------------------------------
277
+ //
278
+ // Assertions
279
+ //
280
+ // ------------------------------------------------------------------------
281
+
282
+ /**
283
+ * For testing purposes.
284
+ * Returns a list of all operators that were created through calls to registerImpl(),
285
+ * without any corresponding calls to registerDef(). After static initialization
286
+ * is done this is almost certainly a bug, as the created OperatorHandle won't have
287
+ * any schema associated with it and users calling the op through the dispatcher
288
+ * won't be able to access it
289
+ *
290
+ * Note that we cannot enforce this invariant "as we go" during static initialization,
291
+ * due to undefined static initialization order; we have no guarantees over the order
292
+ * in which .def() and .impl() calls are registered in the dispatcher at static
293
+ * initialization time. So this function should only be called after static initialization.
294
+ */
295
+ std::vector<OperatorHandle> findDanglingImpls() const;
296
+
297
+ /**
298
+ * Useful for inspecting global Dispatcher registration state.
299
+ * Returns the names of all operators with a kernel registered for the specified DispatchKey.
300
+ * If no DispatchKey is specified, it returns all registered operators.
301
+ */
302
+ std::vector<OperatorName> getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const;
303
+
304
+ private:
305
+ Dispatcher();
306
+
307
+ static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey);
308
+ static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey);
309
+ static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, c10::ArrayRef<const c10::IValue> args);
310
+
311
+ #ifdef FBCODE_CAFFE2
312
+ static bool profilingOperatorEvents();
313
+ static void fireOpStartUSDT(at::RecordFunction::schema_ref_t schema_ref);
314
+ static void fireOpEndUSDT(at::RecordFunction::schema_ref_t schema_ref);
315
+ #endif // FBCODE_CAFFE2
316
+
317
+ OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema);
318
+ OperatorHandle findOrRegisterName_(const OperatorName& op_name);
319
+
320
+ void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name);
321
+ void deregisterImpl_(
322
+ const OperatorHandle& op,
323
+ const OperatorName& op_name,
324
+ c10::optional<DispatchKey> dispatch_key,
325
+ impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
326
+ void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
327
+ void deregisterFallback_(DispatchKey dispatchKey);
328
+ void deregisterLibrary_(const std::string& ns);
329
+ void cleanup(const OperatorHandle& op, const OperatorName& op_name);
330
+ void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug);
331
+
332
+ std::list<OperatorDef> operators_;
333
+ #if !defined(C10_MOBILE)
334
+ LeftRight<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
335
+ #else
336
+ RWSafeLeftRightWrapper<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
337
+ #endif
338
+ // Map from namespace to debug string (saying, e.g., where the library was defined)
339
+ ska::flat_hash_map<std::string, std::string> libraries_;
340
+
341
+ std::array<impl::AnnotatedKernel, num_runtime_entries> backendFallbackKernels_;
342
+
343
+ std::unique_ptr<detail::RegistrationListenerList> listeners_;
344
+
345
+ // This condition variable gets notified whenever we add a new def/impl to the
346
+ // dispatch table. This is primarily used by multipy/torchdeploy, when
347
+ // we have multiple interpreters trying to register to the dispatch table.
348
+ // In this situation, whenever the non-primary interpreter would have tried
349
+ // to register to the dispatch table, instead it will check to see if the
350
+ // expected registration has already been made, and if it hasn't, wait on
351
+ // this condition variable to see if it was just racing with the primary
352
+ // interpreter.
353
+ //
354
+ // We expect it to be rare for there to be any waiters on this condition
355
+ // variable. This is mostly just to help give better diagnostics if
356
+ // something goes horribly wrong
357
+ std::condition_variable cond_var_;
358
+
359
+ // Protect concurrent access to the dispatcher. We store this in a
360
+ // `shared_ptr` as we return callbacks that call back into dispatcher methods,
361
+ // and we need to be able to handle and guard against the event when the
362
+ // `Dispatcher` has been destroyed before the callbacks fire.
363
+ std::shared_ptr<Guard> guard_;
364
+ };
365
+
366
+ /**
367
+ * This is a handle to an operator schema registered with the dispatcher.
368
+ * This handle can be used to register kernels with the dispatcher or
369
+ * to lookup a kernel for a certain set of arguments.
370
+ */
371
+ class TORCH_API OperatorHandle {
372
+ template <typename T> friend struct std::hash;
373
+
374
+ public:
375
+ OperatorHandle(OperatorHandle&&) noexcept = default;
376
+ OperatorHandle& operator=(OperatorHandle&&) noexcept = default;
377
+ OperatorHandle(const OperatorHandle&) = default;
378
+ OperatorHandle& operator=(const OperatorHandle&) = default;
379
+ // NOLINTNEXTLINE(performance-trivially-destructible)
380
+ ~OperatorHandle();
381
+
382
+ const OperatorName& operator_name() const {
383
+ return operatorDef_->op.operator_name();
384
+ }
385
+
386
+ bool hasSchema() const {
387
+ return operatorDef_->op.hasSchema();
388
+ }
389
+
390
+ const FunctionSchema& schema() const {
391
+ return operatorDef_->op.schema();
392
+ }
393
+
394
+ const std::string& debug() const {
395
+ return operatorDef_->op.debug();
396
+ }
397
+
398
+ std::string dumpState() const {
399
+ return operatorDef_->op.dumpState();
400
+ }
401
+
402
+ bool hasKernelForDispatchKey(DispatchKey k) const {
403
+ return operatorDef_->op.hasKernelForDispatchKey(k);
404
+ }
405
+
406
+ bool hasKernelForAnyDispatchKey(DispatchKeySet k) const {
407
+ return operatorDef_->op.hasKernelForAnyDispatchKey(k);
408
+ }
409
+
410
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const {
411
+ return operatorDef_->op.hasComputedKernelForDispatchKey(k);
412
+ }
413
+
414
+ std::string dumpComputedTable() const {
415
+ return operatorDef_->op.dumpComputedTable();
416
+ }
417
+
418
+ void checkInvariants() const {
419
+ return operatorDef_->op.checkInvariants();
420
+ }
421
+
422
+ c10::ArrayRef<at::Tag> getTags() const {
423
+ return operatorDef_->op.getTags();
424
+ }
425
+
426
+ void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback) {
427
+ operatorDef_->op.setReportErrorCallback_(std::move(callback));
428
+ }
429
+
430
+ bool hasTag(const at::Tag& tag) const {
431
+ for(const auto& tag_: getTags()) {
432
+ if (tag == tag_) {
433
+ return true;
434
+ }
435
+ }
436
+ return false;
437
+ }
438
+
439
+ template<class FuncType>
440
+ TypedOperatorHandle<FuncType> typed() const {
441
+ // NB: This assert is not 100% sound: you can retrieve a typed() operator
442
+ // handle prior to ANY C++ signature being registered on the operator
443
+ // and the check will say everything is OK (at which point you can then
444
+ // smuggle in a kernel that is typed incorrectly). For everything
445
+ // in the core library this won't happen, because all the static registrations
446
+ // will be done by the time a typed() handle is acquired.
447
+ #if !defined C10_MOBILE
448
+ operatorDef_->op.assertSignatureIsCorrect<FuncType>();
449
+ if (fn_has_symint<FuncType>::value) {
450
+ operatorDef_->op.assertSignatureIsCorrect<typename fn_remove_symint<FuncType>::type>();
451
+ }
452
+ #endif
453
+ return TypedOperatorHandle<FuncType>(operatorIterator_);
454
+ }
455
+
456
+ void callBoxed(Stack* stack) const {
457
+ c10::Dispatcher::singleton().callBoxed(*this, stack);
458
+ }
459
+
460
+ void callBoxed(Stack& stack) const {
461
+ callBoxed(&stack);
462
+ }
463
+
464
+ void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const {
465
+ c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack);
466
+ }
467
+
468
+ void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
469
+ c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
470
+ }
471
+
472
+ template <typename F>
473
+ PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const {
474
+ return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor);
475
+ }
476
+
477
+ bool operator==(const OperatorHandle& other) const {
478
+ return operatorDef_ == other.operatorDef_;
479
+ }
480
+
481
+ bool operator!=(const OperatorHandle& other) const {
482
+ return operatorDef_ != other.operatorDef_;
483
+ }
484
+
485
+ private:
486
+ explicit OperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
487
+ : operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {}
488
+ friend class Dispatcher;
489
+ template<class> friend class TypedOperatorHandle;
490
+
491
+ // Storing a direct pointer to the OperatorDef even though we
492
+ // already have the iterator saves an instruction in the critical
493
+ // dispatch path. The iterator is effectively a
494
+ // pointer-to-std::list-node, and (at least in libstdc++'s
495
+ // implementation) the element is at an offset 16 bytes from that,
496
+ // because the prev/next pointers come first in the list node
497
+ // struct. So, an add instruction would be necessary to convert from the
498
+ // iterator to an OperatorDef*.
499
+ Dispatcher::OperatorDef* operatorDef_;
500
+
501
+ // We need to store this iterator in order to make
502
+ // Dispatcher::cleanup() fast -- it runs a lot on program
503
+ // termination (and presumably library unloading).
504
+ std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
505
+ };
506
+
507
+ /**
508
+ * This is a handle to an operator schema registered with the dispatcher.
509
+ * It holds the same information as an OperatorHandle, but it is templated
510
+ * on the operator arguments and allows calling the operator in an
511
+ * unboxed way.
512
+ */
513
+ template<class FuncType>
514
+ class TypedOperatorHandle final {
515
+ static_assert(guts::false_t<FuncType>(), "FuncType in OperatorHandle::typed<FuncType> was not a valid function type");
516
+ };
517
+ template<class Return, class... Args>
518
+ class TypedOperatorHandle<Return (Args...)> final : public OperatorHandle {
519
+ public:
520
+ TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
521
+ TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
522
+ TypedOperatorHandle(const TypedOperatorHandle&) = default;
523
+ TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
524
+
525
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
526
+ C10_ALWAYS_INLINE Return call(Args... args) const {
527
+ return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
528
+ }
529
+
530
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
531
+ C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
532
+ return c10::Dispatcher::singleton().redispatch<Return, Args...>(*this, currentDispatchKeySet, std::forward<Args>(args)...);
533
+ }
534
+
535
+ private:
536
+ explicit TypedOperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
537
+ : OperatorHandle(operatorIterator) {}
538
+ friend class OperatorHandle;
539
+ };
540
+
541
+ namespace detail {
542
+ template <class... Args> inline void unused_arg_(const Args&...) {}
543
+
544
+ // CaptureKernelCall is intended to capture return values from Dispatcher
545
+ // unboxed kernel calls. A record function may request to get outputs from the
546
+ // kernel calls. For boxed kernels, it's straightforward, the returned values
547
+ // are in the stack object. The stack can be passed to record functions. For
548
+ // unboxed kernels, we need to handle different kinds of return values, cache
549
+ // them temporarily, then release the values for the actual function call
550
+ // return.
551
+ template <typename ReturnType>
552
+ struct CaptureKernelCall {
553
+ template <typename F, typename... Args>
554
+ CaptureKernelCall(
555
+ const F& kernel,
556
+ const TypedOperatorHandle<ReturnType(Args...)>& op,
557
+ const DispatchKeySet& dispatchKeySet,
558
+ Args&&... args)
559
+ // Calls the kernel and captures the result in output_.
560
+ : output_{kernel.template call<ReturnType, Args...>(
561
+ op,
562
+ dispatchKeySet,
563
+ std::forward<Args>(args)...)} {}
564
+ // Wraps the return values in a Stack.
565
+ Stack getOutputs() {
566
+ Stack stack;
567
+ impl::push_outputs<ReturnType, false>::copy(output_, &stack);
568
+ return stack;
569
+ }
570
+ // Since we are returning the output_, we don't expect the output_ to be used
571
+ // afterward. Copy elision and RVO do not apply to class data members. Using
572
+ // move semantics to avoid copies when possible.
573
+ ReturnType release() && {
574
+ return std::move(output_);
575
+ }
576
+
577
+ private:
578
+ ReturnType output_;
579
+ };
580
+
581
+ // Handle the lvalue reference differently since it should not be moved.
582
+ template <>
583
+ inline at::Tensor& CaptureKernelCall<at::Tensor&>::release() && {
584
+ return output_;
585
+ }
586
+
587
+ // Handle case where the kernel returns void.
588
+ template <>
589
+ struct CaptureKernelCall<void> {
590
+ template <typename F, typename... Args>
591
+ CaptureKernelCall(
592
+ const F& kernel,
593
+ const TypedOperatorHandle<void(Args...)>& op,
594
+ const DispatchKeySet& dispatchKeySet,
595
+ Args&&... args) {
596
+ // Call the kernel; a void return has nothing to capture.
597
+ kernel.template call<void, Args...>(
598
+ op, dispatchKeySet, std::forward<Args>(args)...);
599
+ }
600
+ Stack getOutputs() {
601
+ return Stack();
602
+ }
603
+ void release() && {}
604
+ };
605
+
606
+ } // namespace detail
607
+
608
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
609
+ template<class Return, class... Args>
610
+ inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle<Return(Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) {
611
+ // If callbacks need inputs, we box the arguments and pass them to the guard.
612
+ // Note: For perf reasons we wouldn't want to prematurely box the arguments.
613
+ at::RecordFunction guard(std::move(stepCallbacks));
614
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved());
615
+ auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
616
+ auto& schema = op.schema();
617
+ auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
618
+ constexpr auto num_boxed_args = impl::boxed_size<Args...>();
619
+ if constexpr (num_boxed_args != 0) {
620
+ if (guard.needsInputs()) {
621
+ // If we used std::array<IValue, num_boxed_args> here, we would
622
+ // have to spend time default constructing the IValues in
623
+ // boxedArgs. aligned_storage has no such requirement.
624
+ impl::IValueAlignedStorage boxedArgs[num_boxed_args];
625
+ // For debugging only; could be removed (but the compiler will do
626
+ // that for us and it's nice to have the extra assurance of
627
+ // correctness from our debug builds).
628
+ int lastArgIdx = 0;
629
+ impl::boxArgsToStack(boxedArgs, lastArgIdx, args...);
630
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args);
631
+ // I don't *think* we need std::launder here, because IValue has
632
+ // no subclasses and no const or reference fields.
633
+ runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
634
+ for (size_t ii = 0; ii < num_boxed_args; ++ii) {
635
+ reinterpret_cast<IValue *>(&boxedArgs[ii])->~IValue();
636
+ }
637
+ } else {
638
+ runRecordFunction(guard, schema_ref, dispatchKey);
639
+ }
640
+ } else {
641
+ runRecordFunction(guard, schema_ref, dispatchKey);
642
+ }
643
+
644
+ if (C10_UNLIKELY(guard.needsOutputs())) {
645
+ // Calls the kernel and captures the output temporarily to pass to
646
+ // RecordFunction.
647
+ detail::CaptureKernelCall<Return> captureKernelCall(
648
+ kernel, op, dispatchKeySet, std::forward<Args>(args)...);
649
+ guard.setOutputs(captureKernelCall.getOutputs());
650
+ // Releases the captured output to return to caller.
651
+ return std::move(captureKernelCall).release();
652
+ }
653
+
654
+ // keeping the guard alive while executing the kernel
655
+ return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
656
+ }
657
+
658
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
659
+ template<class Return, class... Args>
660
+ C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle<Return(Args...)>& op, Args... args) const {
661
+ detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
662
+ auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor()
663
+ .template getDispatchKeySetUnboxed<Args...>(args...);
664
+ #ifndef NDEBUG
665
+ DispatchTraceNestingGuard debug_guard;
666
+ if (show_dispatch_trace()) {
667
+ auto nesting_value = dispatch_trace_nesting_value();
668
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
669
+ std::cerr << "[call] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
670
+ }
671
+ #endif
672
+ const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet);
673
+ #ifndef PYTORCH_DISABLE_PER_OP_PROFILING
674
+ auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
675
+ if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) {
676
+ return callWithDispatchKeySlowPath<Return, Args...>(op, *step_callbacks, dispatchKeySet, kernel, std::forward<Args>(args)...);
677
+ }
678
+ #endif // PYTORCH_DISABLE_PER_OP_PROFILING
679
+
680
+ #ifdef FBCODE_CAFFE2
681
+ if(profilingOperatorEvents()) {
682
+ struct FireOpRAII {
683
+ FireOpRAII(at::RecordFunction::schema_ref_t schema_ref) : schema_ref_(schema_ref) {
684
+ fireOpStartUSDT(schema_ref);
685
+ }
686
+ ~FireOpRAII() { fireOpEndUSDT(schema_ref_); }
687
+ at::RecordFunction::schema_ref_t schema_ref_;
688
+ } event(op.schema());
689
+ return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
690
+ } else {
691
+ return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
692
+ }
693
+ #else
694
+ return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
695
+ #endif // FBCODE_CAFFE2
696
+ }
697
+
698
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
699
+ template<class Return, class... Args>
700
+ inline Return Dispatcher::redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const {
701
+ detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
702
+ // do not use RecordFunction on redispatch
703
+ #ifndef NDEBUG
704
+ DispatchTraceNestingGuard debug_guard;
705
+ if (show_dispatch_trace()) {
706
+ auto nesting_value = dispatch_trace_nesting_value();
707
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
708
+ std::cerr << "[redispatch] op=[" << op.operator_name() << "], key=[" << toString(currentDispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
709
+ }
710
+ #endif
711
+ const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet);
712
+ return kernel.template call<Return, Args...>(op, currentDispatchKeySet, std::forward<Args>(args)...);
713
+ }
714
+
715
+ inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
716
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
717
+ const auto& entry = op.operatorDef_->op;
718
+ auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
719
+ #ifndef NDEBUG
720
+ DispatchTraceNestingGuard debug_guard;
721
+ if (show_dispatch_trace()) {
722
+ auto nesting_value = dispatch_trace_nesting_value();
723
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
724
+ std::cerr << "[callBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
725
+ }
726
+ #endif
727
+ const auto& kernel = entry.lookup(dispatchKeySet);
728
+ #ifndef PYTORCH_DISABLE_PER_OP_PROFILING
729
+ auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
730
+ if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) {
731
+ at::RecordFunction guard(std::move(*step_callbacks));
732
+ auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
733
+ auto& schema = op.schema();
734
+ auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
735
+ guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
736
+ : runRecordFunction(guard, schema_ref, dispatchKey);
737
+
738
+ // keeping the guard alive while executing the kernel
739
+ kernel.callBoxed(op, dispatchKeySet, stack);
740
+
741
+ if (C10_UNLIKELY(guard.needsOutputs())) {
742
+ guard.setOutputs(*stack);
743
+ }
744
+ return;
745
+ }
746
+ #endif // PYTORCH_DISABLE_PER_OP_PROFILING
747
+ kernel.callBoxed(op, dispatchKeySet, stack);
748
+ }
749
+
750
+ // NB: this doesn't count as a "true" dispatcher jump, so no instrumentation
751
+ inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const {
752
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
753
+ const auto& entry = op.operatorDef_->op;
754
+ // We still compute this as we're obligated to pass it on to the internal
755
+ // kernel, if it is a boxed fallback
756
+ auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
757
+ const auto& kernel = ([&]() {
758
+ if (op.hasKernelForDispatchKey(dk)) {
759
+ return entry.kernelForDispatchKey(dk);
760
+ } else {
761
+ auto idx = getDispatchTableIndexForDispatchKey(dk);
762
+ TORCH_INTERNAL_ASSERT(idx >= 0);
763
+ return backendFallbackKernels_[idx].kernel;
764
+ }
765
+ })();
766
+ kernel.callBoxed(op, dispatchKeySet, stack);
767
+ }
768
+
769
+ inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const {
770
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
771
+ const auto& entry = op.operatorDef_->op;
772
+ #ifndef NDEBUG
773
+ DispatchTraceNestingGuard debug_guard;
774
+ if (show_dispatch_trace()) {
775
+ auto nesting_value = dispatch_trace_nesting_value();
776
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
777
+ std::cerr << "[redispatchBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
778
+ }
779
+ #endif
780
+ const auto& kernel = entry.lookup(dispatchKeySet);
781
+ return kernel.callBoxed(op, dispatchKeySet, stack);
782
+ }
783
+
784
+ } // namespace c10
785
+
786
+ namespace std {
787
+
788
+ template <>
789
+ struct hash<c10::OperatorHandle> {
790
+ size_t operator()(const c10::OperatorHandle& op) const noexcept {
791
+ return std::hash<void*>{}(static_cast<void*>(op.operatorDef_));
792
+ }
793
+ };
794
+
795
+ } // namespace std
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/operator_name.h>
4
+ #include <string>
5
+ #include <unordered_set>
6
+
7
+ namespace c10 {
8
+
9
+ struct TORCH_API ObservedOperators {
10
+ ObservedOperators() = delete;
11
+
12
+ static bool isObserved(const OperatorName& name);
13
+
14
+ static std::unordered_set<std::string>& getUnobservedOperatorList();
15
+ };
16
+
17
+ } // namespace c10
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h ADDED
@@ -0,0 +1,313 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/function_schema.h>
4
+ #include <c10/util/Metaprogramming.h>
5
+ #include <c10/util/flat_hash_map.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <c10/core/DispatchKey.h>
8
+ #include <c10/core/PyHandleCache.h>
9
+ #include <c10/core/SafePyObject.h>
10
+ #include <ATen/core/ivalue.h>
11
+ #include <ATen/core/boxing/KernelFunction.h>
12
+ #include <ATen/core/dispatch/DispatchKeyExtractor.h>
13
+
14
+ #include <ATen/core/dispatch/OperatorOptions.h>
15
+ #include <ATen/core/dispatch/CppSignature.h>
16
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
17
+ #include <ATen/core/enum_tag.h>
18
+
19
+ #include <list>
20
+ #include <array>
21
+
22
+ #ifdef C10_MOBILE
23
+ #define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
24
+ #endif
25
+
26
+ namespace c10 {
27
+
28
+ class Dispatcher;
29
+
30
+ namespace impl {
31
+
32
+ // This data structure represents a kernel that was registered to us from a
33
+ // user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
34
+ // about the kernel that isn't necessary for actual dispatching (this is why
35
+ // we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
36
+ // giving good error messages.
37
+ struct AnnotatedKernel final {
38
+ AnnotatedKernel(KernelFunction k, std::unique_ptr<FunctionSchema> s, std::string d)
39
+ : kernel(std::move(k))
40
+ , inferred_function_schema(std::move(s))
41
+ , debug(std::move(d))
42
+ {}
43
+ AnnotatedKernel() = default;
44
+ KernelFunction kernel;
45
+ std::unique_ptr<FunctionSchema> inferred_function_schema;
46
+ // A little debug string to help us identify the kernel in question.
47
+ // Most importantly it records the TORCH_LIBRARY block that did the
48
+ // registration.
49
+ std::string debug;
50
+ };
51
+
52
+ // This data structure represents operator schema, with metadata specifying
53
+ // where the registration of this schema occurred
54
+ struct AnnotatedSchema final {
55
+ AnnotatedSchema(FunctionSchema s, std::string d)
56
+ : schema(std::move(s))
57
+ , debug(std::move(d))
58
+ {}
59
+ FunctionSchema schema;
60
+ std::string debug;
61
+ };
62
+
63
+ // Internal data structure that records information about a specific operator.
64
+ // It's not part of the public API; typically, users will interact with
65
+ // OperatorHandle instead.
66
+ //
67
+ // Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
68
+ // lock (this is important because some methods in OperatorEntry access
69
+ // dispatcher state)
70
+ class TORCH_API OperatorEntry final {
71
+ public:
72
+ explicit OperatorEntry(OperatorName&& operator_name);
73
+
74
+ OperatorEntry(const OperatorEntry&) = delete;
75
+ OperatorEntry(OperatorEntry&&) noexcept = delete;
76
+ OperatorEntry& operator=(const OperatorEntry&) = delete;
77
+ OperatorEntry& operator=(OperatorEntry&&) noexcept = delete;
78
+
79
+ const FunctionSchema& schema() const {
80
+ TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet");
81
+ return schema_->schema;
82
+ }
83
+ const std::string& debug() const {
84
+ TORCH_INTERNAL_ASSERT(schema_.has_value());
85
+ return schema_->debug;
86
+ }
87
+ bool hasSchema() const {
88
+ return schema_.has_value();
89
+ }
90
+
91
+ bool isObserved() const {
92
+ return is_observed_;
93
+ }
94
+
95
+ // We may allocate an OperatorEntry for an operator even when we don't
96
+ // have a schema. When we receive the schema registration, we post
97
+ // facto register a schema.
98
+ //
99
+ // NB: registerSchema/deregisterSchema are not idempotent; if you
100
+ // attempt to register a schema when one is already present or vice
101
+ // versa that is an error. (Refcounting for the registrations is
102
+ // handled in the OperatorHandle in Dispatcher)
103
+ void registerSchema(FunctionSchema&&, std::string&& debug, std::vector<at::Tag> tags = {});
104
+ void deregisterSchema();
105
+
106
+ const OperatorName& operator_name() const {
107
+ return name_;
108
+ }
109
+
110
+ #ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
111
+ using AnnotatedKernelContainer = std::array<AnnotatedKernel, 1>;
112
+ #else
113
+ using AnnotatedKernelContainer = std::list<AnnotatedKernel>;
114
+ #endif
115
+ using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator;
116
+
117
+ // Why are kernels and fallback asymmetric? It has to do with ownership.
118
+ // Kernels and the computed dispatch tables for them are canonically
119
+ // owned by OperatorEntry, but backend fallbacks are specified once
120
+ // and apply for all operators, so they should be owned by Dispatcher.
121
+ // However, the registration of a backend fallback affects the
122
+ // state of the computed dispatch table, so when a backend fallback
123
+ // is updated, we need to update the operator tables too. Thus,
124
+ // registerKernel is the mechanism by which we give kernels to
125
+ // operator entry to own (and update dispatch table), but we only
126
+ // need a non-owning mechanism to update fallback.
127
+
128
+ // Precondition: Dispatcher::mutex_ is held
129
+ // Postcondition: caller is responsible for disposing of the kernel
130
+ AnnotatedKernelContainerIterator registerKernel(
131
+ const Dispatcher& dispatcher,
132
+ c10::optional<DispatchKey> dispatch_key,
133
+ KernelFunction kernel,
134
+ c10::optional<CppSignature> cpp_signature,
135
+ std::unique_ptr<FunctionSchema> inferred_function_schema,
136
+ std::string debug
137
+ );
138
+
139
+ // Precondition: Dispatcher::mutex_ is held
140
+ void deregisterKernel_(
141
+ const Dispatcher& dispatcher,
142
+ c10::optional<DispatchKey> dispatch_key,
143
+ AnnotatedKernelContainerIterator kernel
144
+ );
145
+
146
+ // Precondition: Dispatcher::mutex_ is held
147
+ void updateFallback(
148
+ const Dispatcher& dispatcher,
149
+ DispatchKey dispatch_key
150
+ );
151
+
152
+ // Precondition: Dispatcher::mutex_ is held
153
+ void updateSchemaAliasAnalysis(AliasAnalysisKind a) {
154
+ TORCH_INTERNAL_ASSERT(schema_.has_value());
155
+ schema_->schema.setAliasAnalysis(a);
156
+ }
157
+
158
+ std::string dumpComputedTable() const;
159
+ std::string dumpState() const;
160
+ void checkInvariants() const;
161
+
162
+ const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; }
163
+
164
+ // Asserts that the given FuncType is correct for calling this operator in an unboxed way.
165
+ template<class FuncType>
166
+ inline void assertSignatureIsCorrect() {
167
+ assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
168
+ }
169
+
170
+ void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;
171
+
172
+ [[noreturn]] void reportError(DispatchKey dispatchKey) const;
173
+
174
+ const KernelFunction& lookup(DispatchKeySet ks) const {
175
+ const auto idx = ks.getDispatchTableIndexForDispatchKeySet();
176
+ if (C10_UNLIKELY(idx == -1)) {
177
+ reportError(ks.highestPriorityTypeId());
178
+ }
179
+ const auto& kernel = dispatchTable_[idx];
180
+ // A valid kernel *always* has a boxed kernel and *may* have an
181
+ // unboxed kernel. However, we typically do unboxed calls in at::
182
+ // APIs, where the kernel 1) will very likely be valid and 2)
183
+ // should have an unboxed kernel. Checking the unboxed kernel
184
+ // first will allow us to avoid touching the boxed kernel at all
185
+ // in the common case.
186
+ if (C10_UNLIKELY(!kernel.isValidUnboxed())) {
187
+ if (!kernel.isValid()) {
188
+ reportError(ks.highestPriorityTypeId());
189
+ }
190
+ }
191
+ return kernel;
192
+ }
193
+
194
+ std::string listAllDispatchKeys() const;
195
+
196
+ // Returns true if kernel_ has entry for any key in ks.
197
+ //
198
+ // Invariant: There are no alias keys in the passed-in dispatch key set.
199
+ // Note [No Alias Keys in DispatchKeySet]
200
+ // Alias keys should be checked using `hasKernelForDispatchKey`
201
+ // Alias keys shouldn't go inside of a DispatchKeySet, since they can technically
202
+ // have a value > 63 (causing overflow).
203
+ bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const;
204
+ // Returns true if kernel_ has entry for a particular key.
205
+ bool hasKernelForDispatchKey(DispatchKey k) const;
206
+ // Retrieves the kernel entry at a particular key. Symmetric with
207
+ // hasKernelForDispatchKey. To get the AnnotatedKernel, see
208
+ // getKernelForDispatchKey (private)
209
+ const KernelFunction& kernelForDispatchKey(DispatchKey k) const;
210
+ // Returns true if the "computed table" has an entry for a particular key.
211
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const;
212
+ // Returns all the operator tags added at the time of registration
213
+ const std::vector<at::Tag>& getTags() const;
214
+ void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback);
215
+
216
+ template <typename F>
217
+ PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const {
218
+ return py_cache_.ptr_or(self_interpreter, slow_accessor);
219
+ }
220
+
221
+ private:
222
+
223
+ OperatorName name_;
224
+ c10::optional<AnnotatedSchema> schema_;
225
+ #ifndef C10_MOBILE
226
+ std::vector<at::Tag> tags_;
227
+ #endif
228
+ std::array<KernelFunction, c10::num_runtime_entries> dispatchTable_;
229
+ DispatchKeyExtractor dispatchKeyExtractor_;
230
+ // Pointer to the torch.ops.ns.op.overload object for speed
231
+ c10::PyHandleCache py_cache_;
232
+
233
+ // kernels_ stores all registered kernels for the corresponding dispatch key
234
+ // and catchAllKernels_ stores the catch-all kernels.
235
+ // If an operator library gets loaded that overwrites an already existing kernel,
236
+ // both kernels will be in that list but only the newer one will be in
237
+ // dispatchTable. If any of the kernels go away (say the library gets
238
+ // unloaded), we remove the kernel from this list and update the
239
+ // dispatchTable if necessary.
240
+ // Kernels in the list are ordered by registration time, descending:
241
+ // newer registrations come before older ones.
242
+ // We do not combine dispatchTable and kernels into one hash map because
243
+ // kernels is a larger data structure and accessed quite infrequently
244
+ // while dispatchTable is accessed often and should be kept small to fit
245
+ // into CPU caches.
246
+ // Invariants:
247
+ // - dispatchTable[dispatch_key] == kernels_[dispatch_key].front()
248
+ // - dispatchTable[dispatch_key] does not exist if and only if
249
+ // kernels_[dispatch_key] does not exist
250
+ // - If kernels_[dispatch_key] exists, then it has elements.
251
+ // It is never an empty list.
252
+ //
253
+ // Why do we do that?
254
+ // -----
255
+ // We mostly do this to enable Jupyter notebooks where a cell registering
256
+ // a kernel could be executed multiple times and the later execution
257
+ // should overwrite the earlier one. Note that this still fails when the
258
+ // function schema changed between the executions, but it works as long
259
+ // as the function schema didn't change. A better solution would be to
260
+ // unload the old extension library from the Jupyter cell when the cell is
261
+ // re-executed and then only allow one kernel here, i.e. error if a kernel
262
+ // is already registered, but that's a lot of effort to implement and
263
+ // currently not high-pri.
264
+ ska::flat_hash_map<DispatchKey,
265
+ #ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
266
+ // On mobile, we needn't worry about Jupyter notebooks.
267
+ std::array<AnnotatedKernel, 1>
268
+ #else
269
+ std::list<AnnotatedKernel>
270
+ #endif
271
+ > kernels_;
272
+
273
+ const AnnotatedKernel& missingKernel() const;
274
+ const AnnotatedKernel& ambiguousAutogradOtherKernel() const;
275
+
276
+ // cpp_signature_ stores function signature if any of
277
+ // the kernels was created in a way that allowed us to know the function
278
+ // signature (i.e. by supplying an unboxed C++ kernel function).
279
+ // If this is set, it will be used to check that future kernel
280
+ // registrations match and it will be used in unboxed function calls
281
+ // to verify their arguments against the known function signature.
282
+ struct CppSignatureWithDebug {
283
+ CppSignature signature;
284
+ std::string debug;
285
+ c10::optional<DispatchKey> dispatch_key;
286
+ };
287
+ c10::optional<CppSignatureWithDebug> cpp_signature_;
288
+ c10::optional<CppSignatureWithDebug> sym_cpp_signature_;
289
+
290
+ // A Python custom error handler for OperatorEntry::reportError
291
+ std::unique_ptr<c10::SafePyObject> report_error_callback_;
292
+
293
+ // Whether this operator needs to be observed with RecordFunction
294
+ const bool is_observed_;
295
+
296
+ [[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const;
297
+ const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const;
298
+ std::pair<const AnnotatedKernel&, const char*> computeDispatchTableEntryWithDebug(
299
+ const c10::Dispatcher& dispatcher, DispatchKey dispatch_key
300
+ ) const;
301
+ // This function re-establishes the invariant that dispatchTable
302
+ // contains the front element from the kernels list for a given runtime dispatch key.
303
+ void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
304
+ // Like above, but also handles alias dispatch keys.
305
+ void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
306
+ // Like above, but for ALL entries in the dispatch table.
307
+ void updateDispatchTableFull_(const c10::Dispatcher& dispatcher);
308
+ // Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front().
309
+ const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const;
310
+ };
311
+
312
+ } // namespace impl
313
+ } // namespace c10
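
The lookup() fast path above is what ultimately services calls made through the public c10::Dispatcher API. As a rough, hedged illustration (a minimal sketch assuming a full libtorch build; the helper name add_via_dispatcher is invented here, and the schema string is the standard aten::add.Tensor overload rather than anything defined in this header):

#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>

at::Tensor add_via_dispatcher(const at::Tensor& a, const at::Tensor& b) {
  // findSchemaOrThrow resolves the OperatorHandle (and thus the OperatorEntry)
  // for "aten::add.Tensor"; typed<> checks the unboxed C++ signature against
  // the entry's recorded cpp_signature_.
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::add", "Tensor")
      .typed<at::Tensor(const at::Tensor&, const at::Tensor&, const at::Scalar&)>();
  // call() computes a DispatchKeySet from the arguments and indexes the
  // per-operator dispatch table (the lookup() shown above) to pick a kernel.
  return op.call(a, b, /*alpha=*/1);
}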
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
+ namespace c10 {
6
+
7
+ enum class AliasAnalysisKind : uint8_t {
8
+ INTERNAL_SPECIAL_CASE,
9
+ CONSERVATIVE, // The most conservative alias analysis type, assumes
10
+ // side-effects. This is the default analysis.
11
+ FROM_SCHEMA,
12
+ PURE_FUNCTION
13
+ };
14
+
15
+ #if !defined(_MSC_VER)
16
+ constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
17
+ #endif
18
+ inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
19
+ return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE)
20
+ ? "CONSERVATIVE"
21
+ : (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA)
22
+ ? "FROM_SCHEMA"
23
+ : (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION)
24
+ ? "PURE_FUNCTION"
25
+ : (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE)
26
+ ? "INTERNAL_SPECIAL_CASE"
27
+ : "UNKNOWN";
28
+ }
29
+
30
+ } // namespace c10
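
A small usage sketch of the enum and its toString helper (nothing here beyond the header just added; the main function is of course illustrative):

#include <ATen/core/dispatch/OperatorOptions.h>
#include <iostream>

int main() {
  // CONSERVATIVE is documented above as the default, most pessimistic analysis.
  auto k = c10::AliasAnalysisKind::CONSERVATIVE;
  std::cout << c10::toString(k) << '\n';                                   // CONSERVATIVE
  std::cout << c10::toString(c10::AliasAnalysisKind::FROM_SCHEMA) << '\n'; // FROM_SCHEMA
}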
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ #include <functional>
4
+
5
+ namespace c10 {
6
+
7
+ class RegistrationHandleRAII final {
8
+ public:
9
+ explicit RegistrationHandleRAII(std::function<void()> onDestruction)
10
+ : onDestruction_(std::move(onDestruction)) {}
11
+
12
+ ~RegistrationHandleRAII() {
13
+ if (onDestruction_) {
14
+ onDestruction_();
15
+ }
16
+ }
17
+
18
+ RegistrationHandleRAII(const RegistrationHandleRAII&) = delete;
19
+ RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete;
20
+
21
+ RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept
22
+ : onDestruction_(std::move(rhs.onDestruction_)) {
23
+ rhs.onDestruction_ = nullptr;
24
+ }
25
+
26
+ RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept {
27
+ onDestruction_ = std::move(rhs.onDestruction_);
28
+ rhs.onDestruction_ = nullptr;
29
+ return *this;
30
+ }
31
+
32
+ private:
33
+ std::function<void()> onDestruction_;
34
+ };
35
+
36
+ }
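
A minimal sketch of the RAII contract (assumes only this header): the callback fires exactly once, when the handle that currently owns it is destroyed.

#include <ATen/core/dispatch/RegistrationHandleRAII.h>
#include <iostream>
#include <utility>

int main() {
  {
    c10::RegistrationHandleRAII handle([] { std::cout << "deregistered\n"; });
    // Moving transfers ownership of the callback; `handle` becomes inert.
    c10::RegistrationHandleRAII owner = std::move(handle);
  } // "deregistered" is printed here, once, when `owner` goes out of scope.
}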
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from enum_tag.h
4
+
5
+ namespace at {
6
+ // Enum of valid tags obtained from the entries in tags.yaml
7
+ enum class Tag {
8
+ core,
9
+ data_dependent_output,
10
+ dynamic_output_shape,
11
+ generated,
12
+ inplace_view,
13
+ needs_fixed_stride_order,
14
+ nondeterministic_bitwise,
15
+ nondeterministic_seeded,
16
+ pointwise,
17
+ pt2_compliant_tag,
18
+ view_copy
19
+ };
20
+ }
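
For illustration, a hypothetical helper (not part of the generated header; is_nondeterministic_tag is an invented name) that groups two of the tags above:

#include <ATen/core/enum_tag.h>

// True for tags that mark an operator as nondeterministic in some way.
constexpr bool is_nondeterministic_tag(at::Tag t) {
  return t == at::Tag::nondeterministic_seeded ||
         t == at::Tag::nondeterministic_bitwise;
}

static_assert(is_nondeterministic_tag(at::Tag::nondeterministic_seeded), "");
static_assert(!is_nondeterministic_tag(at::Tag::pointwise), "");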
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h ADDED
@@ -0,0 +1,101 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+
5
+ #include <utility>
6
+
7
+ namespace c10 {
8
+
9
+ struct EnumType;
10
+ using EnumTypePtr = std::shared_ptr<EnumType>;
11
+ using EnumNameValue = std::pair<std::string, IValue>;
12
+ struct TORCH_API EnumType : public NamedType {
13
+ friend struct Type;
14
+ static const TypeKind Kind = TypeKind::EnumType;
15
+
16
+ static EnumTypePtr create(
17
+ const c10::QualifiedName& qualified_class_name,
18
+ TypePtr value,
19
+ std::vector<EnumNameValue> enum_names_values,
20
+ std::weak_ptr<::torch::jit::CompilationUnit> cu) {
21
+ switch (value->kind()) {
22
+ case TypeKind::IntType:
23
+ case TypeKind::FloatType:
24
+ case TypeKind::StringType:
25
+ return EnumTypePtr(new EnumType(
26
+ qualified_class_name,
27
+ std::move(value),
28
+ std::move(enum_names_values),
29
+ std::move(cu)));
30
+ default:
31
+ AT_ERROR(
32
+ "Cannot create Enum with value type '",
33
+ value->str(),
34
+ "', only int, float and string are supported");
35
+ }
36
+ }
37
+
38
+ std::string str() const override {
39
+ return "Enum<" + annotation_str() + ">";
40
+ }
41
+
42
+ std::string repr_str() const override {
43
+ return str();
44
+ }
45
+
46
+ const TypePtr& getValueType() const {
47
+ return value_type_;
48
+ }
49
+
50
+ bool equals(const Type& rhs) const override {
51
+ if (auto* enum_rhs = rhs.castRaw<EnumType>()) {
52
+ return name().value() == enum_rhs->name().value() &&
53
+ *getValueType() == *(enum_rhs->getValueType()) &&
54
+ this->compilation_unit() == enum_rhs->compilation_unit();
55
+ }
56
+ return false;
57
+ }
58
+
59
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
60
+
61
+ std::shared_ptr<const ::torch::jit::CompilationUnit> compilation_unit()
62
+ const {
63
+ auto cu = cu_.lock();
64
+ return cu;
65
+ }
66
+
67
+ const QualifiedName& qualifiedClassName() const {
68
+ return name().value();
69
+ }
70
+
71
+ at::ArrayRef<TypePtr> containedTypes() const override {
72
+ return value_type_;
73
+ }
74
+
75
+ const at::ArrayRef<EnumNameValue> enumNamesValues() const {
76
+ return enum_names_values_;
77
+ }
78
+
79
+ private:
80
+ EnumType(
81
+ c10::QualifiedName qualified_class_name,
82
+ TypePtr value_type,
83
+ std::vector<EnumNameValue> enum_names_values,
84
+ std::weak_ptr<torch::jit::CompilationUnit> cu)
85
+ : NamedType(TypeKind::EnumType, std::move(qualified_class_name)),
86
+ value_type_(std::move(value_type)),
87
+ enum_names_values_(std::move(enum_names_values)),
88
+ cu_(std::move(cu)) {}
89
+
90
+ std::string annotation_str_impl(
91
+ C10_UNUSED TypePrinter printer = nullptr) const override {
92
+ const auto& n = name().value();
93
+ return n.qualifiedName();
94
+ }
95
+
96
+ TypePtr value_type_;
97
+ std::vector<EnumNameValue> enum_names_values_;
98
+ std::weak_ptr<::torch::jit::CompilationUnit> cu_;
99
+ };
100
+
101
+ } // namespace c10
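
A hedged sketch of how EnumType::create could be used to model an int-valued enum. The qualified name, the helper name make_color_enum, and the empty CompilationUnit pointer are illustrative simplifications, not how TorchScript itself wires this up:

#include <ATen/core/enum_type.h>
#include <ATen/core/jit_type.h>  // for IntType

c10::EnumTypePtr make_color_enum() {
  std::vector<c10::EnumNameValue> values = {
      {"RED", c10::IValue(static_cast<int64_t>(0))},
      {"GREEN", c10::IValue(static_cast<int64_t>(1))},
  };
  // Only int, float and string value types are accepted (see create() above).
  return c10::EnumType::create(
      c10::QualifiedName("__torch__.Color"),
      c10::IntType::get(),
      std::move(values),
      std::weak_ptr<torch::jit::CompilationUnit>());
}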
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/function.h ADDED
@@ -0,0 +1,111 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/function_schema.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/qualified_name.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/FunctionRef.h>
8
+
9
+ namespace c10 {
10
+ struct FunctionSchema;
11
+ };
12
+
13
+ namespace at {
14
+ TORCH_API void launch(std::function<void()> func);
15
+ }
16
+
17
+ namespace torch {
18
+ namespace jit {
19
+
20
+ struct Graph;
21
+ struct Code;
22
+
23
+ namespace mobile {
24
+ struct Code;
25
+ }
26
+
27
+ using Stack = std::vector<at::IValue>;
28
+ using Kwargs = std::unordered_map<std::string, at::IValue>;
29
+ struct RecursiveMethodCallError : public std::exception {};
30
+ using TaskLauncher = std::function<void(std::function<void()>)>;
31
+
32
+ TORCH_API void preoptimizeGraph(std::shared_ptr<Graph>& graph, bool disable_autocast=false);
33
+
34
+ // A Function is a pure Graph with no implicit `self` object bound.
35
+ // It contains schema information and the executor that manages the
36
+ // execution of the function. Method is a wrapper around an
37
+ // underlying Function that also provides a `self` object.
38
+ struct TORCH_API Function {
39
+ Function() = default;
40
+ Function(const Function&) = default;
41
+ Function& operator=(const Function&) = default;
42
+ Function(Function&&) noexcept = default;
43
+ Function& operator=(Function&&) noexcept = default;
44
+ virtual c10::string_view doc_string() const {
45
+ static constexpr c10::string_view no_doc_string = "";
46
+ return no_doc_string;
47
+ }
48
+
49
+ virtual bool isGraphFunction() const {
50
+ return false;
51
+ }
52
+
53
+ virtual void run(Stack& stack) = 0;
54
+
55
+ virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync(
56
+ Stack& /*stack*/,
57
+ C10_UNUSED TaskLauncher taskLauncher = at::launch) {
58
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
59
+ return {};
60
+ }
61
+
62
+ at::IValue operator()(
63
+ Stack stack,
64
+ const Kwargs& kwargs = Kwargs()) {
65
+ getSchema().checkAndNormalizeInputs(stack, kwargs);
66
+ run(stack);
67
+ return stack.front();
68
+ }
69
+
70
+ virtual const c10::QualifiedName& qualname() const = 0;
71
+
72
+ const std::string& name() const {
73
+ return qualname().name();
74
+ }
75
+
76
+ // if this isn't yet defined, run its method_creator function
77
+ virtual void ensure_defined() = 0;
78
+
79
+ virtual const c10::FunctionSchema& getSchema() const = 0;
80
+
81
+ virtual size_t num_inputs() const = 0;
82
+
83
+ virtual Function& setSchema(c10::FunctionSchema schema) = 0;
84
+
85
+ // call() defines how different interpreter implementations interact with
86
+ // Function objects. Basically interpreters need to provide a callback to
87
+ // communicate to Functions what to do if provided a Code object.
88
+ // Alternatively we could design the signature to return an optional Code
89
+ // object, but that requires special handling of the null case in the
90
+ // interpreter, and the fallback behavior is defined not by the interpreter
91
+ // but by the Functions themselves, so a callback approach is more reasonable than
92
+ // returning values.
93
+ // If call() returns true, then callback completes successfully, otherwise
94
+ // call() returns false.
95
+
96
+ // Overload for server interpreter, a bailout size is needed for graph executor.
97
+ virtual bool call(Stack&, c10::optional<size_t>, c10::function_ref<void(const Code&)>) {
98
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
99
+ return false;
100
+ }
101
+
102
+ // Overload for mobile interpreter.
103
+ virtual bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) {
104
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
105
+ return false;
106
+ }
107
+
108
+ virtual ~Function() = default;
109
+ };
110
+ } // namespace jit
111
+ } // namespace torch
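
A brief sketch of the calling convention (the Function is assumed to come from elsewhere, e.g. a CompilationUnit, and call_unary is an invented name): inputs travel on a Stack of IValues and outputs replace them, which is exactly what operator() above wraps.

#include <ATen/core/function.h>

at::IValue call_unary(torch::jit::Function& fn, const at::IValue& arg) {
  // operator() checks and normalizes the inputs against the schema, runs the
  // function, and returns the first element left on the stack.
  return fn({arg});
}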
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/core/GradMode.h>
5
+
6
+ namespace at {
7
+ using GradMode = c10::GradMode;
8
+ using AutoGradMode = c10::AutoGradMode;
9
+ using NoGradGuard = c10::NoGradGuard;
10
+ }
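
A short usage sketch of the aliases above (assumes a full ATen build; inference_step is an invented name). The guard disables gradient recording for its scope, mirroring Python's torch.no_grad():

#include <ATen/core/grad_mode.h>
#include <ATen/ATen.h>

at::Tensor inference_step(const at::Tensor& x) {
  at::NoGradGuard no_grad;   // GradMode::is_enabled() is false within this scope
  return at::relu(x) * 2;    // no autograd graph is recorded for this work
}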
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h ADDED
@@ -0,0 +1,358 @@
1
+ #pragma once
2
+ #include <vector>
3
+ #include <cstdint>
4
+ #include <string>
5
+ #include <unordered_map>
6
+ #include <algorithm>
7
+
8
+ #include <c10/macros/Macros.h>
9
+
10
+ #include <ATen/core/aten_interned_strings.h>
11
+ #include <ATen/core/symbol.h>
12
+
13
+ namespace c10 {
14
+
15
+ #define FORALL_NS_SYMBOLS(_) \
16
+ _(namespaces, prim) \
17
+ _(namespaces, prims) \
18
+ _(namespaces, nvprims) \
19
+ _(namespaces, aten) \
20
+ _(namespaces, cuda) \
21
+ _(namespaces, onnx) \
22
+ _(namespaces, attr) \
23
+ _(namespaces, scope) \
24
+ _(namespaces, user) \
25
+ _(namespaces, _caffe2) \
26
+ _(namespaces, dimname) \
27
+ _(namespaces, namespaces) \
28
+ _(prim, Assign) \
29
+ _(prim, BroadcastingChunk) \
30
+ _(prim, BroadcastSizes) \
31
+ _(prim, ReductionSizes) \
32
+ _(prim, Constant) \
33
+ _(prim, ChunkSizes) \
34
+ _(prim, ConstantMKLDNNTensor) \
35
+ _(prim, BroadcastMKLDNNTensors) \
36
+ _(prim, MKLDNNGroup) \
37
+ _(prim, MKLDNNHardSwish) \
38
+ _(prim, MKLDNNHardSigmoid) \
39
+ _(prim, MKLDNNHardTanh) \
40
+ _(prim, MKLDNNClamp) \
41
+ _(prim, StaticRuntimeCopyOuts) \
42
+ _(prim, Drop) \
43
+ _(prim, Eval) \
44
+ _(prim, Expand) /* onnx */ \
45
+ _(prim, FusionGroup) \
46
+ _(prim, CudaFusionGroup) \
47
+ _(prim, CudaFusionGuard) \
48
+ _(prim, oneDNNFusionGroup) \
49
+ _(prim, oneDNNFusionGuard) \
50
+ _(prim, FunctionalGraph) \
51
+ _(prim, add_optional) \
52
+ _(prim, view_copy) \
53
+ _(prim, permute_copy) \
54
+ _(prim, reshape_copy) \
55
+ _(prim, squeeze_copy) \
56
+ _(prim, t_copy) \
57
+ _(prim, transpose_copy) \
58
+ _(prim, unsqueeze_copy) \
59
+ _(prim, flatten_copy) \
60
+ _(prim, expand_copy) \
61
+ _(prim, expand_as_copy) \
62
+ _(prim, DifferentiableGraph) \
63
+ _(prim, TensorExprGroup) \
64
+ _(prim, TensorExprDynamicGroup) \
65
+ _(prim, StaticSubgraph) \
66
+ _(prim, If) \
67
+ _(prim, Jump) /* debug */ \
68
+ _(prim, JumpNZ) /* debug */ \
69
+ _(prim, JumpZ) /* debug */ \
70
+ _(prim, Load) \
71
+ _(prim, Loop) \
72
+ _(prim, Param) \
73
+ _(prim, PackPadded) /* onnx */ \
74
+ _(prim, PadPacked) /* onnx */ \
75
+ _(prim, Placeholder) /* debug */ \
76
+ _(prim, Print) \
77
+ _(prim, EmptyListLiteral) \
78
+ _(prim, LegacyTypedConstructor) \
79
+ _(prim, PythonOp) \
80
+ _(prim, IgnoredPythonOp) \
81
+ _(prim, Reverse) \
82
+ _(prim, Return) \
83
+ _(prim, ReturnStmt) \
84
+ _(prim, BreakStmt) \
85
+ _(prim, ContinueStmt) \
86
+ _(prim, ComprehensionScope) \
87
+ _(prim, Store) \
88
+ _(prim, AutogradZero) \
89
+ _(prim, AutogradAnyNonZero) \
90
+ _(prim, AutogradAllNonZero) \
91
+ _(prim, AutogradAllZero) \
92
+ _(prim, Starred) \
93
+ _(prim, TupleConstruct) \
94
+ _(prim, TupleUnpack) \
95
+ _(prim, TupleIndex) \
96
+ _(prim, TupleSlice) \
97
+ _(prim, ListConstruct) \
98
+ _(prim, ListUnpack) \
99
+ _(prim, DictConstruct) \
100
+ _(prim, ModuleContainerIndex) \
101
+ _(prim, EnumName) \
102
+ _(prim, EnumValue) \
103
+ _(prim, StringIndex) \
104
+ _(prim, NumToTensor) \
105
+ _(prim, Uninitialized) \
106
+ _(prim, VarConcat) \
107
+ _(prim, VarStack) \
108
+ _(prim, With) \
109
+ _(prim, Enter) \
110
+ _(prim, Exit) \
111
+ _(prim, IfThenElse) \
112
+ _(aten, Bool) \
113
+ _(aten, Int) \
114
+ _(aten, FloatImplicit) \
115
+ _(aten, ComplexImplicit) \
116
+ _(aten, IntImplicit) \
117
+ _(aten, ScalarImplicit) \
118
+ _(aten, Float) \
119
+ _(aten, Complex) \
120
+ _(aten, str) \
121
+ _(aten, Delete) \
122
+ _(prim, device) \
123
+ _(prim, dtype) \
124
+ _(prim, layout) \
125
+ _(prim, id) \
126
+ _(prim, requires_grad) \
127
+ _(prim, MakeTestTensor) /* test */ \
128
+ _(prim, AutogradAdd) \
129
+ _(prim, GradOf) \
130
+ _(aten, grad) \
131
+ _(aten, backward) \
132
+ _(prim, Guard) \
133
+ _(prim, BailOut) \
134
+ _(prim, TypeCheck) \
135
+ _(prim, RequiresGradCheck) \
136
+ _(prim, FallbackGraph) \
137
+ _(prim, FusedConcat) \
138
+ _(prim, ConstantChunk) \
139
+ _(prim, MMTreeReduce) \
140
+ _(prim, MMBatchSide) \
141
+ _(prim, list) \
142
+ _(prim, dict) \
143
+ _(prim, min) \
144
+ _(prim, max) \
145
+ _(prim, abs) \
146
+ _(aten, divmod) \
147
+ _(prim, zip) \
148
+ _(prim, enumerate) \
149
+ _(prim, range) \
150
+ _(prim, rangelist) \
151
+ _(prim, isinstance) \
152
+ _(prim, tolist) \
153
+ _(prim, unchecked_cast) \
154
+ _(aten, _grad_sum_to_size) \
155
+ _(aten, _size_if_not_equal) \
156
+ _(aten, _ncf_unsqueeze) \
157
+ _(aten, warn) \
158
+ _(aten, sorted) \
159
+ _(aten, floordiv) \
160
+ _(aten, __range_length) \
161
+ _(aten, __derive_index) \
162
+ _(aten, __round_to_zero_floordiv) \
163
+ _(aten, is_scripting) \
164
+ _(aten, _unwrap_optional) \
165
+ _(prim, fork) \
166
+ _(prim, awaitable) \
167
+ _(prim, forkClosure) \
168
+ _(prim, awaitableClosure) \
169
+ _(prim, awaitable_nowait) \
170
+ _(prim, awaitable_wait) \
171
+ _(prim, RaiseException) \
172
+ _(prim, Closure) \
173
+ _(prim, CreateObject) \
174
+ _(prim, SetAttr) \
175
+ _(prim, GetAttr) \
176
+ _(prim, HasAttr) \
177
+ _(prim, profile) \
178
+ _(prim, profile_ivalue) \
179
+ _(prim, AddStatValue) \
180
+ _(prim, TimePoint) \
181
+ _(prim, CallFunction) \
182
+ _(prim, CallMethod) \
183
+ _(prim, LoopContinuation) \
184
+ _(prim, annotate) \
185
+ _(prim, TracedModuleForward) \
186
+ _(prim, TracedFork) \
187
+ _(prim, TracedAttr) \
188
+ _(prim, rpc_async) \
189
+ _(prim, rpc_sync) \
190
+ _(prim, rpc_remote) \
191
+ _(prim, is_cuda) \
192
+ _(aten, append) \
193
+ _(aten, as_tensor) \
194
+ _(aten, adaptive_avg_pool2d_backward) \
195
+ _(aten, dim) \
196
+ _(aten, format) \
197
+ _(aten, percentFormat) \
198
+ _(aten, __not__) \
199
+ _(aten, __is__) \
200
+ _(aten, __isnot__) \
201
+ _(aten, _ger) \
202
+ _(aten, __getitem__) \
203
+ _(aten, _set_item) \
204
+ _(aten, manual_seed) \
205
+ _(aten, device) \
206
+ _(aten, hash) \
207
+ _(aten, len) \
208
+ _(aten, list) \
209
+ _(aten, dict) \
210
+ _(aten, wait) \
211
+ _(aten, save) \
212
+ _(aten, keys) \
213
+ _(aten, ord) \
214
+ _(aten, chr) \
215
+ _(aten, hex) \
216
+ _(aten, oct) \
217
+ _(aten, clear) \
218
+ _(aten, setdefault) \
219
+ _(aten, bin) \
220
+ _(aten, pop) \
221
+ _(aten, insert) \
222
+ _(aten, tensor) \
223
+ _(prim, unchecked_unwrap_optional) \
224
+ _(aten, __contains__) \
225
+ _(prim, BailoutTemplate) \
226
+ _(prim, grad) \
227
+ _(cuda, _set_device) \
228
+ _(cuda, set_stream) \
229
+ _(cuda, _current_device) \
230
+ _(cuda, synchronize) \
231
+ _(aten, has_torch_function) \
232
+ _(aten, is_autocast_enabled) \
233
+ _(aten, is_autocast_cpu_enabled) \
234
+ _(aten, is_autocast_xla_enabled) \
235
+ FORALL_ATEN_BASE_SYMBOLS(_) \
236
+ _(onnx, Add) \
237
+ _(onnx, Concat) \
238
+ _(onnx, Constant) \
239
+ _(onnx, ConstantFill) \
240
+ _(onnx, Div) \
241
+ _(onnx, GRU) \
242
+ _(onnx, Gather) \
243
+ _(onnx, Gemm) \
244
+ _(onnx, LSTM) \
245
+ _(onnx, MatMul) \
246
+ _(onnx, Min) \
247
+ _(onnx, Max) \
248
+ _(onnx, Mul) \
249
+ _(onnx, Pow) \
250
+ _(onnx, RNN) \
251
+ _(onnx, Shape) \
252
+ _(onnx, Size) \
253
+ _(onnx, Slice) \
254
+ _(onnx, Softmax) \
255
+ _(onnx, Squeeze) \
256
+ _(onnx, Sub) \
257
+ _(onnx, Transpose) \
258
+ _(onnx, Unsqueeze) \
259
+ _(onnx, Loop) \
260
+ _(onnx, If) \
261
+ _(onnx, Reshape) \
262
+ _(onnx, Expand) \
263
+ _(onnx, Equal) \
264
+ _(onnx, Greater) \
265
+ _(onnx, GreaterOrEqual) \
266
+ _(onnx, Less) \
267
+ _(onnx, LessOrEqual) \
268
+ _(onnx, Not) \
269
+ _(aten, ATen) \
270
+ _(onnx, Split) \
271
+ _(onnx, ConstantOfShape) \
272
+ _(onnx, Cast) \
273
+ _(onnx, Mod) \
274
+ _(onnx, Sqrt) \
275
+ _(onnx, SplitToSequence) \
276
+ _(onnx, SequenceAt) \
277
+ _(onnx, SequenceConstruct) \
278
+ _(onnx, SequenceEmpty) \
279
+ _(onnx, SequenceInsert) \
280
+ _(onnx, SequenceErase) \
281
+ _(onnx, ConcatFromSequence) \
282
+ _(onnx, Identity) \
283
+ _(onnx, SoftmaxCrossEntropyLoss) \
284
+ _(onnx, NegativeLogLikelihoodLoss) \
285
+ _(onnx, LogSoftmax) \
286
+ _(onnx, ReduceL1) \
287
+ _(onnx, ReduceL2) \
288
+ _(onnx, Conv) \
289
+ _(onnx, BatchNormalization) \
290
+ _(onnx, ReduceMean) \
291
+ _(onnx, ReduceProd) \
292
+ _(onnx, Relu) \
293
+ _(onnx, Neg) \
294
+ _(onnx, NonZero) \
295
+ _(onnx, Range) \
296
+ _(onnx, Tile) \
297
+ _(onnx, Where) \
298
+ _(onnx, Optional) \
299
+ _(onnx, OptionalGetElement) \
300
+ _(onnx, OptionalHasElement) \
301
+ FORALL_ATTR_BASE_SYMBOLS(_) \
302
+ _(attr, Subgraph) \
303
+ _(attr, ReverseSubgraph) \
304
+ _(attr, f_real_outputs) \
305
+ _(attr, df_input_vjps) \
306
+ _(attr, df_input_captured_inputs) \
307
+ _(attr, df_input_captured_outputs) \
308
+ _(attr, df_output_vjps) \
309
+ _(attr, axes) \
310
+ _(attr, symbolic_shape_inputs) \
311
+ _(attr, allow_stack_outputs) \
312
+ _(attr, striding_inputs_desc) \
313
+ _(attr, striding_outputs_desc) \
314
+ _(attr, broadcast) \
315
+ _(attr, direction) \
316
+ _(attr, ends) \
317
+ _(attr, inplace) \
318
+ _(attr, input_as_shape) \
319
+ _(attr, is_zero) \
320
+ _(attr, num_none) \
321
+ _(attr, num_present) \
322
+ _(attr, perm) \
323
+ _(attr, starts) \
324
+ _(attr, profiled_type) \
325
+ _(attr, transA) \
326
+ _(attr, transB) \
327
+ _(attr, name) \
328
+ _(attr, module) \
329
+ _(attr, beg) \
330
+ _(attr, idx) \
331
+ _(attr, split) \
332
+ _(attr, slot) \
333
+ _(attr, kinds) \
334
+ _(attr, types) \
335
+ _(attr, scope) \
336
+ _(attr, keepdims) \
337
+ _(attr, cache_id) \
338
+ _(attr, new_axis) \
339
+ _(attr, warn_id) \
340
+ _(attr, output_layouts) \
341
+ _(attr, allowzero) \
342
+ _(attr, seen_none) \
343
+ _(attr, overload_name) \
344
+ _(attr, node_stack_idx)
345
+
346
+ enum class _keys : unique_t {
347
+ #define DEFINE_KEY(ns, s) ns##_##s,
348
+ FORALL_NS_SYMBOLS(DEFINE_KEY)
349
+ #undef DEFINE_KEY
350
+ num_symbols
351
+ };
352
+
353
+ #define DEFINE_SYMBOL(ns, s) \
354
+ namespace ns { constexpr Symbol s(static_cast<unique_t>(_keys::ns##_##s)); }
355
+ FORALL_NS_SYMBOLS(DEFINE_SYMBOL)
356
+ #undef DEFINE_SYMBOL
357
+
358
+ } // namespace c10
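
A small sketch of how the generated symbols are used (assumes libtorch): each _(ns, s) entry expands to a constexpr c10::ns::s Symbol, and arbitrary qualified strings can also be interned at runtime.

#include <ATen/core/interned_strings.h>
#include <iostream>

int main() {
  c10::Symbol constant = c10::prim::Constant;     // generated by FORALL_NS_SYMBOLS
  std::cout << constant.toQualString() << '\n';   // "prim::Constant"

  // Runtime interning: equal strings map to the same Symbol, so comparisons
  // are cheap integer compares.
  c10::Symbol add = c10::Symbol::fromQualString("aten::add");
  std::cout << (add.ns() == c10::namespaces::aten) << '\n';  // 1
}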
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type.h ADDED
@@ -0,0 +1,2425 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/custom_class.h>
4
+ #include <ATen/core/jit_type_base.h>
5
+ #include <ATen/core/TensorBody.h>
6
+ #include <ATen/core/functional.h>
7
+ #include <ATen/core/symbol.h>
8
+ #include <ATen/core/type_factory.h>
9
+ #include <ATen/core/qualified_name.h>
10
+ #include <c10/util/TypeList.h>
11
+ #include <c10/util/Optional.h>
12
+ #include <c10/core/SymFloat.h>
13
+ #include <c10/core/SymBool.h>
14
+ #include <c10/core/Device.h>
15
+
16
+ #include <array>
17
+ #include <memory>
18
+ #include <ostream>
19
+ #include <sstream>
20
+ #include <type_traits>
21
+ #include <utility>
22
+
23
+ namespace torch {
24
+ namespace jit {
25
+ struct Function;
26
+ } // namespace jit
27
+ } // namespace torch
28
+
29
+ namespace c10 {
30
+
31
+ template<class Key, class Value>
32
+ class Dict;
33
+ struct IValue;
34
+ struct FunctionSchema;
35
+ struct NamedType;
36
+ using OptNameList = c10::optional<std::vector<std::string>>;
37
+
38
+ void standardizeVectorForUnion(std::vector<TypePtr>& reference, std::vector<TypePtr>* to_fill);
39
+ void standardizeVectorForUnion(std::vector<TypePtr>* to_flatten);
40
+
41
+ inline bool is_contiguous_strides(
42
+ const IntArrayRef sizes,
43
+ const IntArrayRef strides) {
44
+ int n_dim = static_cast<int>(sizes.size());
45
+ if (n_dim == 0) {
46
+ return true;
47
+ }
48
+
49
+ if (strides[n_dim - 1] != 1) {
50
+ return false;
51
+ }
52
+
53
+ for (int i = n_dim - 2; i >= 0; i--) {
54
+ if (strides[i] != strides[i + 1] * sizes[i + 1]) {
55
+ return false;
56
+ }
57
+ }
58
+ return true;
59
+ }
60
+
61
+ struct AnyType;
62
+ using AnyTypePtr = SingletonTypePtr<AnyType>;
63
+ // Any is the top of the type hierarchy, all other types are subtypes
64
+ // T <: Any, forall T
65
+ struct TORCH_API AnyType : public Type {
66
+ bool equals(const Type& rhs) const override {
67
+ return rhs.kind() == kind();
68
+ }
69
+ std::string str() const override {
70
+ return "Any";
71
+ }
72
+ static const TypeKind Kind = TypeKind::AnyType;
73
+ // global singleton
74
+ static AnyTypePtr get();
75
+
76
+ private:
77
+ AnyType() : Type(TypeKind::AnyType) {}
78
+ };
79
+
80
+ inline std::string toString(const Type& type) {
81
+ return type.str();
82
+ }
83
+
84
+ // Shim for compatibility with code that uses TypePtr.
85
+ inline std::string toString(const TypePtr& typePtr) {
86
+ return toString(*typePtr);
87
+ }
88
+
89
+ inline bool operator!=(const Type& lhs, const Type& rhs) {
90
+ return !(lhs == rhs);
91
+ }
92
+
93
+ // common base for all types that have a single sub element
94
+ // e.g. Future[T], Optional[T], List[T]
95
+ template <TypeKind K, typename T>
96
+ struct SingleElementType : public SharedType {
97
+ static const TypeKind Kind = K;
98
+
99
+ const TypePtr& getElementType() const {
100
+ return elem;
101
+ }
102
+
103
+ bool hasFreeVariables() const override {
104
+ return getElementType()->hasFreeVariables();
105
+ }
106
+
107
+ at::ArrayRef<TypePtr> containedTypes() const override {
108
+ return elem;
109
+ }
110
+
111
+ bool equals(const Type& rhs) const override {
112
+ if (auto rhs_ = rhs.cast<T>()) {
113
+ return *getElementType() == *rhs_->getElementType();
114
+ }
115
+ return false;
116
+ }
117
+
118
+ protected:
119
+ SingleElementType(TypePtr elem) : SharedType(Kind), elem(std::move(elem)) {
120
+ if (!this->elem) {
121
+ throw std::runtime_error(c10::str(
122
+ "Can not create ", typeKindToString(Kind), " with None type"));
123
+ }
124
+ }
125
+
126
+ private:
127
+ TypePtr elem;
128
+ };
129
+
130
+ struct UnionType;
131
+ using UnionTypePtr = std::shared_ptr<UnionType>;
132
+ struct TORCH_API UnionType : public SharedType {
133
+ friend struct Type;
134
+
135
+ static const TypeKind Kind = TypeKind::UnionType;
136
+
137
+ bool isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const override;
138
+
139
+ std::string str() const override;
140
+
141
+ static UnionTypePtr create(std::vector<TypePtr> reference);
142
+
143
+ bool equals(const Type& rhs) const override;
144
+
145
+ bool isUnionType() const override {
146
+ return true;
147
+ }
148
+
149
+ at::ArrayRef<TypePtr> containedTypes() const override {
150
+ return types_;
151
+ }
152
+
153
+ // For testing purposes only
154
+ at::ArrayRef<TypePtr> getTypes() const {
155
+ return types_;
156
+ }
157
+
158
+ TypePtr createWithContained(std::vector<TypePtr> contained_types) const override {
159
+ return create(std::move(contained_types));
160
+ }
161
+
162
+ bool canHoldType(const Type& type) const;
163
+
164
+ bool hasFreeVariables() const override {
165
+ return has_free_variables_;
166
+ }
167
+
168
+ c10::optional<TypePtr> toOptional() const;
169
+
170
+ c10::optional<TypePtr> subtractTypeSet(std::vector<TypePtr>& to_subtract) const;
171
+
172
+ protected:
173
+ explicit UnionType(std::vector<TypePtr> types, TypeKind kind=TypeKind::UnionType);
174
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override;
175
+ std::string unionStr(
176
+ TypePrinter printer = nullptr,
177
+ bool is_annotation_str = false) const;
178
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
179
+ bool has_free_variables_;
180
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
181
+ std::vector<TypePtr> types_;
182
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
183
+ bool can_hold_none_;
184
+
185
+ };
186
+
187
+ struct OptionalType;
188
+ using OptionalTypePtr = std::shared_ptr<OptionalType>;
189
+ // This type represents an optional type. There is one `Optional` for
190
+ // each element type. `Optional[T]` can accept both `T` and
191
+ // `None`(`c10::nullopt` in C++)
192
+ // Subtype hierarchy for Optional:
193
+ // - Optional[T] <: Optional[R] iff T <: R
194
+ // - T <: Optional[R] if T <: R
195
+ // - None <: Optional[T] for all T
196
+ // - Optional[T] == Union[T, None] for all T
197
+ struct TORCH_API OptionalType : public UnionType {
198
+ static OptionalTypePtr create(const TypePtr& contained);
199
+
200
+ static const TypeKind Kind = TypeKind::OptionalType;
201
+
202
+ friend struct Type;
203
+
204
+ bool equals(const Type& rhs) const override;
205
+
206
+ const TypePtr& getElementType() const {
207
+ return contained_;
208
+ }
209
+
210
+ at::ArrayRef<TypePtr> containedTypes() const override {
211
+ return contained_;
212
+ }
213
+
214
+ std::string str() const override {
215
+ std::stringstream ss;
216
+ ss << getElementType()->str() << "?";
217
+ return ss.str();
218
+ }
219
+
220
+ TypePtr createWithContained(
221
+ std::vector<TypePtr> contained_types) const override {
222
+ AT_ASSERT(contained_types.size() == 1);
223
+ return create(contained_types[0]);
224
+ }
225
+
226
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
227
+
228
+ bool isUnionType() const override {
229
+ return true;
230
+ }
231
+
232
+ // common cast Optional[Tensor] for undefined tensor type
233
+ static TypePtr ofTensor();
234
+ //
235
+ // global singleton
236
+ static TypePtr get(TypePtr inner);
237
+
238
+ private:
239
+ explicit OptionalType(const TypePtr& contained);
240
+
241
+ TypePtr contained_;
242
+
243
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
244
+ std::stringstream ss;
245
+ ss << "Optional[" << getElementType()->annotation_str(std::move(printer)) << "]";
246
+ return ss.str();
247
+ }
248
+ };
249
+
250
+ template <typename T>
251
+ inline c10::optional<T> merge_primitive(
252
+ const c10::optional<T>& a,
253
+ const c10::optional<T>& b) {
254
+ if (a.has_value() && b.has_value() && a.value() == b.value()) {
255
+ return a;
256
+ }
257
+ return c10::optional<T>{};
258
+ }
259
+
260
+ // If we see `a + b + c` and know that a, b, and c are the same size and have
261
+ // two dimensions (WxH), then we can generate a fused kernel for them. That
262
+ // fused kernel would likely have indexing math to handle both the W and H
263
+ // dimensions. However, if we knew the WxH dimensions were contiguous, we can
264
+ // pretend like we only have a single dimension, simplifying the indexing logic.
265
+ // This can be performed even if the dimensions are transposed,
266
+ // as long as a, b, and c are transposed in the same way.
267
+ // We'd like to have the compiler be able to do this dimensionality reduction,
268
+ // but simply knowing sizes is not enough.
269
+ // We can extend profiling to also record stride information.
270
+ // Rather than recording specific strides,
271
+ // we can simply order the strides from smallest to largest with
272
+ // `stride_indices`. A contiguity marker on the smallest stride (c0) indicates
273
+ // the stride is precisely 1, otherwise a contiguity marker means that $stride_n
274
+ // = size_{n-1}*stride_{n-1}$
275
+ struct TORCH_API Stride {
276
+ Stride() = default;
277
+ Stride(
278
+ const c10::optional<size_t>& stride_index,
279
+ c10::optional<bool> contiguous,
280
+ const c10::optional<size_t>& stride)
281
+ : stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {}
282
+
283
+ bool operator==(const Stride& b) const {
284
+ return stride_index_ == b.stride_index_ && contiguous_ == b.contiguous_ &&
285
+ stride_ == b.stride_;
286
+ }
287
+
288
+ bool isComplete() const {
289
+ return stride_index_ && contiguous_ && stride_;
290
+ }
291
+
292
+ c10::optional<size_t> stride_index_;
293
+ c10::optional<bool> contiguous_;
294
+ c10::optional<size_t> stride_;
295
+ };
296
+
297
+ template <>
298
+ inline c10::optional<Stride> merge_primitive(
299
+ const c10::optional<Stride>& a,
300
+ const c10::optional<Stride>& b) {
301
+ c10::optional<Stride> left = a;
302
+ c10::optional<Stride> right = b;
303
+ if (!left.has_value()) {
304
+ left = {Stride()};
305
+ }
306
+ if (!right.has_value()) {
307
+ right = {Stride()};
308
+ }
309
+
310
+ auto merged_index =
311
+ merge_primitive(left->stride_index_, right->stride_index_);
312
+ auto merged_cont = merge_primitive(left->contiguous_, right->contiguous_);
313
+ auto merged_stride = merge_primitive(left->stride_, right->stride_);
314
+ auto r = Stride(merged_index, merged_cont, merged_stride);
315
+ // normalize
316
+ if (!r.stride_index_.has_value() && !r.contiguous_.has_value() &&
317
+ !r.stride_.has_value()) {
318
+ return c10::optional<Stride>{};
319
+ }
320
+
321
+ return r;
322
+ }
323
+
324
+ struct TORCH_API ShapeSymbol {
325
+ // needed for use in `std::map`
326
+ ShapeSymbol() : value_(-1) {}
327
+ // is this symbol a fixed/static dimension
328
+ bool is_static() const {
329
+ return value_ >= 0;
330
+ };
331
+ bool operator==(const ShapeSymbol& b) const {
332
+ return value_ == b.value_;
333
+ }
334
+ bool operator<(const ShapeSymbol& b) const {
335
+ return value_ < b.value_;
336
+ }
337
+
338
+ static ShapeSymbol fromStaticSize(int64_t val) {
339
+ return ShapeSymbol(val);
340
+ }
341
+ int64_t static_size() const {
342
+ TORCH_CHECK(is_static());
343
+ return value_;
344
+ };
345
+
346
+ int64_t value() const {
347
+ return value_;
348
+ };
349
+
350
+ static ShapeSymbol newSymbol() {
351
+ return fromStaticSize(-static_cast<int64_t>(++num_symbols));
352
+ };
353
+ friend TORCH_API std::ostream& operator<<(
354
+ std::ostream& os,
355
+ const ShapeSymbol& s);
356
+
357
+ private:
358
+ ShapeSymbol(int64_t val) : value_(val) {}
359
+ int64_t value_;
360
+ static std::atomic<size_t> num_symbols;
361
+ };
362
+
363
+ inline ShapeSymbol merge_primitive(
364
+ const ShapeSymbol& a,
365
+ const ShapeSymbol& b) {
366
+ if (a.is_static() && b.is_static() && a == b) {
367
+ return a;
368
+ }
369
+ return ShapeSymbol::newSymbol();
370
+ }
371
+
372
+ // Shape of a Tensor represented with ShapeSymbol's. Unranked, ranked unknown
373
+ // dims, partially known and fully known shapes are all supported.
374
+ struct TORCH_API SymbolicShape {
375
+ // Unranked shape constructor.
376
+ SymbolicShape() : dims_(c10::nullopt) {}
377
+
378
+ // Known rank but unknown dimensions.
379
+ SymbolicShape(c10::optional<size_t> rank) : dims_(c10::nullopt) {
380
+ if(!rank) {
381
+ return;
382
+ }
383
+
384
+ std::vector<ShapeSymbol> shape_symbols;
385
+ shape_symbols.reserve(*rank);
386
+ for(size_t i = 0; i < *rank; ++i) {
387
+ shape_symbols.push_back(ShapeSymbol::newSymbol());
388
+ }
389
+ dims_ = shape_symbols;
390
+ }
391
+
392
+ // Mix of known and unknown ranks
393
+ SymbolicShape(const std::vector<c10::optional<int64_t>>& dims) {
394
+ std::vector<ShapeSymbol> shape_symbols;
395
+ shape_symbols.reserve(dims.size());
396
+ for(c10::optional<int64_t> dim: dims) {
397
+ if(!dim) {
398
+ shape_symbols.push_back(ShapeSymbol::newSymbol());
399
+ } else {
400
+ shape_symbols.push_back(ShapeSymbol::fromStaticSize(*dim));
401
+ }
402
+ }
403
+ dims_ = shape_symbols;
404
+ }
405
+
406
+ void dump() const;
407
+
408
+ SymbolicShape(std::vector<ShapeSymbol> dims) : dims_(std::move(dims)) {}
409
+
410
+ SymbolicShape(c10::IntArrayRef dims) {
411
+ std::vector<ShapeSymbol> shape_symbols;
412
+ shape_symbols.reserve(dims.size());
413
+ for(int64_t dim : dims) {
414
+ shape_symbols.push_back(ShapeSymbol::fromStaticSize(dim));
415
+ }
416
+ dims_ = shape_symbols;
417
+ }
418
+
419
+ ShapeSymbol operator[](size_t i) const {
420
+ if (!dims_) {
421
+ throw std::runtime_error("Rank isn't fixed");
422
+ }
423
+ return (*dims_).at(i);
424
+ }
425
+
426
+ ShapeSymbol at(size_t i) const {
427
+ if (!dims_) {
428
+ throw std::runtime_error("Rank isn't fixed");
429
+ }
430
+ return (*dims_).at(i);
431
+ }
432
+
433
+ // Returns rank or nullopt in case of unranked shape.
434
+ c10::optional<size_t> rank() const {
435
+ if(!dims_) {
436
+ return c10::nullopt;
437
+ }
438
+ return dims_->size();
439
+ }
440
+
441
+ c10::optional<std::vector<ShapeSymbol>> sizes() const {
442
+ return dims_;
443
+ }
444
+
445
+ c10::optional<std::vector<bool>> symbolicDims() const {
446
+ if (!dims_) {
447
+ return c10::nullopt;
448
+ }
449
+ auto symbolic_dims = std::vector<bool>();
450
+ for (const ShapeSymbol& s : *dims_) {
451
+ symbolic_dims.push_back(!s.is_static());
452
+ }
453
+ return symbolic_dims;
454
+ }
455
+
456
+ // Checks whether the shape is fully defined/complete, ie. rank and sizes
457
+ // of every dimension are known.
458
+ bool isComplete() const {
459
+ if(!dims_) {
460
+ return false;
461
+ }
462
+ for(auto d : *dims_) {
463
+ if(!d.is_static()) {
464
+ return false;
465
+ }
466
+ }
467
+ return true;
468
+ }
469
+
470
+ // Create new SymbolicShape that is result of merging self and another
471
+ // SymbolicShape. Only dimensions that are static and equal will be
472
+ // preserved.
473
+ // If either of two shapes are of unknown rank or they have unmatching rank,
474
+ // result will be unranked.
475
+ SymbolicShape merge(const SymbolicShape& other) const;
476
+
477
+ friend bool operator==(const SymbolicShape& lhs, const SymbolicShape& rhs) {
478
+ return lhs.dims_ == rhs.dims_;
479
+ }
480
+
481
+ friend bool operator!=(const SymbolicShape& lhs, const SymbolicShape& rhs) {
482
+ return !(lhs == rhs);
483
+ }
484
+
485
+ private:
486
+ c10::optional<std::vector<ShapeSymbol>> dims_;
487
+ };
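// Editorial sketch (not part of this header): constructing and merging
// SymbolicShapes, using only the declarations above; the function name is
// invented for illustration.
inline bool example_symbolic_shape_merge() {
  c10::SymbolicShape a(std::vector<c10::optional<int64_t>>{2, c10::nullopt, 4});
  c10::SymbolicShape b(c10::IntArrayRef({2, 3, 4}));
  // Static dimensions that agree (0 and 2) survive the merge; dimension 1
  // stays symbolic, so the merged shape is ranked but not complete.
  c10::SymbolicShape m = a.merge(b);
  return m.rank() && *m.rank() == 3 && !m.isComplete();
}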
488
+
489
+ namespace detail {
490
+ inline bool isComplete(const Stride& s) {
491
+ return s.isComplete();
492
+ }
493
+
494
+ template<typename T>
495
+ inline bool isComplete(const T& /*t*/) {
496
+ return true;
497
+ }
498
+ }
499
+
500
+ template <typename T>
501
+ struct VaryingShape {
502
+ using ListOfOptionalElements = std::vector<c10::optional<T>>;
503
+ VaryingShape(const std::vector<T>& vec)
504
+ : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {}
505
+
506
+ VaryingShape(c10::ArrayRef<T> vec)
507
+ : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {}
508
+
509
+ VaryingShape(c10::optional<size_t> size = c10::nullopt) : dims_(c10::nullopt) {
510
+ if (size) {
511
+ dims_ = ListOfOptionalElements(*size);
512
+ }
513
+ }
514
+
515
+ VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {}
516
+
517
+ VaryingShape(size_t size) : VaryingShape(c10::optional<size_t>(size)) {}
518
+
519
+ bool operator==(const VaryingShape& other) const {
520
+ return dims_ == other.dims_;
521
+ }
522
+
523
+ const c10::optional<T> &operator[](size_t i) const {
524
+ if (!dims_) {
525
+ throw std::runtime_error("Rank isn't fixed");
526
+ }
527
+ return (*dims_).at(i);
528
+ }
529
+
530
+ c10::optional<size_t> size() const {
531
+ if (!dims_) {
532
+ return c10::nullopt;
533
+ }
534
+ const auto& dims = dims_.value();
535
+ return dims.size();
536
+ }
537
+
538
+ const c10::optional<ListOfOptionalElements>& sizes() const {
539
+ return dims_;
540
+ }
541
+
542
+ TORCH_API VaryingShape merge(const VaryingShape& other) const;
543
+
544
+ c10::optional<std::vector<T>> concrete_sizes() const {
545
+ if (!dims_) {
546
+ return c10::nullopt;
547
+ }
548
+ std::vector<T> sizes;
549
+ sizes.reserve(dims_.value().size());
550
+ for (auto d : *dims_) {
551
+ if (!d) {
552
+ return c10::nullopt;
553
+ }
554
+ sizes.push_back(d.value());
555
+ }
556
+ return sizes;
557
+ }
558
+
559
+ bool isComplete() const {
560
+ if (!dims_) {
561
+ return false;
562
+ }
563
+ for (auto d : *dims_) {
564
+ if (!d || !detail::isComplete(*d)) {
565
+ return false;
566
+ }
567
+ }
568
+ return true;
569
+ }
570
+
571
+ private:
572
+ c10::optional<ListOfOptionalElements> dims_;
573
+ };
574
+
575
+ struct TensorType;
576
+ // TODO: investigate making this SingletonOrSharedTypePtr<TensorType>
577
+ using TensorTypePtr = std::shared_ptr<TensorType>;
578
+ // This type represents a single Tensor with a specific size
579
+ struct TORCH_API TensorType : public SharedType {
580
+ static TensorTypePtr create(const at::Tensor& t);
581
+
582
+ // used by TensorType::create(size_t dim), which is in turn used by
583
+ // shape_analysis.cpp
584
+ static TensorTypePtr create(
585
+ c10::optional<at::ScalarType> scalar_type,
586
+ c10::optional<Device> device,
587
+ const VaryingShape<int64_t>& sizes,
588
+ const VaryingShape<int64_t>& strides,
589
+ c10::optional<bool> requires_grad,
590
+ c10::optional<bool> undefined = false,
591
+ bool tensor_contiguity = false);
592
+
593
+ static TensorTypePtr create(
594
+ c10::optional<at::ScalarType> scalar_type,
595
+ c10::optional<Device> device,
596
+ const SymbolicShape& sizes,
597
+ const VaryingShape<Stride>& stride_,
598
+ c10::optional<bool> requires_grad,
599
+ c10::optional<bool> undefined = false);
600
+
601
+ static TensorTypePtr create(
602
+ c10::optional<at::ScalarType> scalar_type,
603
+ c10::optional<Device> device,
604
+ c10::optional<size_t> dim,
605
+ c10::optional<bool> requires_grad);
606
+
607
+ // overloaded create variadic template argument as it could not distinguish
608
+ // initializer list
609
+ static TensorTypePtr createContiguous(
610
+ at::ScalarType scalar_type,
611
+ at::Device device,
612
+ at::IntArrayRef sizes);
613
+
614
+ static TypePtr fromNumberType(const Type& typ);
615
+ static TypePtr fromBoolType();
616
+
617
+ c10::optional<size_t> dim() const {
618
+ return sizes().size();
619
+ }
620
+
621
+ VaryingShape<int64_t> sizes() const;
622
+
623
+ VaryingShape<int64_t> strides() const;
624
+
625
+ const VaryingShape<Stride>& stride_properties() const {
626
+ return strides_;
627
+ }
628
+
629
+ c10::optional<at::Device> device() const {
630
+ return device_;
631
+ }
632
+ c10::optional<at::ScalarType> scalarType() const {
633
+ return scalar_type_;
634
+ }
635
+ c10::optional<bool> requiresGrad() const {
636
+ return requires_grad_;
637
+ }
638
+ bool requires_grad() const override {
639
+ return requires_grad_ ? *requires_grad_ : true;
640
+ }
641
+
642
+ bool equals(const Type& rhs) const override;
643
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
644
+
645
+ std::string str() const override;
646
+
647
+ std::string repr_str() const override {
648
+ if (isInferredType()) {
649
+ return str() + " (inferred)";
650
+ } else {
651
+ return str();
652
+ }
653
+ }
654
+
655
+ c10::optional<size_t> numel() const {
656
+ size_t prod = 1;
657
+ const auto& shape = sizes();
658
+
659
+ for (size_t i = 0; i < shape.size(); i++) {
660
+ if (!shape[i]) {
661
+ return c10::optional<size_t>{};
662
+ }
663
+ prod *= shape[i].value();
664
+ }
665
+ return prod;
666
+ }
667
+
668
+ TensorTypePtr withRequiresGrad(c10::optional<bool> s) {
669
+ auto copy = clone();
670
+ copy->requires_grad_ = s;
671
+ return copy;
672
+ }
673
+
674
+ TensorTypePtr withScalarType(c10::optional<ScalarType> st) {
675
+ auto copy = clone();
676
+ copy->scalar_type_ = st;
677
+ return copy;
678
+ }
679
+
680
+ TensorTypePtr withDim(c10::optional<size_t> d) {
681
+ auto copy = clone();
682
+ // withDim is only used by the legacy executor
683
+ // that only cares about the rank, so create dummy symbols.
684
+ copy->sizes_ = SymbolicShape(d);
685
+ copy->strides_ = VaryingShape<Stride>(d);
686
+ return copy;
687
+ }
688
+
689
+ TensorTypePtr withStrides(VaryingShape<Stride> sstrides) const {
690
+ auto cloned = clone();
691
+ cloned->strides_ = std::move(sstrides);
692
+ return cloned;
693
+ }
694
+
695
+ TensorTypePtr withSizesStrides(
696
+ at::IntArrayRef sizes,
697
+ at::IntArrayRef strides) const {
698
+ auto cloned = clone();
699
+ auto ssizes = SymbolicShape(sizes);
700
+ cloned->sizes_ = ssizes;
701
+ cloned->strides_ = computeStrideProps(sizes, strides);
702
+ return cloned;
703
+ }
704
+
705
+ TensorTypePtr withSymbolicShapes(SymbolicShape ssizes) const {
706
+ auto cloned = clone();
707
+ cloned->sizes_ = std::move(ssizes);
708
+ return cloned;
709
+ }
710
+
711
+ TensorTypePtr withSizes(at::IntArrayRef sizes) const {
712
+ return withSizesStrides(
713
+ sizes, contiguousStridesOf(sizes));
714
+ }
715
+
716
+ TensorTypePtr withDevice(const c10::optional<at::Device> device) const {
717
+ auto copy = clone();
718
+ copy->device_ = device;
719
+ return copy;
720
+ }
721
+
722
+ TensorTypePtr dimensionedOnly() const {
723
+ auto copy = clone();
724
+ copy->sizes_ = SymbolicShape(sizes().size());
725
+ copy->strides_ = VaryingShape<Stride>(sizes().size());
726
+ return copy;
727
+ }
728
+
729
+ TensorTypePtr contiguous() const {
730
+ auto cloned = clone();
731
+ TORCH_INTERNAL_ASSERT(sizes().concrete_sizes().has_value());
732
+ auto strides = computeStrideProps(
733
+ *sizes().concrete_sizes(),
734
+ contiguousStridesOf(*sizes().concrete_sizes()));
735
+ cloned->strides_ = strides;
736
+ return cloned;
737
+ }
738
+
739
+ const SymbolicShape& symbolic_sizes() const;
740
+
741
+ TensorTypePtr merge(const TensorType& other, bool merge_sizes = true) const;
742
+
743
+ bool matchTensor(const at::Tensor& t);
744
+
745
+ // is all information about the type specified except for autograd?
746
+ // This replaces the notion of a 'CompleteTensorType' that used to exist
747
+ // in the type-hierarchy. Excluding require_grad and undefined allows
748
+ // this to match the old behavior.
749
+ bool isComplete() const {
750
+ return scalar_type_ && device_ && sizes_.isComplete() && strides_.isComplete();
751
+ }
752
+
753
+ bool isInferredType() const {
754
+ return is_inferred_;
755
+ }
756
+
757
+ static TensorTypePtr getInferred() {
758
+ static auto valueInferred = TensorType::create(
759
+ /*scalar_type=*/{},
760
+ /*device=*/{},
761
+ /*sizes=*/SymbolicShape(),
762
+ /*stride=*/VaryingShape<Stride>{},
763
+ /*requires_grad=*/{},
764
+ /*undefined=*/false);
765
+ valueInferred->is_inferred_ = true;
766
+ return valueInferred;
767
+ }
768
+
769
+ // this property is used by GuardElimination
770
+ // please see `checkInputs` for more details
771
+ bool isSummarized() const {
772
+ return !(isComplete() && requiresGrad().has_value() &&
773
+ undefined().has_value());
774
+ }
775
+
776
+ TensorTypePtr withUndefined() {
777
+ auto r = clone();
778
+ r->undefined_ = true;
779
+ return r;
780
+ }
781
+
782
+ TensorTypePtr withPossiblyUndefined() {
783
+ auto r = clone();
784
+ r->undefined_ = c10::nullopt;
785
+ return r;
786
+ }
787
+
788
+ c10::optional<bool> undefined() const { return undefined_; }
789
+
790
+ static const TensorTypePtr& get();
791
+
792
+ static const TypeKind Kind = TypeKind::TensorType;
793
+
794
+ static std::vector<int64_t> contiguousStridesOf(
795
+ at::IntArrayRef in_sizes,
796
+ at::MemoryFormat memory_format = MemoryFormat::Contiguous) {
797
+ auto contiguous_fn = [](const at::IntArrayRef& sizes,
798
+ const std::vector<int64_t>& dim_order) {
799
+ std::vector<int64_t> strides(sizes.size());
800
+ if (sizes.empty()) // zero-dim case
801
+ return strides;
802
+
803
+ strides[dim_order[0]] = 1;
804
+ for (size_t i = 1; i < dim_order.size(); i++) {
805
+ auto cur_dim = dim_order[i];
806
+ auto pre_dim = dim_order[i - 1];
807
+ strides[cur_dim] = strides[pre_dim] * sizes[pre_dim];
808
+ }
809
+ return strides;
810
+ };
811
+
812
+ std::vector<int64_t> dim_order(in_sizes.size());
813
+ if (memory_format == MemoryFormat::ChannelsLast) {
814
+ dim_order = {1, 3, 2, 0};
815
+ } else if (memory_format == MemoryFormat::ChannelsLast3d) {
816
+ dim_order = {1, 4, 3, 2, 0};
817
+ } else {
818
+ auto ndims = in_sizes.size();
819
+ for (size_t i = 0; i < ndims; i++) {
820
+ dim_order[i] = static_cast<int64_t>(ndims - i - 1); // Reverse
821
+ }
822
+ }
823
+ return contiguous_fn(in_sizes, dim_order);
824
+ }
825
+
826
+ private:
827
+ TensorType(
828
+ c10::optional<at::ScalarType> scalar_type,
829
+ c10::optional<Device> device,
830
+ SymbolicShape sizes,
831
+ VaryingShape<Stride> strides,
832
+ c10::optional<bool> requires_grad,
833
+ c10::optional<bool> undefined = false);
834
+
835
+ TensorTypePtr clone() const {
836
+ return TensorTypePtr(new TensorType(
837
+ scalar_type_, device_, sizes_, strides_, requires_grad_, undefined_));
838
+ }
839
+
840
+ static VaryingShape<Stride> computeStrideProps(
841
+ at::IntArrayRef sizes,
842
+ at::IntArrayRef strides,
843
+ bool tensor_contiguity = false);
844
+
845
+ c10::optional<at::ScalarType> scalar_type_;
846
+ c10::optional<at::Device> device_;
847
+ SymbolicShape sizes_;
848
+ VaryingShape<Stride> strides_;
849
+ c10::optional<bool> requires_grad_;
850
+ // we exploit the fact that certain tensors must be zero in the autograd to
851
+ // optimize gradient computation. Such zero tensors are currently implemented
852
+ // with `UndefinedTensorImpl.` They can be handled only by special operators
853
+ // (e.g. `AutogradAdd`) and their `Tensor::defined()` property returns false.
854
+ // Normally, `undefined_` is set to false, unless a type was created
855
+ // with `withUndefined`
856
+ // This will also mean that `undefined` tensors will fail
857
+ // `subtypeOf(TensorType::get())` check
858
+ // undefined_ may become `c10::nullopt` if the tensor was observed to be both
859
+ // defined and undefined. However, no tensor type starts out with
860
+ // `undefined_` set to `c10::nullopt`
861
+ c10::optional<bool> undefined_;
862
+ // Represents whether or not this type was inferred.
863
+ bool is_inferred_ = false;
864
+ };
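For reference, a minimal usage sketch of the stride helper declared above (the concrete sizes are illustrative, not taken from the source): contiguousStridesOf assigns stride 1 to the innermost dimension of the chosen dimension order and multiplies outward by the previous size.

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // Row-major contiguous strides for sizes [2, 3, 4]: expected {12, 4, 1}.
      auto rm = c10::TensorType::contiguousStridesOf({2, 3, 4});
      // Channels-last strides for an NCHW size [8, 3, 32, 32]: expected {3072, 1, 96, 3}.
      auto cl = c10::TensorType::contiguousStridesOf(
          {8, 3, 32, 32}, at::MemoryFormat::ChannelsLast);
      for (auto s : rm) std::cout << s << ' ';
      std::cout << '\n';
      for (auto s : cl) std::cout << s << ' ';
      std::cout << '\n';
    }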
865
+
866
+ struct ListType;
867
+ using ListTypePtr = std::shared_ptr<ListType>;
868
+ struct TORCH_API ListType
869
+ : public SingleElementType<TypeKind::ListType, ListType> {
870
+ // It's not exactly a singleton, but there should be exactly one instance of
871
+ // List[T] for every T
872
+ friend struct Type;
873
+ template <typename... T>
874
+ static ListTypePtr create(T&&... all) {
875
+ return ListTypePtr(
876
+ new ListType(std::forward<T>(all)...)); // NOLINT(modernize-make-shared)
877
+ }
878
+
879
+ std::string str() const override {
880
+ std::stringstream ss;
881
+ ss << getElementType()->str() << "[]";
882
+ return ss.str();
883
+ }
884
+ TypePtr createWithContained(
885
+ std::vector<TypePtr> contained_types) const override {
886
+ return create(std::move(contained_types.at(0)));
887
+ }
888
+
889
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
890
+
891
+ // global singleton
892
+ // Given an inner type T and an identifier,
893
+ // this function will return the global singleton type pointer
894
+ // the type List<T>.
895
+ // The extra "identifier" argument is needed because we have multiple container types
896
+ // that all re-use this function (List<T>, array<T, N>, etc.)
897
+ static TypePtr get(const std::string& identifier, TypePtr inner);
898
+
899
+ // common case: List[Tensor]
900
+ static ListTypePtr ofTensors();
901
+ static ListTypePtr ofOptionalTensors();
902
+ static ListTypePtr ofInts();
903
+ static ListTypePtr ofSymInts();
904
+ static ListTypePtr ofFloats();
905
+ static ListTypePtr ofComplexDoubles();
906
+ static ListTypePtr ofBools();
907
+ static ListTypePtr ofStrings();
908
+ static ListTypePtr ofNumbers();
909
+
910
+ private:
911
+ ListType(TypePtr elem) : SingleElementType(std::move(elem)) {}
912
+
913
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
914
+ std::stringstream ss;
915
+ ss << "List[" << getElementType()->annotation_str(std::move(printer)) << "]";
916
+ return ss.str();
917
+ }
918
+ };
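A small sketch of how the ListType factories above could be used; it assumes only the declarations in this header, with output following the str()/annotation_str() formatting defined here.

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // There is one shared List[T] instance per element type T.
      auto tensors = c10::ListType::ofTensors();    // element type: Tensor
      auto ints = c10::ListType::ofInts();          // element type: int
      std::cout << tensors->str() << '\n';          // "Tensor[]"
      std::cout << ints->annotation_str() << '\n';  // "List[int]"
    }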
919
+
920
+ struct DictType;
921
+ using DictTypePtr = std::shared_ptr<DictType>;
922
+ struct TORCH_API DictType : public SharedType {
923
+ friend struct Type;
924
+ static const TypeKind Kind = TypeKind::DictType;
925
+
926
+ static DictTypePtr create(TypePtr key, TypePtr value) {
927
+ auto kind = key->kind();
928
+ if (auto dyn = key->castRaw<DynamicType>()) {
929
+ kind = dyn->dynamicKind();
930
+ }
931
+ switch (kind) {
932
+ case TypeKind::AnyType:
933
+ case TypeKind::IntType:
934
+ case TypeKind::BoolType:
935
+ case TypeKind::FloatType:
936
+ case TypeKind::ComplexType:
937
+ case TypeKind::StringType:
938
+ case TypeKind::TensorType:
939
+ case TypeKind::DeviceObjType:
940
+ return DictTypePtr(new DictType(std::move(key), std::move(value)));
941
+ default:
942
+ AT_ERROR(
943
+ "Cannot create dict for key type '",
944
+ key->str(),
945
+ "', only int, float, complex, Tensor, device and string keys are supported");
946
+ }
947
+ }
948
+
949
+ // aligned with the format in FunctionSchema
950
+ std::string str() const override {
951
+ std::stringstream ss;
952
+ ss << "Dict(" << getKeyType()->str() << ", " << getValueType()->str()
953
+ << ")";
954
+ return ss.str();
955
+ }
956
+
957
+ TypePtr createWithContained(
958
+ std::vector<TypePtr> contained_types) const override {
959
+ if (contained_types.size() != 2) {
960
+ throw std::runtime_error("Expected 2 contained types");
961
+ }
962
+ return create(std::move(contained_types.at(0)), std::move(contained_types.at(1)));
963
+ }
964
+
965
+ const TypePtr& getKeyType() const {
966
+ return types.at(0);
967
+ }
968
+
969
+ const TypePtr& getValueType() const {
970
+ return types.at(1);
971
+ }
972
+
973
+ bool hasFreeVariables() const override {
974
+ return has_free_variables;
975
+ }
976
+
977
+ at::ArrayRef<TypePtr> containedTypes() const override {
978
+ return types;
979
+ }
980
+
981
+ bool equals(const Type& rhs) const override {
982
+ if (auto* dict_rhs = rhs.castRaw<DictType>()) {
983
+ return *getKeyType() == *(dict_rhs->getKeyType()) &&
984
+ *getValueType() == *(dict_rhs->getValueType());
985
+ }
986
+ return false;
987
+ }
988
+
989
+ // global singleton
990
+ // Given inner key and value types and an identifier,
991
+ // this function will return the global singleton type pointer
992
+ // the type Dict<K, V>.
993
+ // The extra "identifier" argument is needed because we have multiple container types
994
+ // that all re-use this function (Dict<K, V> and unordered_map<K, V>)
995
+ static TypePtr get(const std::string& identifier, TypePtr key, TypePtr val);
996
+
997
+ private:
998
+ DictType(TypePtr key, TypePtr value)
999
+ : SharedType(TypeKind::DictType),
1000
+ has_free_variables(
1001
+ key->hasFreeVariables() || value->hasFreeVariables()) {
1002
+ types.reserve(2);
1003
+ types.push_back(std::move(key));
1004
+ types.push_back(std::move(value));
1005
+ }
1006
+
1007
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override;
1008
+
1009
+ std::vector<TypePtr> types;
1010
+ bool has_free_variables;
1011
+ };
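A hedged sketch of the key-type restriction enforced by DictType::create above; the chosen key and value types are illustrative.

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // str keys and Tensor values are in the allow-list handled by the switch.
      auto d = c10::DictType::create(c10::StringType::get(), c10::TensorType::get());
      std::cout << d->str() << '\n';                // "Dict(str, Tensor)"
      std::cout << d->getKeyType()->str() << '\n';  // "str"

      // A List[int] key is not in the allow-list, so create() is expected to throw.
      try {
        c10::DictType::create(c10::ListType::ofInts(), c10::TensorType::get());
      } catch (const std::exception& e) {
        std::cout << "rejected key type: " << e.what() << '\n';
      }
    }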
1012
+
1013
+ struct FutureType;
1014
+ using FutureTypePtr = std::shared_ptr<FutureType>;
1015
+
1016
+ struct TORCH_API FutureType
1017
+ : public SingleElementType<TypeKind::FutureType, FutureType> {
1018
+ friend struct Type;
1019
+ template <typename... T>
1020
+ static FutureTypePtr create(TypePtr elem) {
1021
+ return FutureTypePtr(
1022
+ new FutureType(std::move(elem))); // NOLINT(modernize-make-shared)
1023
+ }
1024
+
1025
+ std::string str() const override {
1026
+ std::stringstream ss;
1027
+ ss << "Future(" << getElementType()->str() << ")";
1028
+ return ss.str();
1029
+ }
1030
+ TypePtr createWithContained(
1031
+ std::vector<TypePtr> contained_types) const override {
1032
+ return create(std::move(contained_types.at(0)));
1033
+ }
1034
+
1035
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
1036
+ if (Type::isSubtypeOfExt(rhs, why_not)) {
1037
+ return true;
1038
+ }
1039
+ if (auto rhs_ = rhs.castRaw<FutureType>()) {
1040
+ return getElementType()->isSubtypeOfExt(*rhs_->getElementType(), why_not);
1041
+ }
1042
+ return false;
1043
+ }
1044
+
1045
+ private:
1046
+ FutureType(TypePtr elem) : SingleElementType(std::move(elem)) {}
1047
+
1048
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
1049
+ std::stringstream ss;
1050
+ ss << "Future[" << getElementType()->annotation_str(std::move(printer)) << "]";
1051
+ return ss.str();
1052
+ }
1053
+ };
1054
+
1055
+ struct AwaitType;
1056
+ using AwaitTypePtr = std::shared_ptr<AwaitType>;
1057
+
1058
+ struct TORCH_API AwaitType
1059
+ : public SingleElementType<TypeKind::AwaitType, AwaitType> {
1060
+ friend struct Type;
1061
+ template <typename... T>
1062
+ static AwaitTypePtr create(TypePtr elem) {
1063
+ return AwaitTypePtr(
1064
+ new AwaitType(std::move(elem))); // NOLINT(modernize-make-shared)
1065
+ }
1066
+
1067
+ std::string str() const override {
1068
+ std::stringstream ss;
1069
+ ss << "Await(" << getElementType()->str() << ")";
1070
+ return ss.str();
1071
+ }
1072
+ TypePtr createWithContained(
1073
+ std::vector<TypePtr> contained_types) const override {
1074
+ return create(std::move(contained_types.at(0)));
1075
+ }
1076
+
1077
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
1078
+ if (Type::isSubtypeOfExt(rhs, why_not)) {
1079
+ return true;
1080
+ }
1081
+ if (auto rhs_ = rhs.castRaw<AwaitType>()) {
1082
+ return getElementType()->isSubtypeOfExt(*rhs_->getElementType(), why_not);
1083
+ }
1084
+ return false;
1085
+ }
1086
+
1087
+ private:
1088
+ AwaitType(TypePtr elem) : SingleElementType(std::move(elem)) {}
1089
+
1090
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
1091
+ std::stringstream ss;
1092
+ ss << "Await[" << getElementType()->annotation_str(printer) << "]";
1093
+ return ss.str();
1094
+ }
1095
+ };
1096
+
1097
+ struct RRefType;
1098
+ using RRefTypePtr = std::shared_ptr<RRefType>;
1099
+
1100
+ struct TORCH_API RRefType
1101
+ : public SingleElementType<TypeKind::RRefType, RRefType> {
1102
+ friend struct Type;
1103
+ template <typename... T>
1104
+ static RRefTypePtr create(TypePtr elem) {
1105
+ return RRefTypePtr(
1106
+ new RRefType(std::move(elem))); // NOLINT(modernize-make-shared)
1107
+ }
1108
+
1109
+ std::string str() const override {
1110
+ std::stringstream ss;
1111
+ ss << "RRef(" << getElementType()->str() << ")";
1112
+ return ss.str();
1113
+ }
1114
+ TypePtr createWithContained(
1115
+ std::vector<TypePtr> contained_types) const override {
1116
+ return create(std::move(contained_types.at(0)));
1117
+ }
1118
+
1119
+ private:
1120
+ RRefType(TypePtr elem) : SingleElementType(std::move(elem)) {}
1121
+
1122
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
1123
+ std::stringstream ss;
1124
+ ss << "RRef[" << getElementType()->annotation_str(std::move(printer)) << "]";
1125
+ return ss.str();
1126
+ }
1127
+ };
1128
+
1129
+ // Any should never appear in a named type like a class, namedtuple or
1130
+ // interface. If it does, then dynamic type information will be lost in the
1131
+ // Pickler, leading to hard-to-track-down bugs that will only occur
1132
+ // after saving or loading a model. This is because we rely on the
1133
+ // static types in named types to reconstruct type tags of loaded
1134
+ // values. Lifting this restriction requires solving the serialization
1135
+ // problem first.
1136
+ TORCH_API void checkNoAny(
1137
+ const Type& base,
1138
+ const char* what,
1139
+ const std::string& attrname,
1140
+ const TypePtr& attrtype);
1141
+
1142
+ struct TupleType;
1143
+ using TupleTypePtr = std::shared_ptr<TupleType>;
1144
+ using NameList = std::vector<std::string>;
1145
+ // This type represents a Tuple
1146
+ struct TORCH_API TupleType : public NamedType {
1147
+
1148
+ static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
1149
+ const std::vector<std::string>& field_names,
1150
+ const std::vector<TypePtr>& field_types,
1151
+ std::vector<IValue>& field_defaults);
1152
+
1153
+ static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
1154
+ const std::vector<std::string>& field_names,
1155
+ const std::vector<TypePtr>& field_types);
1156
+
1157
+ static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
1158
+ const std::vector<c10::string_view>& field_names,
1159
+ const std::vector<TypePtr>& field_types);
1160
+
1161
+ static TupleTypePtr create(
1162
+ std::vector<TypePtr> types) {
1163
+ return TupleTypePtr(new TupleType(
1164
+ std::move(types),
1165
+ c10::nullopt,
1166
+ nullptr)); // NOLINT(modernize-make-shared)
1167
+ }
1168
+ static TupleTypePtr create() {
1169
+ return create({});
1170
+ }
1171
+
1172
+ at::ArrayRef<TypePtr> elements() const {
1173
+ return elements_;
1174
+ }
1175
+
1176
+ bool equals(const Type& rhs) const override;
1177
+ bool isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const override;
1178
+
1179
+ std::string str() const override;
1180
+ bool hasFreeVariables() const override {
1181
+ return has_free_variables_;
1182
+ }
1183
+ at::ArrayRef<TypePtr> containedTypes() const override {
1184
+ return elements_;
1185
+ }
1186
+ TypePtr createWithContained(
1187
+ std::vector<TypePtr> contained_types) const override {
1188
+ return std::shared_ptr<TupleType>(
1189
+ new TupleType(std::move(contained_types), name(), schema()));
1190
+ }
1191
+ const std::shared_ptr<FunctionSchema>& schema() const {
1192
+ return schema_;
1193
+ }
1194
+ c10::optional<std::vector<c10::string_view>> names() const;
1195
+
1196
+ static const TypeKind Kind = TypeKind::TupleType;
1197
+
1198
+ private:
1199
+ template <typename S>
1200
+ static TupleTypePtr createWithSpec(
1201
+ const c10::optional<c10::QualifiedName>& name,
1202
+ const std::vector<S>& field_names,
1203
+ const std::vector<TypePtr>& field_types,
1204
+ std::vector<IValue>& field_defaults);
1205
+
1206
+ TupleType(
1207
+ std::vector<TypePtr> elements_,
1208
+ c10::optional<c10::QualifiedName> name,
1209
+ std::shared_ptr<FunctionSchema> schema);
1210
+
1211
+ bool compare(
1212
+ const Type& rhs,
1213
+ const std::function<bool(const Type&, const Type&)>& fn) const {
1214
+ if (rhs.kind() != kind()) {
1215
+ return false;
1216
+ }
1217
+
1218
+ const auto& l_elements = elements();
1219
+ const auto& r_elements = rhs.castRaw<TupleType>()->elements();
1220
+ if (l_elements.size() != r_elements.size())
1221
+ return false;
1222
+ for (size_t i = 0; i < l_elements.size(); ++i) {
1223
+ if (!fn(*l_elements[i], *r_elements[i]))
1224
+ return false;
1225
+ }
1226
+ return true;
1227
+ }
1228
+
1229
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override;
1230
+
1231
+ std::vector<TypePtr> elements_;
1232
+ bool has_free_variables_;
1233
+ std::shared_ptr<FunctionSchema> schema_;
1234
+ };
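A short sketch of building an unnamed tuple type with TupleType::create; the element list is illustrative.

    #include <ATen/core/jit_type.h>
    #include <iostream>
    #include <vector>

    int main() {
      // Build Tuple[int, float, Tensor] from its element types.
      std::vector<c10::TypePtr> elems;
      elems.emplace_back(c10::IntType::get());
      elems.emplace_back(c10::FloatType::get());
      elems.emplace_back(c10::TensorType::get());
      auto tup = c10::TupleType::create(std::move(elems));
      std::cout << tup->annotation_str() << '\n';   // e.g. "Tuple[int, float, Tensor]"
      std::cout << tup->elements().size() << '\n';  // 3
    }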
1235
+
1236
+ // the common supertype of all Enums, only used in operator registration.
1237
+ // EnumType <: AnyEnumType for all Enums
1238
+ struct AnyEnumType;
1239
+ using AnyEnumTypePtr = SingletonTypePtr<AnyEnumType>;
1240
+ struct TORCH_API AnyEnumType final : public Type {
1241
+ bool equals(const Type& rhs) const override {
1242
+ return rhs.kind() == kind();
1243
+ }
1244
+ std::string str() const override {
1245
+ return "AnyEnumType";
1246
+ }
1247
+ static const TypeKind Kind = TypeKind::AnyEnumType;
1248
+ // global singleton
1249
+ static AnyEnumTypePtr get();
1250
+ private:
1251
+ AnyEnumType()
1252
+ : Type(TypeKind::AnyEnumType) {}
1253
+ };
1254
+
1255
+ struct NumberType;
1256
+ using NumberTypePtr = SingletonTypePtr<NumberType>;
1257
+ // This type represents a Python number
1258
+ // Subtype hierarchy for Number Types (NumberType as the base type):
1259
+ // IntType <: NumberType
1260
+ // FloatType <: NumberType
1261
+ // ComplexType <:NumberType
1262
+ //
1263
+ // WARNING: if you add a new subtype of NumberType that is not
1264
+ // represented by a global singleton, you need to change NumberTypePtr
1265
+ // to a SingletonOrSharedTypePtr and deal with NumberType needing to
1266
+ // both inherit and not inherit from SharedType!
1267
+ struct TORCH_API NumberType : public Type {
1268
+ bool equals(const Type& rhs) const override;
1269
+
1270
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
1271
+
1272
+ std::string str() const override {
1273
+ return "Scalar"; // match what PythonArgParser says for clarity
1274
+ }
1275
+ static const TypeKind Kind = TypeKind::NumberType;
1276
+ // global singleton
1277
+ static NumberTypePtr get();
1278
+
1279
+ protected:
1280
+ NumberType(TypeKind kind = TypeKind::NumberType) : Type(kind) {}
1281
+
1282
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1283
+ return "number"; // technically not a valid python type, but
1284
+ // we need to use it when parsing back in annotations
1285
+ // for implicit conversions
1286
+ }
1287
+ };
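The subtype hierarchy described in the comment above NumberType can be checked with isSubtypeOf; IntType and FloatType are declared further down in this header. A minimal sketch:

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      std::cout << std::boolalpha;
      // IntType <: NumberType, but not the other way around.
      std::cout << c10::IntType::get()->isSubtypeOf(*c10::NumberType::get()) << '\n';  // true
      std::cout << c10::NumberType::get()->isSubtypeOf(*c10::IntType::get()) << '\n';  // false
      // FloatType and IntType are unrelated siblings under NumberType.
      std::cout << c10::FloatType::get()->isSubtypeOf(*c10::IntType::get()) << '\n';   // false
    }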
1288
+
1289
+ struct FloatType;
1290
+ using FloatTypePtr = SingletonTypePtr<FloatType>;
1291
+ // This type represents a Python float number
1292
+ struct TORCH_API FloatType : public NumberType {
1293
+ bool equals(const Type& rhs) const override {
1294
+ return rhs.kind() == kind();
1295
+ }
1296
+ std::string str() const override {
1297
+ return "float";
1298
+ }
1299
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
1300
+ // NOLINTNEXTLINE(bugprone-parent-virtual-call)
1301
+ return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not);
1302
+ }
1303
+ static const TypeKind Kind = TypeKind::FloatType;
1304
+ // global singleton
1305
+ static FloatTypePtr get();
1306
+
1307
+ private:
1308
+ FloatType() : NumberType(TypeKind::FloatType) {}
1309
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1310
+ return "float";
1311
+ }
1312
+ };
1313
+
1314
+ struct ComplexType;
1315
+ using ComplexTypePtr = SingletonTypePtr<ComplexType>;
1316
+ // This type represents a Python complex number
1317
+ struct TORCH_API ComplexType : public NumberType {
1318
+ bool equals(const Type& rhs) const override {
1319
+ return rhs.kind() == kind();
1320
+ }
1321
+ std::string str() const override {
1322
+ return "complex";
1323
+ }
1324
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
1325
+ // NOLINTNEXTLINE(bugprone-parent-virtual-call)
1326
+ return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not);
1327
+ }
1328
+ static const TypeKind Kind = TypeKind::ComplexType;
1329
+ // global singleton
1330
+ static ComplexTypePtr get();
1331
+
1332
+ private:
1333
+ ComplexType() : NumberType(TypeKind::ComplexType) {}
1334
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1335
+ return "complex";
1336
+ }
1337
+ };
1338
+
1339
+ // We need to introduce `SymIntType` to represent the `SymInt` type
1340
+ // used in function schemas e.g. `aten::narrow_copy(... SymInt length)
1341
+ // `SymInt` will be used to enable tracing arithmetic operations on
1342
+ // dimension values. Please see [SymInt.h] for more information
1343
+ struct SymIntType;
1344
+ using SymIntTypePtr = SingletonTypePtr<SymIntType>;
1345
+ struct TORCH_API SymIntType : public Type {
1346
+ bool equals(const Type& rhs) const override {
1347
+ return rhs.kind() == kind();
1348
+ }
1349
+ std::string str() const override {
1350
+ return "SymInt";
1351
+ }
1352
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
1353
+ return "int";
1354
+ }
1355
+ static const TypeKind Kind = TypeKind::SymIntType;
1356
+ // global singleton
1357
+ static SymIntTypePtr get();
1358
+
1359
+ private:
1360
+ SymIntType() : Type(TypeKind::SymIntType) {}
1361
+ };
1362
+
1363
+ struct SymFloatType;
1364
+ using SymFloatTypePtr = SingletonTypePtr<SymFloatType>;
1365
+ struct TORCH_API SymFloatType : public Type {
1366
+ bool equals(const Type& rhs) const override {
1367
+ return rhs.kind() == kind();
1368
+ }
1369
+ std::string str() const override {
1370
+ return "SymFloat";
1371
+ }
1372
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
1373
+ return "float";
1374
+ }
1375
+ static const TypeKind Kind = TypeKind::SymFloatType;
1376
+ // global singleton
1377
+ static SymFloatTypePtr get();
1378
+
1379
+ private:
1380
+ SymFloatType() : Type(TypeKind::SymFloatType) {}
1381
+ };
1382
+
1383
+ struct SymBoolType;
1384
+ using SymBoolTypePtr = SingletonTypePtr<SymBoolType>;
1385
+ struct TORCH_API SymBoolType : public Type {
1386
+ bool equals(const Type& rhs) const override {
1387
+ return rhs.kind() == kind();
1388
+ }
1389
+ std::string str() const override {
1390
+ return "SymBool";
1391
+ }
1392
+ std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
1393
+ return "bool";
1394
+ }
1395
+ static const TypeKind Kind = TypeKind::SymBoolType;
1396
+ // global singleton
1397
+ static SymBoolTypePtr get();
1398
+
1399
+ private:
1400
+ SymBoolType() : Type(TypeKind::SymBoolType) {}
1401
+ };
1402
+
1403
+ struct IntType;
1404
+ using IntTypePtr = SingletonTypePtr<IntType>;
1405
+ // This type represents a Python int number
1406
+ struct TORCH_API IntType : public NumberType {
1407
+ bool equals(const Type& rhs) const override {
1408
+ return rhs.kind() == kind();
1409
+ }
1410
+ std::string str() const override {
1411
+ return "int";
1412
+ }
1413
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
1414
+ // NOLINTNEXTLINE(bugprone-parent-virtual-call)
1415
+ return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not);
1416
+ }
1417
+ static const TypeKind Kind = TypeKind::IntType;
1418
+ // global singleton
1419
+ static IntTypePtr get();
1420
+
1421
+ private:
1422
+ IntType() : NumberType(TypeKind::IntType) {}
1423
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1424
+ return "int";
1425
+ }
1426
+ };
1427
+
1428
+ struct BoolType;
1429
+ using BoolTypePtr = SingletonTypePtr<BoolType>;
1430
+ // This node represents a Python bool value
1431
+ struct TORCH_API BoolType : public Type {
1432
+ bool equals(const Type& rhs) const override {
1433
+ return rhs.kind() == kind();
1434
+ }
1435
+ std::string str() const override {
1436
+ return "bool";
1437
+ }
1438
+ static const TypeKind Kind = TypeKind::BoolType;
1439
+ // global singleton
1440
+ static BoolTypePtr get();
1441
+
1442
+ private:
1443
+ BoolType() : Type(TypeKind::BoolType) {}
1444
+ };
1445
+
1446
+ struct StringType;
1447
+ using StringTypePtr = SingletonTypePtr<StringType>;
1448
+ // This type represents a Python string
1449
+ struct TORCH_API StringType : public Type {
1450
+ bool equals(const Type& rhs) const override {
1451
+ return rhs.kind() == kind();
1452
+ }
1453
+ std::string str() const override {
1454
+ // we only use "str" (not "string") in both FunctionSchema and script
1455
+ return annotation_str();
1456
+ }
1457
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1458
+ return "str";
1459
+ }
1460
+ static const TypeKind Kind = TypeKind::StringType;
1461
+ // global singleton
1462
+ static StringTypePtr get();
1463
+
1464
+ private:
1465
+ StringType() : Type(TypeKind::StringType) {}
1466
+ };
1467
+
1468
+ struct StorageType;
1469
+ using StorageTypePtr = SingletonTypePtr<StorageType>;
1470
+ struct TORCH_API StorageType : public Type {
1471
+ bool equals(const Type& rhs) const override {
1472
+ return rhs.kind() == kind();
1473
+ }
1474
+ std::string str() const override {
1475
+ return annotation_str();
1476
+ }
1477
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1478
+ return "Storage";
1479
+ }
1480
+ static const TypeKind Kind = TypeKind::StorageType;
1481
+ // global singleton
1482
+ static StorageTypePtr get();
1483
+
1484
+ private:
1485
+ StorageType() : Type(TypeKind::StorageType) {}
1486
+ };
1487
+
1488
+ struct FunctionType;
1489
+ using FunctionTypePtr = std::shared_ptr<FunctionType>;
1490
+ struct TORCH_API FunctionType : public NamedType {
1491
+ static FunctionTypePtr create(torch::jit::Function* function) {
1492
+ return FunctionTypePtr(
1493
+ new FunctionType(function)); // NOLINT(modernize-make-shared)
1494
+ }
1495
+ bool equals(const Type& rhs) const override {
1496
+ if (auto func_type = rhs.cast<FunctionType>()) {
1497
+ return func_type->function_ == function_;
1498
+ }
1499
+
1500
+ return false;
1501
+ }
1502
+ std::string str() const override {
1503
+ return "Function";
1504
+ }
1505
+ torch::jit::Function* function() const {
1506
+ return function_;
1507
+ }
1508
+ static const TypeKind Kind = TypeKind::FunctionType;
1509
+
1510
+ private:
1511
+ FunctionType(torch::jit::Function* function);
1512
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
1513
+ const auto& n = name().value();
1514
+ return n.qualifiedName();
1515
+ }
1516
+ torch::jit::Function* function_;
1517
+ };
1518
+
1519
+ struct NoneType;
1520
+ using NoneTypePtr = SingletonTypePtr<NoneType>;
1521
+ // This type represents a Python None
1522
+ struct TORCH_API NoneType : public Type {
1523
+ bool equals(const Type& rhs) const override {
1524
+ return rhs.kind() == kind();
1525
+ }
1526
+ std::string str() const override {
1527
+ return "NoneType";
1528
+ }
1529
+ bool isSubtypeOfExt(const Type& rhs, std::ostream *why_not) const override;
1530
+
1531
+ static const TypeKind Kind = TypeKind::NoneType;
1532
+ // global singleton
1533
+ static NoneTypePtr get();
1534
+
1535
+ private:
1536
+ NoneType() : Type(TypeKind::NoneType) {}
1537
+ };
1538
+
1539
+ struct GeneratorType;
1540
+ using GeneratorTypePtr = SingletonTypePtr<GeneratorType>;
1541
+ // This type represents a Generator
1542
+ struct TORCH_API GeneratorType : public Type {
1543
+ bool equals(const Type& rhs) const override {
1544
+ return rhs.kind() == kind();
1545
+ }
1546
+ std::string str() const override {
1547
+ return "Generator";
1548
+ }
1549
+ static const TypeKind Kind = TypeKind::GeneratorType;
1550
+ // global singleton
1551
+ static GeneratorTypePtr get();
1552
+
1553
+ private:
1554
+ GeneratorType() : Type(TypeKind::GeneratorType) {}
1555
+ };
1556
+
1557
+ struct QuantizerType;
1558
+ using QuantizerTypePtr = SingletonTypePtr<QuantizerType>;
1559
+ // This type represents a Quantizer
1560
+ struct TORCH_API QuantizerType : public Type {
1561
+ bool equals(const Type& rhs) const override {
1562
+ return rhs.kind() == kind();
1563
+ }
1564
+ std::string str() const override {
1565
+ return "Quantizer";
1566
+ }
1567
+ static const TypeKind Kind = TypeKind::QuantizerType;
1568
+ // global singleton
1569
+ static QuantizerTypePtr get();
1570
+
1571
+ private:
1572
+ QuantizerType() : Type(TypeKind::QuantizerType) {}
1573
+ };
1574
+
1575
+ struct QSchemeType;
1576
+ using QSchemeTypePtr = SingletonTypePtr<QSchemeType>;
1577
+ // This type represents a QScheme
1578
+ struct TORCH_API QSchemeType : public Type {
1579
+ bool equals(const Type& rhs) const override {
1580
+ return rhs.kind() == kind();
1581
+ }
1582
+ std::string str() const override {
1583
+ return "QScheme";
1584
+ }
1585
+ static const TypeKind Kind = TypeKind::QSchemeType;
1586
+ // global singleton
1587
+ static QSchemeTypePtr get();
1588
+
1589
+ private:
1590
+ QSchemeType() : Type(TypeKind::QSchemeType) {}
1591
+ };
1592
+
1593
+ struct DeviceObjType;
1594
+ using DeviceObjTypePtr = SingletonTypePtr<DeviceObjType>;
1595
+ // This type represents a Device
1596
+ struct TORCH_API DeviceObjType : public Type {
1597
+ bool equals(const Type& rhs) const override {
1598
+ return rhs.kind() == kind();
1599
+ }
1600
+ std::string str() const override {
1601
+ return "Device";
1602
+ }
1603
+ static const TypeKind Kind = TypeKind::DeviceObjType;
1604
+ // global singleton
1605
+ static DeviceObjTypePtr get();
1606
+
1607
+ private:
1608
+ DeviceObjType() : Type(TypeKind::DeviceObjType) {}
1609
+ };
1610
+
1611
+ struct StreamObjType;
1612
+ using StreamObjTypePtr = SingletonTypePtr<StreamObjType>;
1613
+ // This type represents a Stream
1614
+ struct TORCH_API StreamObjType : public Type {
1615
+ bool equals(const Type& rhs) const override {
1616
+ return rhs.kind() == kind();
1617
+ }
1618
+ std::string str() const override {
1619
+ return "Stream";
1620
+ }
1621
+ static const TypeKind Kind = TypeKind::StreamObjType;
1622
+ // global singleton
1623
+ static StreamObjTypePtr get();
1624
+
1625
+ private:
1626
+ StreamObjType() : Type(TypeKind::StreamObjType) {}
1627
+ };
1628
+
1629
+ struct VarType;
1630
+ using VarTypePtr = std::shared_ptr<VarType>;
1631
+ // This type represents a type variable, used in FunctionSchema
1632
+ struct VarType : public SharedType {
1633
+ static VarTypePtr create(std::string name_) {
1634
+ return VarTypePtr(new VarType(std::move(name_)));
1635
+ }
1636
+ bool equals(const Type& rhs) const override {
1637
+ return rhs.kind() == kind();
1638
+ }
1639
+ std::string str() const override {
1640
+ return name();
1641
+ }
1642
+ const std::string& name() const {
1643
+ return name_;
1644
+ }
1645
+ bool hasFreeVariables() const override {
1646
+ return true;
1647
+ }
1648
+ static const TypeKind Kind = TypeKind::VarType;
1649
+
1650
+ private:
1651
+ VarType(std::string name_)
1652
+ : SharedType(TypeKind::VarType), name_(std::move(name_)) {}
1653
+ std::string name_;
1654
+ };
1655
+
1656
+ struct CapsuleType;
1657
+ using CapsuleTypePtr = SingletonTypePtr<CapsuleType>;
1658
+ // This type represents a Python Capsule.
1659
+ // It does not appear in the IR and is only used during runtime
1660
+ struct TORCH_API CapsuleType : public Type {
1661
+ bool equals(const Type& rhs) const override {
1662
+ return rhs.kind() == kind();
1663
+ }
1664
+ std::string str() const override {
1665
+ return "Capsule";
1666
+ }
1667
+ static const TypeKind Kind = TypeKind::CapsuleType;
1668
+ // global singleton
1669
+ static CapsuleTypePtr get();
1670
+ private:
1671
+ CapsuleType()
1672
+ : Type(TypeKind::CapsuleType) {}
1673
+ };
1674
+
1675
+ struct PyObjectType;
1676
+ using PyObjectTypePtr = SingletonTypePtr<PyObjectType>;
1677
+ // This type represents a PyObject Type
1678
+ struct TORCH_API PyObjectType : public Type {
1679
+ bool equals(const Type& rhs) const override {
1680
+ return rhs.kind() == kind();
1681
+ }
1682
+ std::string str() const override {
1683
+ return "PyObject";
1684
+ }
1685
+ static const TypeKind Kind = TypeKind::PyObjectType;
1686
+ // global singleton
1687
+ static PyObjectTypePtr get();
1688
+ private:
1689
+ PyObjectType()
1690
+ : Type(TypeKind::PyObjectType) {}
1691
+ };
1692
+
1693
+ enum class TypeVerbosity {
1694
+ None,
1695
+ Type,
1696
+ TypeAndStride,
1697
+ Full,
1698
+ Symbolic,
1699
+ Default = Full,
1700
+ };
1701
+
1702
+ TORCH_API TypeVerbosity type_verbosity();
1703
+
1704
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Type& t);
1705
+ template <typename T>
1706
+ TORCH_API std::ostream& operator<<(
1707
+ std::ostream& out,
1708
+ const VaryingShape<T>& t);
1709
+ TORCH_API std::ostream& operator<<(std::ostream& os, const SymbolicShape& s);
1710
+ TORCH_API std::ostream& operator<<(std::ostream& os, const ShapeSymbol& s);
1711
+ TORCH_API std::ostream& operator<<(std::ostream& os, const Stride& s);
1712
+ // what is the type, ignoring extra size/shape information?
1713
+ // e.g. Tensor(2x3) -> Dynamic, and Tuple(Tensor(2x3),...) -> Tuple(Dynamic,...)
1714
+
1715
+ // `unshapedType` is used to remove Tensor subtypes. We treat all Tensor
1716
+ // subtypes as simply "Tensor"; we also create a new version of any
1717
+ // container types in which internal Tensors have undergone the same
1718
+ // operation. This is used for type comparisons between two Tensor types
1719
+ // (`unshapedType` means that we don't falsely return `false` for e.g.
1720
+ // Tensors of different dimensions). It's also used in the alias
1721
+ // analysis pass.
1722
+ // Be careful with calls because this can be very slow. If calling this
1723
+ // on a graph, use `EraseShapeInformation` in shape_analysis.h
1724
+ inline TypePtr unshapedType(const TypePtr& type) {
1725
+ if (type->isSubtypeOf(*TensorType::get())) {
1726
+ return TensorType::get();
1727
+ }
1728
+ at::ArrayRef<TypePtr> contained = type->containedTypes();
1729
+ if (contained.empty()) {
1730
+ return type;
1731
+ }
1732
+ return type->withContained(fmap(type->containedTypes(), unshapedType));
1733
+ }
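unshapedType strips dtype/shape/device refinements back to the plain Tensor type and rebuilds containers around the unshaped element types. A hedged sketch (the refined sizes are illustrative; exact printed form depends on type_verbosity()):

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // A refined tensor type: float dtype, CPU, sizes {2, 3}, contiguous strides.
      auto refined = c10::TensorType::createContiguous(at::kFloat, at::kCPU, {2, 3});
      std::cout << *refined << '\n';                     // refined tensor type
      std::cout << *c10::unshapedType(refined) << '\n';  // plain "Tensor"

      // Containers are rebuilt with their inner tensors unshaped as well.
      auto list_of_refined = c10::ListType::create(refined);
      std::cout << *c10::unshapedType(list_of_refined) << '\n';  // list of plain Tensor
    }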
1734
+
1735
+ inline TypePtr TensorType::fromNumberType(const Type& typ) {
1736
+ if (typ.isSubtypeOf(*IntType::get())) {
1737
+ return TensorType::createContiguous(at::kLong, at::kCPU, {});
1738
+ } else if (typ.isSubtypeOf(*FloatType::get())) {
1739
+ return TensorType::createContiguous(at::kDouble, at::kCPU, {});
1740
+ } else if (typ.isSubtypeOf(*BoolType::get())) {
1741
+ return TensorType::createContiguous(at::kBool, at::kCPU, {});
1742
+ } else if (typ.kind() == NumberType::Kind) {
1743
+ return TensorType::create(c10::nullopt, at::kCPU, {}, c10::nullopt);
1744
+ }
1745
+ TORCH_CHECK(false, "Unknown number type: ", typ.str());
1746
+ }
1747
+ inline TypePtr TensorType::fromBoolType() {
1748
+ return TensorType::createContiguous(at::kBool, at::kCPU, {});
1749
+ }
1750
+
1751
+ inline c10::optional<c10::ScalarType> tryScalarTypeFromJitType(const Type& type) {
1752
+ if (type == *FloatType::get()) {
1753
+ return at::typeMetaToScalarType(c10::get_default_dtype());
1754
+ } else if (type == *IntType::get()) {
1755
+ return at::ScalarType::Long;
1756
+ } else if (type == *BoolType::get()) {
1757
+ return at::ScalarType::Bool;
1758
+ }
1759
+ return c10::nullopt;
1760
+ }
1761
+
1762
+ inline at::ScalarType scalarTypeFromJitType(const Type& type) {
1763
+ auto result = tryScalarTypeFromJitType(type);
1764
+ TORCH_CHECK(
1765
+ result,
1766
+ "Add new condition, expected Float, Complex, Int, or Bool but got",
1767
+ type.str());
1768
+ return *result;
1769
+ }
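A minimal sketch of the scalar-type helpers above; note that FloatType maps to whatever the current default dtype is.

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // int -> Long, bool -> Bool; float would follow c10::get_default_dtype().
      std::cout << (c10::scalarTypeFromJitType(*c10::IntType::get()) == at::kLong) << '\n';   // 1
      std::cout << (c10::scalarTypeFromJitType(*c10::BoolType::get()) == at::kBool) << '\n';  // 1

      // Types with no scalar equivalent report nullopt through the "try" variant.
      auto none = c10::tryScalarTypeFromJitType(*c10::StringType::get());
      std::cout << none.has_value() << '\n';  // 0
    }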
1770
+
1771
+ // Attempt to find the correct supertype of the two types `t1` and `t2`.
1772
+ // If no supertype is found, then nullopt will be returned if
1773
+ // `default_to_union` is false, and `Union[t1, t2]` will be returned
1774
+ // if it is true. If `t1 == t2`, or `t1` is a type refinement of `t2`,
1775
+ // then `t2` will be returned (and vice versa).
1776
+ //
1777
+ // Two different tensor types will return dynamic.
1778
+ //
1779
+ // Currently we chose not to support returning a NumberType for
1780
+ // two types from the set of {FloatType, IntType, ComplexType}, because
1781
+ // there is a lack of operator support for NumberType.
1782
+ //
1783
+ // If `type_hint` is an `InterfaceType`, then we can use that as a
1784
+ // potential supertype for `ClassType`s in the list. Otherwise, we have
1785
+ // no way to find and use some common interface type
1786
+ TORCH_API c10::optional<TypePtr> unifyTypes(
1787
+ const TypePtr& t1,
1788
+ const TypePtr& t2,
1789
+ bool default_to_union = false,
1790
+ const TypePtr& type_hint = nullptr);
1791
+
1792
+ TORCH_API c10::optional<TypePtr> unifyTypeList(
1793
+ at::ArrayRef<TypePtr> elements,
1794
+ std::ostream& why_not,
1795
+ bool default_to_union = false,
1796
+ const TypePtr& type_hint = nullptr);
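A hedged sketch of unifyTypes based on the comment above: identical types unify to themselves, two differently refined tensor types fall back to the unrefined Tensor type, and int/float are expected not to unify to a NumberType.

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // Identical types unify to that type.
      auto same = c10::unifyTypes(c10::IntType::get(), c10::IntType::get());
      std::cout << (*same)->str() << '\n';  // "int"

      // Two differently refined tensor types unify to the unrefined Tensor type.
      auto t1 = c10::TensorType::createContiguous(at::kFloat, at::kCPU, {2, 3});
      auto t2 = c10::TensorType::createContiguous(at::kLong, at::kCPU, {4});
      auto dyn = c10::unifyTypes(t1, t2);
      std::cout << (dyn.has_value() ? (*dyn)->str() : "<none>") << '\n';  // expected: "Tensor"

      // int and float have no common supertype here unless default_to_union is set.
      auto mixed = c10::unifyTypes(c10::IntType::get(), c10::FloatType::get());
      std::cout << mixed.has_value() << '\n';  // expected: 0 (no NumberType is returned)
    }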
1797
+
1798
+ namespace detail {
1799
+ template <typename T>
1800
+ struct getTypePtr_ final {
1801
+ static decltype(auto) call() {
1802
+ return ([]() {
1803
+ try {
1804
+ return getCustomClassType<T>();
1805
+ } catch(const c10::Error&) {
1806
+ TORCH_CHECK(
1807
+ false,
1808
+ "Type ",
1809
+ c10::util::get_fully_qualified_type_name<T>(),
1810
+ " could not be converted to any of the known types."
1811
+ );
1812
+ }
1813
+ }());
1814
+ }
1815
+ };
1816
+
1817
+ template <typename T, bool fake>
1818
+ struct getMaybeFakeTypePtr_ final {
1819
+ static decltype(auto) call() {
1820
+ return getTypePtr_<T>::call();
1821
+ }
1822
+ };
1823
+
1824
+ template <>
1825
+ struct getTypePtr_<at::IValue> final {
1826
+ static decltype(auto) call() {
1827
+ return AnyType::get();
1828
+ }
1829
+ };
1830
+
1831
+ template <>
1832
+ struct getTypePtr_<at::Tensor> final {
1833
+ static decltype(auto) call() {
1834
+ return TensorType::get();
1835
+ }
1836
+ };
1837
+ template <>
1838
+ struct getTypePtr_<c10::Storage> final {
1839
+ static decltype(auto) call() {
1840
+ return StorageType::get();
1841
+ }
1842
+ };
1843
+ template <>
1844
+ struct getTypePtr_<c10::Stream> final {
1845
+ static decltype(auto) call() {
1846
+ return StreamObjType::get();
1847
+ }
1848
+ };
1849
+ template <>
1850
+ struct getTypePtr_<double> final {
1851
+ static decltype(auto) call() {
1852
+ return FloatType::get();
1853
+ }
1854
+ };
1855
+ template <>
1856
+ struct getTypePtr_<c10::complex<double>> final {
1857
+ static decltype(auto) call() {
1858
+ return ComplexType::get();
1859
+ }
1860
+ };
1861
+ template <>
1862
+ struct getTypePtr_<int64_t> final {
1863
+ static decltype(auto) call() {
1864
+ return IntType::get();
1865
+ }
1866
+ };
1867
+
1868
+ template <>
1869
+ struct getTypePtr_<DeviceIndex> final {
1870
+ static decltype(auto) call() {
1871
+ return IntType::get();
1872
+ }
1873
+ };
1874
+
1875
+ template <>
1876
+ struct getMaybeFakeTypePtr_<SymInt, false> final {
1877
+ static decltype(auto) call() {
1878
+ return SymIntType::get();
1879
+ }
1880
+ };
1881
+ template <>
1882
+ struct getMaybeFakeTypePtr_<SymInt, true> final {
1883
+ static decltype(auto) call() {
1884
+ return IntType::get();
1885
+ }
1886
+ };
1887
+
1888
+ template <>
1889
+ struct getMaybeFakeTypePtr_<SymFloat, false> final {
1890
+ static decltype(auto) call() {
1891
+ return SymFloatType::get();
1892
+ }
1893
+ };
1894
+ template <>
1895
+ struct getMaybeFakeTypePtr_<SymFloat, true> final {
1896
+ static decltype(auto) call() {
1897
+ return FloatType::get();
1898
+ }
1899
+ };
1900
+
1901
+ template <>
1902
+ struct getMaybeFakeTypePtr_<SymBool, false> final {
1903
+ static decltype(auto) call() {
1904
+ return SymBoolType::get();
1905
+ }
1906
+ };
1907
+ template <>
1908
+ struct getMaybeFakeTypePtr_<SymBool, true> final {
1909
+ static decltype(auto) call() {
1910
+ return BoolType::get();
1911
+ }
1912
+ };
1913
+
1914
+ template <>
1915
+ struct getTypePtr_<c10::Device> final {
1916
+ static decltype(auto) call() {
1917
+ return DeviceObjType::get();
1918
+ }
1919
+ };
1920
+ template <>
1921
+ struct getTypePtr_<bool> final {
1922
+ static decltype(auto) call() {
1923
+ return BoolType::get();
1924
+ }
1925
+ };
1926
+ template <>
1927
+ struct getTypePtr_<at::Scalar> final {
1928
+ static decltype(auto) call() {
1929
+ return NumberType::get();
1930
+ }
1931
+ };
1932
+ template <>
1933
+ struct getTypePtr_<c10::QScheme> final {
1934
+ static decltype(auto) call() {
1935
+ return QSchemeType::get();
1936
+ }
1937
+ };
1938
+ template <>
1939
+ struct getTypePtr_<at::Generator> final {
1940
+ static decltype(auto) call() {
1941
+ return TypeFactory::create<OptionalType>(
1942
+ TypeFactory::get<GeneratorType>());
1943
+ }
1944
+ };
1945
+ template <>
1946
+ struct getTypePtr_<std::string> final {
1947
+ static decltype(auto) call() {
1948
+ return StringType::get();
1949
+ }
1950
+ };
1951
+ template <>
1952
+ struct getTypePtr_<c10::string_view> final {
1953
+ static decltype(auto) call() {
1954
+ return StringType::get();
1955
+ }
1956
+ };
1957
+ template <>
1958
+ struct getTypePtr_<at::Dimname> final {
1959
+ static decltype(auto) call() {
1960
+ return StringType::get();
1961
+ }
1962
+ };
1963
+ template <class T, bool fake>
1964
+ struct getMaybeFakeTypePtr_<std::vector<T>, fake> final {
1965
+ static const auto& call() {
1966
+ static auto inner_type = getMaybeFakeTypePtr_<T, fake>::call();
1967
+ // The "per vector<T>" static singleton needs to live in a .cpp file,
1968
+ // otherwise we'll end up with one singleton instance per shared library.
1969
+ static auto type = ListType::get("vector", inner_type);
1970
+ return type;
1971
+ }
1972
+ };
1973
+ template <class T, bool fake>
1974
+ struct getMaybeFakeTypePtr_<c10::ArrayRef<T>, fake> final {
1975
+ static const auto& call() {
1976
+ static auto inner_type = getMaybeFakeTypePtr_<T, fake>::call();
1977
+ // The "per ArrayRef<T>" static singleton needs to live in a .cpp file,
1978
+ // otherwise we'll end up with one singleton instance per shared library.
1979
+ static auto type = ListType::get("ArrayRef", inner_type);
1980
+ return type;
1981
+ }
1982
+ };
1983
+ template <bool fake>
1984
+ struct getMaybeFakeTypePtr_<c10::SymIntArrayRef, fake> final {
1985
+ static const auto& call() {
1986
+ static auto type = ListType::create(getMaybeFakeTypePtr_<c10::SymInt, fake>::call());
1987
+ return type;
1988
+ }
1989
+ };
1990
+ template <class T, bool fake>
1991
+ struct getMaybeFakeTypePtr_<c10::List<T>, fake> final {
1992
+ static const auto& call() {
1993
+ static auto inner_type = getMaybeFakeTypePtr_<T, fake>::call();
1994
+ // The "per List<T>" static singleton needs to live in a .cpp file,
1995
+ // otherwise we'll end up with one singleton instance per shared library.
1996
+ static auto type = ListType::get("List", inner_type);
1997
+ return type;
1998
+ }
1999
+ };
2000
+ template <class T, bool fake>
2001
+ struct getMaybeFakeTypePtr_<c10::IListRef<T>, fake> final {
2002
+ static const auto& call() {
2003
+ static auto inner_type = getMaybeFakeTypePtr_<T, fake>::call();
2004
+ static auto type = ListType::get("List", inner_type);
2005
+ return type;
2006
+ }
2007
+ };
2008
+ template <class T, size_t N, bool fake>
2009
+ struct getMaybeFakeTypePtr_<std::array<T, N>, fake> final {
2010
+ static const auto& call() {
2011
+ static auto inner_type = getMaybeFakeTypePtr_<T, fake>::call();
2012
+ // The "per array<T, N>" static singleton needs to live in a .cpp file,
2013
+ // otherwise we'll end up with one singleton instance per shared library.
2014
+ // (Concatenating the length onto the end of the string because we want a unique
2015
+ // type_ptr created for every std::array<T, N> type).
2016
+ static auto type = ListType::get(std::string("array") + std::to_string(N), inner_type);
2017
+ return type;
2018
+ }
2019
+ };
2020
+ template <class K, class V, bool fake>
2021
+ struct getMaybeFakeTypePtr_<std::unordered_map<K, V>, fake> final {
2022
+ static const auto& call() {
2023
+ static auto inner_key_type = getMaybeFakeTypePtr_<K, fake>::call();
2024
+ static auto inner_val_type = getMaybeFakeTypePtr_<V, fake>::call();
2025
+ // The "per unordered_map<K, V>" static singleton needs to live in a .cpp file,
2026
+ // otherwise we'll end up with one singleton instance per shared library.
2027
+ static auto type = DictType::get("unordered_map", inner_key_type, inner_val_type);
2028
+ return type;
2029
+ }
2030
+ };
2031
+ template <class K, class V, bool fake>
2032
+ struct getMaybeFakeTypePtr_<c10::Dict<K, V>, fake> final {
2033
+ static const auto& call() {
2034
+ static auto inner_key_type = getMaybeFakeTypePtr_<K, fake>::call();
2035
+ static auto inner_val_type = getMaybeFakeTypePtr_<V, fake>::call();
2036
+ // The "per Dict<K, V>" static singleton needs to live in a .cpp file,
2037
+ // otherwise we'll end up with one singleton instance per shared library.
2038
+ static auto type = DictType::get("Dict", inner_key_type, inner_val_type);
2039
+ return type;
2040
+ }
2041
+ };
2042
+
2043
+ template <class T, bool fake>
2044
+ struct getMaybeFakeTypePtr_<at::optional<T>, fake> final {
2045
+ static const auto& call() {
2046
+ static auto inner_type = getMaybeFakeTypePtr_<T, fake>::call();
2047
+ // The "per optional<T>" static singleton needs to live in a .cpp file,
2048
+ // otherwise we'll end up with one singleton instance per shared library.
2049
+ static auto type = OptionalType::get(inner_type);
2050
+ return type;
2051
+ }
2052
+ };
2053
+
2054
+
2055
+ template<>
2056
+ struct getTypePtr_<at::OptionalIntArrayRef> final {
2057
+ static const auto& call() {
2058
+ static auto inner_type = getMaybeFakeTypePtr_<IntArrayRef, false>::call();
2059
+ // The "per optional<T>" static singleton needs to live in a .cpp file,
2060
+ // otherwise we'll end up with one singleton instance per shared library.
2061
+ static auto type = OptionalType::get(inner_type);
2062
+ return type;
2063
+ }
2064
+ };
2065
+
2066
+ template <bool fake>
2067
+ struct getMaybeFakeTypePtr_<at::OptionalSymIntArrayRef, fake> final {
2068
+ static const auto& call() {
2069
+ // The "per optional<T>" static singleton needs to live in a .cpp file,
2070
+ // otherwise we'll end up with one singleton instance per shared library.
2071
+ static auto inner_type = getMaybeFakeTypePtr_<SymIntArrayRef, fake>::call();
2072
+ static auto type = OptionalType::get(inner_type);
2073
+ return type;
2074
+ }
2075
+ };
2076
+
2077
+ template <class... Contained, bool fake>
2078
+ struct getMaybeFakeTypePtr_<std::tuple<Contained...>, fake> final {
2079
+ static const auto& call() {
2080
+ static auto type = ([]() {
2081
+ std::vector<TypePtr> contained_types = {
2082
+ (getMaybeFakeTypePtr_<Contained, fake>::call())...
2083
+ };
2084
+ return TupleType::create(std::move(contained_types));
2085
+ })();
2086
+ return type;
2087
+ }
2088
+ };
2089
+ template <>
2090
+ struct getTypePtr_<void> final {
2091
+ static decltype(auto) call() {
2092
+ return NoneType::get();
2093
+ }
2094
+ };
2095
+ } // namespace detail
2096
+ template <class T>
2097
+ inline decltype(auto) getTypePtr() {
2098
+ // TODO: static_assert that a templated function exists, and throw a friendly
2099
+ // error message if not
2100
+ return detail::getMaybeFakeTypePtr_<T, false>::call();
2101
+ }
2102
+
2103
+ template <class T>
2104
+ inline TypePtr getTypePtrCopy() {
2105
+ // TODO: static_assert that a templated function exists, and throw a friendly
2106
+ // error message if not
2107
+ return getTypePtr<T>();
2108
+ }
2109
+
2110
+ template <class T>
2111
+ inline decltype(auto) getFakeTypePtr() {
2112
+ return detail::getMaybeFakeTypePtr_<T, true>::call();
2113
+ }
2114
+
2115
+ template <class T>
2116
+ inline TypePtr getFakeTypePtrCopy() {
2117
+ return getFakeTypePtr<T>();
2118
+ }
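A small sketch of the getTypePtr/getFakeTypePtr entry points above; the "fake" variant is the schema-facing view in which SymInt is presented as a plain int.

    #include <ATen/core/jit_type.h>
    #include <iostream>
    #include <vector>

    int main() {
      std::cout << c10::getTypePtr<int64_t>()->str() << '\n';              // "int"
      std::cout << c10::getTypePtr<std::vector<double>>()->str() << '\n';  // "float[]"
      std::cout << c10::getTypePtr<c10::SymInt>()->str() << '\n';          // "SymInt"
      // The fake mapping erases symbolic-ness: SymInt is presented as int.
      std::cout << c10::getFakeTypePtr<c10::SymInt>()->str() << '\n';      // "int"
    }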
2119
+
2120
+ using TypeEnv = std::unordered_map<std::string, TypePtr>;
2121
+ struct MatchTypeReturn {
2122
+ MatchTypeReturn(std::string reason) : reason_(std::move(reason)) {}
2123
+ static MatchTypeReturn Success() {
2124
+ return MatchTypeReturn();
2125
+ }
2126
+ bool success() const {
2127
+ return !reason_.has_value();
2128
+ }
2129
+ const std::string& reason() const {
2130
+ return reason_.value();
2131
+ }
2132
+
2133
+ private:
2134
+ MatchTypeReturn()
2135
+ : reason_(c10::nullopt) {}
2136
+ c10::optional<std::string> reason_; // if there is no match, this contains the reason
2137
+ };
2138
+
2139
+ // attempt to match the type variables in formal to actual, adding them to type_env.
2140
+ // If no match is possible this returns a MatchTypeReturn with r.success() == false
2141
+ // and a r.reason() that describes why it could not match.
2142
+ // note: It is possible to successfully match a formal, but for type variables
2143
+ // in the formal to still not be defined. In particular, None matches Optional[T]
2144
+ // but does not define the value of T.
2145
+ TORCH_API MatchTypeReturn
2146
+ matchTypeVariables(const TypePtr& formal, const TypePtr& actual, TypeEnv& type_env);
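A hedged sketch of matchTypeVariables/tryEvalTypeVariables as described above, assuming the environment is keyed by the type-variable name (here "t"):

    #include <ATen/core/jit_type.h>
    #include <iostream>

    int main() {
      // Formal: List[t] where t is a free type variable; actual: List[int].
      auto formal = c10::ListType::create(c10::VarType::create("t"));
      auto actual = c10::ListType::ofInts();

      c10::TypeEnv env;
      auto match = c10::matchTypeVariables(formal, actual, env);
      std::cout << match.success() << '\n';        // expected: 1
      if (match.success()) {
        std::cout << env.at("t")->str() << '\n';   // expected: "int"
        // Substitute the recorded bindings back into the formal type.
        auto evaluated = c10::tryEvalTypeVariables(formal, env);
        std::cout << evaluated->str() << '\n';     // expected: "int[]"
      }
    }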
2147
+
2148
+ // replace type variables appearing in `type` with the values in
2149
+ // `type_env`. Returns nullptr if a variable used in `type`
2150
+ // does not appear in `type_env`
2151
+ TORCH_API TypePtr tryEvalTypeVariables(const TypePtr& type, TypeEnv& type_env);
2152
+
2153
+ TORCH_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type);
2154
+
2155
+ struct InterfaceType;
2156
+ using InterfaceTypePtr = std::shared_ptr<InterfaceType>;
2157
+
2158
+ // Interfaces are a list of abstract methods that a class might meet.
2159
+ // If a class provides those methods, it implicitly meets the interface.
2160
+
2161
+ // Subtype relations for Interface with ClassType:
2162
+ // lhs (ClassType or InterfaceType) is a subtype of rhs if:
2163
+ // 1. lhs methods are a superset of rhs methods
2164
+ // 2. if rhs is module interface, the lhs must be module interface or module itself
2165
+ struct TORCH_API InterfaceType : public NamedType {
2166
+ static InterfaceTypePtr create(
2167
+ QualifiedName qualifiedName, bool is_module=false);
2168
+
2169
+ bool equals(const Type& rhs) const override {
2170
+ if (auto user_rhs = rhs.castRaw<InterfaceType>()) {
2171
+ return isSubTypeImpl(*this, *user_rhs, nullptr) &&
2172
+ isSubTypeImpl(*user_rhs, *this, nullptr);
2173
+ }
2174
+ return false;
2175
+ }
2176
+
2177
+ std::string str() const override {
2178
+ return std::string("InterfaceType<") + name()->name() + ">";
2179
+ }
2180
+
2181
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
2182
+
2183
+ // try to find a method of this interface,
2184
+ // returns nullptr if not found.
2185
+ const FunctionSchema* getMethod(const std::string& name) const;
2186
+ void addMethod(FunctionSchema schema);
2187
+ const std::vector<FunctionSchema>& methods() const {
2188
+ return *methods_;
2189
+ }
2190
+
2191
+ bool is_module() const override{
2192
+ return is_module_;
2193
+ }
2194
+ static const TypeKind Kind = TypeKind::InterfaceType;
2195
+ ~InterfaceType() override;
2196
+ private:
2197
+ InterfaceType(QualifiedName name, bool is_module);
2198
+ static bool isSubTypeImpl(
2199
+ const InterfaceType& lhs,
2200
+ const InterfaceType& rhs,
2201
+ std::ostream* why_not);
2202
+
2203
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
2204
+ return name()->qualifiedName();
2205
+ }
2206
+
2207
+ // shared_ptr so that this header does not have to depend on
2208
+ // FunctionSchema.h
2209
+ std::shared_ptr<std::vector<FunctionSchema>> methods_;
2210
+ // flag to distinguish if it's an interface type from a module or not
2211
+ bool is_module_;
2212
+ };
2213
+
2214
+ template <TypeKind K>
2215
+ struct EnumerationType : public Type {
2216
+ static const TypeKind Kind = K;
2217
+
2218
+ bool equals(const Type& rhs) const override {
2219
+ return rhs.kind() == kind();
2220
+ }
2221
+
2222
+ protected:
2223
+ EnumerationType() : Type(Kind) {}
2224
+ };
2225
+
2226
+ // WARNING: These enumeration types below DO NOT actually get parsed out
2227
+ // from the logical schema strings, instead they are mapped as ints. To
2228
+ // observe these types, use real_type() instead of type() on Argument
2229
+
2230
+ struct ScalarTypeType;
2231
+ using ScalarTypeTypePtr = SingletonTypePtr<ScalarTypeType>;
2232
+ struct TORCH_API ScalarTypeType : public EnumerationType<TypeKind::ScalarTypeType> {
2233
+ std::string str() const override {
2234
+ return "ScalarType";
2235
+ }
2236
+ static const TypeKind Kind = TypeKind::ScalarTypeType;
2237
+ // global singleton
2238
+ static ScalarTypeTypePtr get();
2239
+
2240
+ private:
2241
+ ScalarTypeType() : EnumerationType() {}
2242
+ };
2243
+
2244
+ struct MemoryFormatType;
2245
+ using MemoryFormatTypePtr = SingletonTypePtr<MemoryFormatType>;
2246
+ struct TORCH_API MemoryFormatType : public EnumerationType<TypeKind::MemoryFormatType> {
2247
+ std::string str() const override {
2248
+ return "MemoryFormat";
2249
+ }
2250
+ static const TypeKind Kind = TypeKind::MemoryFormatType;
2251
+ // global singleton
2252
+ static MemoryFormatTypePtr get();
2253
+
2254
+ private:
2255
+ MemoryFormatType() : EnumerationType() {}
2256
+ };
2257
+
2258
+ struct LayoutType;
2259
+ using LayoutTypePtr = SingletonTypePtr<LayoutType>;
2260
+ struct TORCH_API LayoutType : public EnumerationType<TypeKind::LayoutType> {
2261
+ std::string str() const override {
2262
+ return "Layout";
2263
+ }
2264
+ static const TypeKind Kind = TypeKind::LayoutType;
2265
+ // global singleton
2266
+ static LayoutTypePtr get();
2267
+
2268
+ private:
2269
+ LayoutType() : EnumerationType() {}
2270
+ };
2271
+
2272
+ namespace detail {
2273
+ template <>
2274
+ struct getMaybeFakeTypePtr_<c10::ScalarType, false> final {
2275
+ static decltype(auto) call() {
2276
+ return ScalarTypeType::get();
2277
+ }
2278
+ };
2279
+ template <>
2280
+ struct getMaybeFakeTypePtr_<c10::Layout, false> final {
2281
+ static decltype(auto) call() {
2282
+ return LayoutType::get();
2283
+ }
2284
+ };
2285
+ template <>
2286
+ struct getMaybeFakeTypePtr_<c10::MemoryFormat, false> final {
2287
+ static decltype(auto) call() {
2288
+ return MemoryFormatType::get();
2289
+ }
2290
+ };
2291
+ template <>
2292
+ struct getMaybeFakeTypePtr_<c10::ScalarType, true> final {
2293
+ static decltype(auto) call() {
2294
+ return IntType::get();
2295
+ }
2296
+ };
2297
+ template <>
2298
+ struct getMaybeFakeTypePtr_<c10::Layout, true> final {
2299
+ static decltype(auto) call() {
2300
+ return IntType::get();
2301
+ }
2302
+ };
2303
+ template <>
2304
+ struct getMaybeFakeTypePtr_<c10::MemoryFormat, true> final {
2305
+ static decltype(auto) call() {
2306
+ return IntType::get();
2307
+ }
2308
+ };
2309
+ } // namespace detail
2310
+
2311
+ // the common supertype of all lists,
2312
+ // List[T] <: AnyList for all T
2313
+ struct AnyListType;
2314
+ using AnyListTypePtr = SingletonTypePtr<AnyListType>;
2315
+ struct TORCH_API AnyListType : public Type {
2316
+ bool equals(const Type& rhs) const override {
2317
+ return rhs.kind() == kind();
2318
+ }
2319
+ std::string str() const override {
2320
+ return "list";
2321
+ }
2322
+ static const TypeKind Kind = TypeKind::AnyListType;
2323
+ // global singleton
2324
+ static AnyListTypePtr get();
2325
+ private:
2326
+ AnyListType()
2327
+ : Type(TypeKind::AnyListType) {}
2328
+ };
2329
+
2330
+ // the common supertype of all tuples,
2331
+ // Tuple[T...] <: AnyTuple for all T
2332
+ struct AnyTupleType;
2333
+ using AnyTupleTypePtr = SingletonTypePtr<AnyTupleType>;
2334
+ struct TORCH_API AnyTupleType : public Type {
2335
+ bool equals(const Type& rhs) const override {
2336
+ return rhs.kind() == kind();
2337
+ }
2338
+
2339
+ std::string str() const override {
2340
+ return "tuple";
2341
+ }
2342
+ static const TypeKind Kind = TypeKind::AnyTupleType;
2343
+
2344
+ // global singleton
2345
+ static AnyTupleTypePtr get();
2346
+ private:
2347
+ AnyTupleType()
2348
+ : Type(TypeKind::AnyTupleType) {}
2349
+ };
2350
+
2351
+ // the common supertype of all classes,
2352
+ // ClassType <: AnyClassType for all classes
2353
+ struct AnyClassType;
2354
+ using AnyClassTypePtr = SingletonTypePtr<AnyClassType>;
2355
+ struct TORCH_API AnyClassType : public Type {
2356
+ bool equals(const Type& rhs) const override {
2357
+ return rhs.kind() == kind();
2358
+ }
2359
+ std::string str() const override {
2360
+ return "AnyClassType";
2361
+ }
2362
+ static const TypeKind Kind = TypeKind::AnyClassType;
2363
+ // global singleton
2364
+ static AnyClassTypePtr get();
2365
+ private:
2366
+ AnyClassType()
2367
+ : Type(TypeKind::AnyClassType) {}
2368
+ };
2369
+
2370
+ template<>
2371
+ inline typename detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
2372
+ if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
2373
+ kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
2374
+ return std::static_pointer_cast<NamedType>(static_cast<NamedType *>(this)->shared_from_this());
2375
+ }
2376
+ return nullptr;
2377
+ }
2378
+
2379
+ template<>
2380
+ inline typename detail::CastConstReturnType<NamedType>::type Type::cast<NamedType>() const {
2381
+ if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
2382
+ kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
2383
+ return std::static_pointer_cast<const NamedType>(static_cast<const NamedType *>(this)->shared_from_this());
2384
+ }
2385
+ return nullptr;
2386
+ }
2387
+
2388
+ template<>
2389
+ inline const NamedType* Type::castRaw<NamedType>() const {
2390
+ if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
2391
+ kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
2392
+ return static_cast<const NamedType*>(this);
2393
+ }
2394
+ return nullptr;
2395
+ }
2396
+
2397
+ // Used as a return type when inferring the IValue type of a Python object.
2398
+ struct InferredType {
2399
+ /* implicit */ InferredType(TypePtr type) : type_(std::move(type)) {}
2400
+ /* implicit */ InferredType(std::string reason)
2401
+ : type_(nullptr), reason_(std::move(reason)) {}
2402
+ TypePtr type() const {
2403
+ TORCH_INTERNAL_ASSERT(
2404
+ type_,
2405
+ "Tried to get the type from an InferredType but the type is null. ",
2406
+ "Reason: ",
2407
+ reason_);
2408
+ return type_;
2409
+ }
2410
+ bool success() const {
2411
+ return type_ != nullptr;
2412
+ }
2413
+ const std::string& reason() const {
2414
+ TORCH_INTERNAL_ASSERT(!type_);
2415
+ return reason_;
2416
+ }
2417
+
2418
+ private:
2419
+ TypePtr type_;
2420
+ std::string reason_;
2421
+ };
2422
+
2423
+ TORCH_API bool containsAnyType(const TypePtr& type);
2424
+
2425
+ } // namespace c10
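A minimal consumption sketch for the InferredType helper defined above. The inferExample() routine and its failure message are illustrative stand-ins; only c10::InferredType, c10::TypePtr and the accessors shown come from the header itself.

#include <ATen/core/jit_type.h>
#include <string>
#include <utility>

// Hypothetical inference routine: succeed with a concrete TypePtr, or fail
// with a human-readable reason carried inside the InferredType.
c10::InferredType inferExample(bool knows_the_type, c10::TypePtr candidate) {
  if (knows_the_type) {
    return c10::InferredType(std::move(candidate));
  }
  return c10::InferredType(std::string("unsupported Python object"));
}

void consumeInference(const c10::InferredType& inferred) {
  if (inferred.success()) {
    c10::TypePtr t = inferred.type();           // asserts internally if no type was set
    (void)t;
  } else {
    const std::string& why = inferred.reason(); // only valid when !success()
    (void)why;
  }
}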
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/operator_name.h ADDED
@@ -0,0 +1,92 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <c10/util/string_view.h>
7
+ #include <string>
8
+ #include <utility>
9
+ #include <ostream>
10
+
11
+ namespace c10 {
12
+
13
+ // TODO: consider storing namespace separately too
14
+ struct OperatorName final {
15
+ std::string name;
16
+ std::string overload_name;
17
+ OperatorName(std::string name, std::string overload_name)
18
+ : name(std::move(name)), overload_name(std::move(overload_name)) {}
19
+
20
+ // TODO: These two functions below are slow! Fix internal data structures so
21
+ // I don't have to manually reconstruct the namespaces!
22
+
23
+ // Return the namespace of this OperatorName, if it exists. The
24
+ // returned string_view is only live as long as the OperatorName
25
+ // exists and name is not mutated
26
+ c10::optional<c10::string_view> getNamespace() const {
27
+ auto pos = name.find("::");
28
+ if (pos == std::string::npos) {
29
+ return c10::nullopt;
30
+ } else {
31
+ return c10::make_optional(c10::string_view(name.data(), pos));
32
+ }
33
+ }
34
+
35
+ // Returns true if we successfully set the namespace
36
+ bool setNamespaceIfNotSet(const char* ns) {
37
+ if (!getNamespace().has_value()) {
38
+ const auto ns_len = strlen(ns);
39
+ const auto old_name_size = name.size();
40
+ name.resize(ns_len + 2 + old_name_size);
41
+ // Shift current value of name to the end of the new space.
42
+ name.replace(name.size() - old_name_size, old_name_size, name, 0, old_name_size);
43
+ name.replace(0, ns_len, ns, ns_len);
44
+ name[ns_len] = ':';
45
+ name[ns_len + 1] = ':';
46
+ return true;
47
+ } else {
48
+ return false;
49
+ }
50
+ }
51
+ };
52
+
53
+ // Non-owning view of an OperatorName. Unlike OperatorName, most of
54
+ // its functions are constexpr, so it can be used for compile time
55
+ // computations
56
+ struct OperatorNameView final {
57
+ c10::string_view name;
58
+ c10::string_view overload_name;
59
+ constexpr OperatorNameView(c10::string_view name, c10::string_view overload_name)
60
+ : name(name), overload_name(overload_name) {}
61
+ // Parses strings like "foo.overload" and also "foo"
62
+ constexpr static OperatorNameView parse(c10::string_view full_name) {
63
+ auto i = full_name.find('.');
64
+ if (i == c10::string_view::npos) {
65
+ return OperatorNameView(full_name, c10::string_view());
66
+ } else {
67
+ return OperatorNameView(full_name.substr(0, i), full_name.substr(i + 1));
68
+ }
69
+ }
70
+ };
71
+
72
+ inline bool operator==(const OperatorName& lhs, const OperatorName& rhs) {
73
+ return lhs.name == rhs.name && lhs.overload_name == rhs.overload_name;
74
+ }
75
+
76
+ inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) {
77
+ return !operator==(lhs, rhs);
78
+ }
79
+
80
+ TORCH_API std::string toString(const OperatorName& opName);
81
+ TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&);
82
+
83
+ } // namespace c10
84
+
85
+ namespace std {
86
+ template <>
87
+ struct hash<::c10::OperatorName> {
88
+ size_t operator()(const ::c10::OperatorName& x) const {
89
+ return std::hash<std::string>()(x.name) ^ (~ std::hash<std::string>()(x.overload_name));
90
+ }
91
+ };
92
+ }
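A short usage sketch for the helpers above; the operator names ("aten::add", "relu", "custom") are illustrative values, not anything defined by the header.

#include <ATen/core/operator_name.h>
#include <string>

void operatorNameDemo() {
  c10::OperatorName op("aten::add", "Tensor");

  // getNamespace() returns a string_view into `name`; only use it while `op`
  // is alive and op.name is not mutated.
  if (auto ns = op.getNamespace()) {
    std::string ns_copy(ns->data(), ns->size());   // "aten"
    (void)ns_copy;
  }

  // Already namespaced, so this is a no-op and returns false.
  bool changed = op.setNamespaceIfNotSet("custom");

  c10::OperatorName bare("relu", "");
  bare.setNamespaceIfNotSet("custom");             // name becomes "custom::relu"

  // Parse "name.overload" strings without allocating.
  const auto view = c10::OperatorNameView::parse("add.Tensor");
  std::string base(view.name.data(), view.name.size());                        // "add"
  std::string overload(view.overload_name.data(), view.overload_name.size());  // "Tensor"

  (void)changed; (void)base; (void)overload;
}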
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/stack.h ADDED
@@ -0,0 +1,200 @@
1
+ #pragma once
2
+
3
+ #include <type_traits>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <c10/util/Deprecated.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ // TODO move this to c10 namespace
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+
14
+ using c10::IValue;
15
+ using Stack = std::vector<IValue>;
16
+
17
+ class Operation {
18
+ template <typename F, typename Arg>
19
+ using accepts = std::is_constructible<std::function<void(Arg)>, F&&>;
20
+
21
+ public:
22
+ template <typename F,
23
+ std::enable_if_t<accepts<F, Stack*>::value, int> = 0>
24
+ C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.")
25
+ Operation(F&& raw): op_([raw = std::forward<F>(raw)](Stack& stack) {
26
+ raw(&stack);
27
+ }) {}
28
+
29
+ template <typename F,
30
+ std::enable_if_t<accepts<F, Stack&>::value &&
31
+ !std::is_same<std::decay_t<F>, Operation>::value, int> = 0>
32
+ Operation(F&& op): op_(std::forward<F>(op)) {}
33
+
34
+ Operation(std::nullptr_t) noexcept {}
35
+
36
+ explicit operator bool() const noexcept {
37
+ return op_ ? true : false;
38
+ }
39
+
40
+ void operator()(Stack& stack) {
41
+ op_(stack);
42
+ }
43
+
44
+ template <typename T>
45
+ T* target() noexcept {
46
+ return op_.target<T>();
47
+ }
48
+
49
+ private:
50
+ std::function<void(Stack&)> op_;
51
+ };
52
+
53
+ // An operation with N inputs and M outputs pops the last N inputs off
54
+ // the stack and pushes its M outputs onto the stack
55
+ // before: <other stack items> I0, I1, ... IN <- stack.back()
56
+ // after: <other stack items> O0, O1, ... OM
57
+ // operations are defined this way so that ownership of inputs can be
58
+ // transferred to the operation and it can incrementally drop ownership of
59
+ // tensors when they become unneeded. For large operations, like 'run an entire
60
+ // subgraph', this functionality is very important for minimizing gpu memory
61
+ // usage. The return value is the relative 'offset' to jump to for the next
62
+ // operation:
63
+ // pc += 1 + offset
64
+ // so a return value of 0 goes to the next instruction
65
+
66
+ // treat the last N elements of the stack as a list, looking up
67
+ // element i
68
+ static inline IValue& peek(Stack& stack, size_t i, size_t N) {
69
+ return *(stack.end() - N + i);
70
+ }
71
+ static inline IValue& peek(Stack* stack, size_t i, size_t N) {
72
+ return peek(*stack, i, N);
73
+ }
74
+ static inline const IValue& peek(const Stack& stack, size_t i, size_t N) {
75
+ return *(stack.end() - N + i);
76
+ }
77
+ static inline const IValue& peek(const Stack* stack, size_t i, size_t N) {
78
+ return peek(*stack, i, N);
79
+ }
80
+ // treat the last N elements of the stack as a list, looking up the
81
+ // slice starting at index i and having length len
82
+ static inline at::ArrayRef<IValue> peekSlice(
83
+ const Stack& stack,
84
+ size_t i,
85
+ size_t len,
86
+ size_t N) {
87
+ return at::ArrayRef<IValue>(stack).slice(stack.size() - N + i, len);
88
+ }
89
+ static inline at::ArrayRef<IValue> last(const Stack& stack, size_t N) {
90
+ return peekSlice(stack, 0, N, N);
91
+ }
92
+ static inline at::ArrayRef<IValue> last(const Stack* stack, size_t N) {
93
+ return last(*stack, N);
94
+ }
95
+ static inline void drop(Stack& stack, size_t n) {
96
+ stack.erase(stack.end() - n, stack.end());
97
+ }
98
+ static inline void drop(Stack* stack, size_t n) {
99
+ drop(*stack, n);
100
+ }
101
+ static inline IValue pop(Stack& stack) {
102
+ auto r = std::move(stack.back());
103
+ stack.pop_back();
104
+ return r;
105
+ }
106
+ static inline IValue pop(Stack* stack) {
107
+ return pop(*stack);
108
+ }
109
+ static inline std::vector<IValue> pop(Stack& stack, size_t n) {
110
+ std::vector<IValue> result;
111
+ result.reserve(n);
112
+ for (const auto i : c10::irange(n)) {
113
+ result.push_back(std::move(peek(stack, i, n)));
114
+ }
115
+ drop(stack, n);
116
+ return result;
117
+ }
118
+
119
+ // variadic pop:
120
+ // int64_t a; at::Tensor b;
121
+ // pop(stack, a, b);
122
+ // equivalent to:
123
+ // b = pop(stack).toTensor();
124
+ // a = pop(stack).toInt();
125
+ template <typename... Types>
126
+ static inline void pop(Stack& stack, Types&... args) {
127
+ size_t i = 0;
128
+ constexpr size_t N = sizeof...(args);
129
+ (void)std::initializer_list<int>{
130
+ (args = std::move(peek(stack, i++, N)).template to<Types>(), 0)...};
131
+ drop(stack, N);
132
+ }
133
+ template <typename... Types>
134
+ static inline void pop(Stack* stack, Types&... args) {
135
+ pop(*stack, args...);
136
+ }
137
+ template <typename Type>
138
+ static inline void push_one(Stack& stack, Type&& arg) {
139
+ stack.emplace_back(std::forward<Type>(arg));
140
+ }
141
+
142
+ static inline void push_one(Stack& stack, c10::TensorOptions options) {
143
+ stack.emplace_back(c10::typeMetaToScalarType(options.dtype()));
144
+ stack.emplace_back(options.layout());
145
+ stack.emplace_back(options.device());
146
+ stack.emplace_back(options.pinned_memory());
147
+ }
148
+
149
+ template <typename... Types>
150
+ static inline void push(Stack& stack, Types&&... args) {
151
+ (void)std::initializer_list<int>{(push_one(stack, std::forward<Types>(args)), 0)...};
152
+ }
153
+ template <typename... Types>
154
+ static inline void push(Stack* stack, Types&&... args) {
155
+ return push(*stack, std::forward<Types>(args)...);
156
+ }
157
+ template <class T>
158
+ static inline void push_list_elements(Stack& stack, const c10::List<T>& elements) {
159
+ for (T elem : elements) {
160
+ stack.push_back(std::move(elem));
161
+ }
162
+ }
163
+
164
+ // The packer here is carefully written not to make any unnecessary
165
+ // copies.
166
+
167
+ // pack takes the return values of aten functions and pushes them onto the stack
168
+ template <typename T>
169
+ inline void pack(Stack& stack, T&& v) {
170
+ stack.emplace_back(std::forward<T>(v));
171
+ }
172
+ template <typename T>
173
+ inline void pack(Stack* stack, T&& v) {
174
+ pack(*stack, std::forward<T>(v));
175
+ }
176
+
177
+ template <std::size_t remaining, typename... Args>
178
+ struct TuplePacker {
179
+ // NB: *Not* a universal reference.
180
+ static void execute(Stack& stack, std::tuple<Args...>&& t) {
181
+ // NB: The move here does not "destroy" the entire tuple, that is
182
+ // not what std::move does; only the particular tuple index
183
+ // processed here gets stolen.
184
+ pack(stack, std::get<sizeof...(Args) - remaining>(std::move(t)));
185
+ TuplePacker<remaining - 1, Args...>::execute(stack, std::move(t));
186
+ }
187
+ };
188
+
189
+ template <typename... Args>
190
+ struct TuplePacker<0, Args...> {
191
+ static void execute(Stack& /*stack*/, std::tuple<Args...>&& /*t*/){};
192
+ };
193
+
194
+ template <typename... Args>
195
+ inline void pack(Stack& stack, std::tuple<Args...>&& t) {
196
+ TuplePacker<sizeof...(Args), Args...>::execute(stack, std::move(t));
197
+ }
198
+
199
+ } // namespace jit
200
+ } // namespace torch
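A toy sketch of the Stack helpers above, written as a boxed operation; the op itself and any registration around it are made up for illustration, while push/pop/peek/last come straight from this header.

#include <ATen/core/stack.h>

namespace {

// Boxed kernel with N = 2 inputs and M = 1 output: pops two ints, pushes their sum.
void addIntsBoxed(torch::jit::Stack& stack) {
  int64_t a = 0;
  int64_t b = 0;
  torch::jit::pop(stack, a, b);      // variadic pop converts the top IValues
  torch::jit::push(stack, a + b);
}

void inspectWithoutPopping(const torch::jit::Stack& stack) {
  // Treat the top two elements as a list without consuming them.
  const c10::IValue& first_input = torch::jit::peek(stack, 0, 2);
  at::ArrayRef<c10::IValue> top_two = torch::jit::last(stack, 2);
  (void)first_input;
  (void)top_two;
}

} // namespace

Any callable with signature void(Stack&), such as addIntsBoxed above, can then be wrapped in a torch::jit::Operation.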
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/symbol.h ADDED
@@ -0,0 +1,147 @@
1
+ #pragma once
2
+ #include <c10/macros/Export.h>
3
+ #include <cstdint>
4
+ #include <functional> // For std::hash
5
+ #include <string>
6
+
7
+
8
+ namespace c10 {
9
+
10
+ // 'prim' symbols are synthetic operators that occur only in the IR
11
+ // and don't have corresponding implementations in ATen.
12
+
13
+ // 'onnx' symbols correspond to ONNX operators. Their semantics
14
+ // are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md
15
+ // The particular version we are targeting is specified by '_onnx_opset_version'
16
+ // in torch.onnx.symbolic_helper
17
+ //
18
+ // In general, most ONNX operators won't get an entry here, because they
19
+ // are handled from the Python end. However, you may occasionally need
20
+ // to intern an ONNX symbol here so that you can conveniently write an
21
+ // optimization on ONNX operations.
22
+
23
+ // 'attr' symbols are attribute keys. They are shared between both ONNX and ATen
24
+ // operators (you disambiguate their meaning by looking at the operator itself).
25
+ // In general, you only need to define attribute keys that are used by
26
+ // onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS.
27
+
28
+ // Note [Symbol allocation]
29
+ // ~~~~~~~~~~~~~~~~~~~~~~~~
30
+ //
31
+ // 1. Symbol namespace is split up into namespaces.
32
+ //
33
+ // 2. The intended access pattern for built-in symbols is onnx::MatMul
34
+ // in the c10 namespace (this is a Symbol).
35
+ //
36
+
37
+ // Built-in constant definition strategy:
38
+ // - Enum is the most convenient way to generate a contiguous sequence
39
+ // of numbers for an identifier.
40
+ // - However, an enum gives you a fresh type. We want onnx::MatMul to
41
+ // be type Symbol, not some random enum type!
42
+ // - Therefore, after using enums to generate the sequence of integers,
43
+ // we then declare constexpr Symbols so that everything gets the actual Symbol
44
+ // type we want. Symbols must be constexpr to be valid to be "case"ed on.
45
+
46
+ using unique_t = uint32_t;
47
+
48
+ const std::string& domain_prefix();
49
+
50
+ // A Symbol is like an interned string, but with a little extra
51
+ // structure; it is namespaced via SymbolNamespace and the resulting
52
+ // intern pointers support efficient namespace testing.
53
+ struct TORCH_API Symbol {
54
+ explicit constexpr Symbol() : value(0) {};
55
+ explicit constexpr Symbol(unique_t uniq)
56
+ : value(uniq) {}
57
+
58
+ // Get a Symbol for a qualified string like "attr::bar"
59
+ static Symbol fromQualString(const std::string & s);
60
+
61
+ // Get a Symbol from a domain and an unqualified string like "org.pytorch.attr" and "bar"
62
+ static Symbol fromDomainAndUnqualString(const std::string & d, const std::string & s);
63
+
64
+ // Constructors for our various namespaced strings. This will construct
65
+ // the appropriate namespaced string, e.g., "attr::foo" for the
66
+ // argument "foo", and then attempt to intern it. DO NOT USE THIS
67
+ // with a string literal; attr::foo should be available in that case
68
+ // (and if it's not, you should add it to the built-ins list above.)
69
+ static Symbol attr(const std::string & s);
70
+ static Symbol aten(const std::string & s);
71
+ static Symbol cuda(const std::string & s);
72
+ static Symbol onnx(const std::string & s);
73
+ static Symbol prim(const std::string & s);
74
+ static Symbol user(const std::string & s);
75
+ static Symbol caffe2(const std::string & s);
76
+ static Symbol dimname(const std::string & s);
77
+ // TODO: eliminate me
78
+ static Symbol scope(const std::string & s);
79
+
80
+ bool is_attr() const;
81
+ bool is_aten() const;
82
+ bool is_cuda() const;
83
+ bool is_prim() const;
84
+ bool is_prims() const;
85
+ bool is_nvprims() const;
86
+ bool is_onnx() const;
87
+ bool is_user() const;
88
+ bool is_caffe2() const;
89
+ bool is_dimname() const;
90
+
91
+ // So we can switch on this
92
+ constexpr operator unique_t() const {
93
+ return value;
94
+ }
95
+
96
+ Symbol ns() const;
97
+
98
+ // Give a string corresponding to the unqualified version of this name, e.g.,
99
+ // "mm". Use this in a context where the intended namespace of the string is
100
+ // obvious; this is a *lossy* conversion.
101
+ const char * toUnqualString() const;
102
+
103
+ // Give a string corresponding to the qualified version of this name,
104
+ // e.g., "aten::mm". This string format is made available to Python bindings
105
+ // (so we know how to parse it.)
106
+ const char * toQualString() const;
107
+
108
+ // This describes a symbol in a case where humans read it. At the moment it's
109
+ // the same as toQualString. This has to be a const char* returned because
110
+ // a lot of printf style macros use it.
111
+ const char * toDisplayString() const;
112
+
113
+ // Give a string corresponding to the domain name for the symbol,
114
+ // e.g., "org.pytorch.aten".
115
+ std::string domainString() const;
116
+
117
+ private:
118
+
119
+ explicit Symbol(Symbol ns, const std::string & s);
120
+ unique_t value;
121
+ };
122
+
123
+ static inline bool operator==(Symbol lhs, Symbol rhs) {
124
+ return static_cast<unique_t>(lhs) == static_cast<unique_t>(rhs);
125
+ }
126
+
127
+ inline Symbol Symbol::attr(const std::string & s) { return Symbol::fromQualString("attr::" + s); }
128
+ inline Symbol Symbol::aten(const std::string & s) { return Symbol::fromQualString("aten::" + s); }
129
+ inline Symbol Symbol::cuda(const std::string & s) { return Symbol::fromQualString("cuda::" + s); }
130
+ inline Symbol Symbol::onnx(const std::string & s) { return Symbol::fromQualString("onnx::" + s); }
131
+ inline Symbol Symbol::prim(const std::string & s) { return Symbol::fromQualString("prim::" + s); }
132
+ inline Symbol Symbol::scope(const std::string & s) { return Symbol::fromQualString("scope::" + s); }
133
+ inline Symbol Symbol::user(const std::string & s) { return Symbol::fromQualString("user::" + s); }
134
+ inline Symbol Symbol::caffe2(const std::string & s) { return Symbol::fromQualString("_caffe2::" + s); }
135
+ inline Symbol Symbol::dimname(const std::string & s) { return Symbol::fromQualString("dimname::" + s); }
136
+
137
+ } // namespace c10
138
+
139
+ // make symbol behave like an integer in hash tables
140
+ namespace std {
141
+ template <>
142
+ struct hash<c10::Symbol> {
143
+ size_t operator()(c10::Symbol s) const {
144
+ return std::hash<uint32_t>()(static_cast<uint32_t>(s));
145
+ }
146
+ };
147
+ }
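A small sketch of the interned-Symbol API above; "my_op" is an arbitrary example name.

#include <ATen/core/symbol.h>
#include <unordered_set>

void symbolDemo() {
  c10::Symbol mm   = c10::Symbol::fromQualString("aten::mm");
  c10::Symbol mine = c10::Symbol::aten("my_op");        // interns "aten::my_op"

  bool both_aten     = mm.is_aten() && mine.is_aten();  // true
  const char* qual   = mm.toQualString();               // "aten::mm"
  const char* unqual = mm.toUnqualString();             // "mm"
  c10::Symbol ns     = mm.ns();                         // the "aten" namespace symbol

  // Symbols hash and compare like integers, so they drop into hash containers.
  std::unordered_set<c10::Symbol> seen;
  seen.insert(mm);
  bool already_seen = seen.count(mine) != 0;            // false

  (void)both_aten; (void)qual; (void)unqual; (void)ns; (void)already_seen;
}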
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <cuda_runtime.h>
5
+ #include <cuda_fp16.h>
6
+
7
+ #include <c10/macros/Export.h>
8
+
9
+ // Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh ADDED
@@ -0,0 +1,149 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ // Collection of direct PTX functions
5
+
6
+ namespace at::cuda {
7
+
8
+ template <typename T>
9
+ struct Bitfield {};
10
+
11
+ template <>
12
+ struct Bitfield<unsigned int> {
13
+ static __device__ __host__ __forceinline__
14
+ unsigned int getBitfield(unsigned int val, int pos, int len) {
15
+ #if !defined(__CUDA_ARCH__)
16
+ pos &= 0xff;
17
+ len &= 0xff;
18
+
19
+ unsigned int m = (1u << len) - 1u;
20
+ return (val >> pos) & m;
21
+ #else
22
+ unsigned int ret;
23
+ asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
24
+ return ret;
25
+ #endif
26
+ }
27
+
28
+ static __device__ __host__ __forceinline__
29
+ unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
30
+ #if !defined(__CUDA_ARCH__)
31
+ pos &= 0xff;
32
+ len &= 0xff;
33
+
34
+ unsigned int m = (1u << len) - 1u;
35
+ toInsert &= m;
36
+ toInsert <<= pos;
37
+ m <<= pos;
38
+
39
+ return (val & ~m) | toInsert;
40
+ #else
41
+ unsigned int ret;
42
+ asm("bfi.b32 %0, %1, %2, %3, %4;" :
43
+ "=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
44
+ return ret;
45
+ #endif
46
+ }
47
+ };
48
+
49
+ template <>
50
+ struct Bitfield<uint64_t> {
51
+ static __device__ __host__ __forceinline__
52
+ uint64_t getBitfield(uint64_t val, int pos, int len) {
53
+ #if !defined(__CUDA_ARCH__)
54
+ pos &= 0xff;
55
+ len &= 0xff;
56
+
57
+ uint64_t m = (1ull << len) - 1ull;
58
+ return (val >> pos) & m;
59
+ #else
60
+ uint64_t ret;
61
+ asm("bfe.u64 %0, %1, %2, %3;" : "=l"(ret) : "l"(val), "r"(pos), "r"(len));
62
+ return ret;
63
+ #endif
64
+ }
65
+
66
+ static __device__ __host__ __forceinline__
67
+ uint64_t setBitfield(uint64_t val, uint64_t toInsert, int pos, int len) {
68
+ #if !defined(__CUDA_ARCH__)
69
+ pos &= 0xff;
70
+ len &= 0xff;
71
+
72
+ uint64_t m = (1ull << len) - 1ull;
73
+ toInsert &= m;
74
+ toInsert <<= pos;
75
+ m <<= pos;
76
+
77
+ return (val & ~m) | toInsert;
78
+ #else
79
+ uint64_t ret;
80
+ asm("bfi.b64 %0, %1, %2, %3, %4;" :
81
+ "=l"(ret) : "l"(toInsert), "l"(val), "r"(pos), "r"(len));
82
+ return ret;
83
+ #endif
84
+ }
85
+ };
86
+
87
+ __device__ __forceinline__ int getLaneId() {
88
+ #if defined(USE_ROCM)
89
+ return __lane_id();
90
+ #else
91
+ int laneId;
92
+ asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
93
+ return laneId;
94
+ #endif
95
+ }
96
+
97
+ #if defined(USE_ROCM)
98
+ __device__ __forceinline__ unsigned long long int getLaneMaskLt() {
99
+ const std::uint64_t m = (1ull << getLaneId()) - 1ull;
100
+ return m;
101
+ }
102
+ #else
103
+ __device__ __forceinline__ unsigned getLaneMaskLt() {
104
+ unsigned mask;
105
+ asm("mov.u32 %0, %%lanemask_lt;" : "=r"(mask));
106
+ return mask;
107
+ }
108
+ #endif
109
+
110
+ #if defined (USE_ROCM)
111
+ __device__ __forceinline__ unsigned long long int getLaneMaskLe() {
112
+ std::uint64_t m = UINT64_MAX >> (sizeof(std::uint64_t) * CHAR_BIT - (getLaneId() + 1));
113
+ return m;
114
+ }
115
+ #else
116
+ __device__ __forceinline__ unsigned getLaneMaskLe() {
117
+ unsigned mask;
118
+ asm("mov.u32 %0, %%lanemask_le;" : "=r"(mask));
119
+ return mask;
120
+ }
121
+ #endif
122
+
123
+ #if defined(USE_ROCM)
124
+ __device__ __forceinline__ unsigned long long int getLaneMaskGt() {
125
+ const std::uint64_t m = getLaneMaskLe();
126
+ return m ? ~m : m;
127
+ }
128
+ #else
129
+ __device__ __forceinline__ unsigned getLaneMaskGt() {
130
+ unsigned mask;
131
+ asm("mov.u32 %0, %%lanemask_gt;" : "=r"(mask));
132
+ return mask;
133
+ }
134
+ #endif
135
+
136
+ #if defined(USE_ROCM)
137
+ __device__ __forceinline__ unsigned long long int getLaneMaskGe() {
138
+ const std::uint64_t m = getLaneMaskLt();
139
+ return ~m;
140
+ }
141
+ #else
142
+ __device__ __forceinline__ unsigned getLaneMaskGe() {
143
+ unsigned mask;
144
+ asm("mov.u32 %0, %%lanemask_ge;" : "=r"(mask));
145
+ return mask;
146
+ }
147
+ #endif
148
+
149
+ } // namespace at::cuda
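A device-side sketch using the bitfield and lane-mask helpers above. It assumes the non-ROCm path (32-bit lane masks), and the kernel itself is purely illustrative.

#include <ATen/cuda/AsmUtils.cuh>

__global__ void bitfieldDemoKernel(const unsigned int* in, unsigned int* out, int n) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;

  // Extract bits [8, 16) of the input, then force bits [0, 4) to 0xF.
  unsigned int field   = at::cuda::Bitfield<unsigned int>::getBitfield(in[idx], /*pos=*/8, /*len=*/8);
  unsigned int patched = at::cuda::Bitfield<unsigned int>::setBitfield(in[idx], 0xF, /*pos=*/0, /*len=*/4);

  // Lane helpers: count how many lower-numbered lanes in the warp are active.
  unsigned lower_mask = at::cuda::getLaneMaskLt();
  int lower_active    = __popc(__activemask() & lower_mask);

  out[idx] = field + patched + static_cast<unsigned int>(lower_active + at::cuda::getLaneId());
}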
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh ADDED
@@ -0,0 +1,537 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/ApplyGridUtils.cuh>
4
+ #include <ATen/cuda/detail/IndexUtils.cuh>
5
+ #include <ATen/core/TensorBase.h>
6
+ #include <ATen/ceil_div.h>
7
+ #include <ATen/cuda/Atomic.cuh>
8
+ #include <ATen/cuda/CUDAContext.h>
9
+ #include <c10/macros/Macros.h>
10
+ #include <ATen/native/Copy.h>
11
+
12
+ #include <math.h>
13
+
14
+ //
15
+ // This file contains pointwise operation functions and kernels that
16
+ // work on both contiguous and non-contiguous tensor arguments of
17
+ // arbitrary (up to MAX_CUTORCH_DIMS) dimensionality without
18
+ // copying or temporary storage.
19
+ //
20
+
21
+ /*
22
+ NOTE [ CUDA_tensor_applyN helpers ]
23
+
24
+ The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4)
25
+ functions apply a pointwise operator to N tensor(s).
26
+
27
+ The calling convention is
28
+
29
+ 1. The template arguments should be, sequentially,
30
+ - First N typename args specify the scalar types of each of the N tensors.
31
+ - (Optional) `int step` arg specifies the number of elements processed
32
+ together at the same time.
33
+ Default is 1.
34
+ - A usually omitted (i.e., inferred) typename arg specifies the type of the
35
+ function/functor applied on `N * step` values in each iteration of each
36
+ CUDA thread.
37
+ 2. The arguments should be, sequentially,
38
+ - N tensors
39
+ - op: a function/functor that processes `N * step` values at the same time.
40
+ - If `step == 1`, it must have signature
41
+ `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where
42
+ `scalar*_t`s are the first N typename template args, and the inputs
43
+ are the `N` values from the `N` tensors retrieved at a common index.
44
+ - Otherwise, it must have signature
45
+ void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&, // repeat `step` times
46
+ scalar2_t&, scalar2_t&, ..., scalar2_t&, // repeat `step` times
47
+ ...,
48
+ scalarN_t&, scalarN_t&, ..., scalarN_t&) // repeat `step` times
49
+ Different from `step == 1` case, it processes `N * step` values taken
50
+ from `step` common indices. Moreover, the first input `n` represents the
51
+ number of valid indices (it will always have `0 < n <= step`). It will
52
+ almost always be `step`, but at the boundary we may not have full `step`
53
+ elements and `n` can be a lesser value.
54
+
55
+ E.g., if `step == 4` and `N == 2`, `op` could be
56
+
57
+ [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4,
58
+ scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) {
59
+ // Only process u1, ..., un and v1, ..., vn.
60
+ // So if `n == 3`, `u4` and `v4` need not to be considered.
61
+ }
62
+
63
+ In both cases, the references can actually be const, but at least one of
64
+ them should be non-const in order to write the output.
65
+ - (Optional, but recommended) N TensorArgType args that specify for each
66
+ tensor whether `op` reads AND writes (i.e., TensorArgType::ReadWrite),
67
+ or only reads (i.e., TensorArgType::ReadOnly).
68
+ Default is TensorArgType::ReadWrite for first Tensor, and
69
+ TensorArgType::ReadOnly for the rest.
70
+
71
+ E.g.,
72
+
73
+ to compute a = b^2 for a and b of same dtype, we can call
74
+
75
+ CUDA_tensor_apply2<scalar, scalar>(
76
+ a, b,
77
+ [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; }
78
+ );
79
+
80
+ to work on 2 values at the same time, we can call
81
+
82
+ CUDA_tensor_apply2<scalar1, scalar2, 2>(
83
+ a, b,
84
+ [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2,
85
+ const scalar2 &b_val1, const scalar2 &b_val2) {
86
+ // call special vectorized op here, or just do elementwise and enjoy unrolling...
87
+ // if n == 1, only process a_val1 and b_val1
88
+ }
89
+ );
90
+ */
91
+
92
+ namespace at::cuda {
93
+
94
+ // TODO: combine with TensorArg? So far that's been for debugging, and this is functional...
95
+ enum class TensorArgType { ReadWrite, ReadOnly };
96
+
97
+ namespace {
98
+
99
+ // Rearrange dimensions for pointwise operations so that strides are in
100
+ // decreasing order as much as possible, so that kernels have better memory
101
+ // access patterns.
102
+ //
103
+ // For example, consider a binary operation on two "transposed" 2-dim tensors:
104
+ // sizes: 256 512
105
+ // aInfo->strides: 1 256
106
+ // bInfo->strides: 1 256
107
+ //
108
+ // Given this, each concurrent memory access inside kernelPointwiseApply2() is
109
+ // exactly 256 elements apart, resulting in poor performance.
110
+ //
111
+ // This function exchanges dimensions so that memory access is contiguous:
112
+ // sizes: 512 256
113
+ // aInfo->strides: 256 1
114
+ // bInfo->strides: 256 1
115
+ //
116
+ // (Actually, it becomes even better because now collapseDims() can turn each
117
+ // input into one contiguous array.)
118
+ //
119
+ // In general, given M (<=4) TensorInfo's with N dimensions, we can view each
120
+ // strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange
121
+ // strides[i] and [j] if
122
+ // (1) strides[i][k] < strides[j][k] for some k (0 <= k < M)
123
+ // (exchanging them will benefit input #k), and
124
+ // (2) strides[i][k] <= strides[j][k] for all k
125
+ // (exchanging them will not make any input worse).
126
+ template <typename T1, typename IndexType,
127
+ typename T2 = void, typename T3 = void, typename T4 = void>
128
+ inline void rearrangeDims(detail::TensorInfo<T1, IndexType>* aInfo,
129
+ detail::TensorInfo<T2, IndexType>* bInfo = nullptr,
130
+ detail::TensorInfo<T3, IndexType>* cInfo = nullptr,
131
+ detail::TensorInfo<T4, IndexType>* dInfo = nullptr) {
132
+ int numInfos = 1;
133
+ int dims = aInfo->dims;
134
+ IndexType *sizes[4] = { aInfo->sizes, };
135
+ IndexType *strides[4] = { aInfo->strides, };
136
+
137
+ if (bInfo != nullptr) {
138
+ ++numInfos;
139
+ if (bInfo->dims != dims) return;
140
+ sizes[1] = bInfo->sizes;
141
+ strides[1] = bInfo->strides;
142
+ }
143
+
144
+ if (cInfo != nullptr) {
145
+ ++numInfos;
146
+ if (cInfo->dims != dims) return;
147
+ sizes[2] = cInfo->sizes;
148
+ strides[2] = cInfo->strides;
149
+ }
150
+
151
+ if (dInfo != nullptr) {
152
+ ++numInfos;
153
+ if (dInfo->dims != dims) return;
154
+ sizes[3] = dInfo->sizes;
155
+ strides[3] = dInfo->strides;
156
+ }
157
+
158
+ // Bail out if sizes do not match: we are using "deprecated pointwise
159
+ // behavior" among tensors of different shapes but same number of elements.
160
+ for (int i = 1; i < numInfos; ++i) {
161
+ for (int j = 0; j < dims; ++j) {
162
+ if (sizes[i][j] != sizes[0][j]) return;
163
+ }
164
+ }
165
+
166
+ for (int i = 0; i < dims - 1; ++i) {
167
+ // No need to consider dimensions of size 1.
168
+ if (sizes[0][i] == 1) continue;
169
+
170
+ for (int j = i + 1; j < dims; ++j) {
171
+ if (sizes[0][j] == 1) continue;
172
+
173
+ // Compare the relative sizes of strides between dim #i and dim #j.
174
+ bool hasIncreasingStrides = false;
175
+ bool hasDecreasingStrides = false;
176
+
177
+ for (int k = 0; k < numInfos; k++) {
178
+ IndexType stride_i = strides[k][i];
179
+ IndexType stride_j = strides[k][j];
180
+ if (stride_i < stride_j) {
181
+ hasIncreasingStrides = true;
182
+ } else if (stride_i > stride_j) {
183
+ hasDecreasingStrides = true;
184
+ }
185
+ }
186
+
187
+ if (hasIncreasingStrides && !hasDecreasingStrides) {
188
+ for (int k = 0; k < numInfos; k++) {
189
+ IndexType size = sizes[k][i];
190
+ sizes[k][i] = sizes[k][j];
191
+ sizes[k][j] = size;
192
+
193
+ IndexType stride = strides[k][i];
194
+ strides[k][i] = strides[k][j];
195
+ strides[k][j] = stride;
196
+ }
197
+ }
198
+ }
199
+ }
200
+ }
201
+
202
+ // The `remaining_steps` argument is used to support Op that operates on
203
+ // multiple elements at the same time. Generally, the strategy of ApplyOpN is to
204
+ // 1. Initialize `remaining_steps = step`, where `step` is the template arg of
205
+ // CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the
206
+ // number of elements in bound for this call. It will almost always be equal to
207
+ // `step` except at boundaries.
208
+ // 2. If `remaining_steps > 0` convert the current linearIndex to offset (if in
209
+ // bound), and recursively call `ApplyOpN` with `remaining_steps - 1`.
210
+ // 3. At `remaining_steps = 0`,
211
+ // if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`;
212
+ // if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tensor1_valstep,
213
+ // tensor2_val1, tensor2_val2, ..., tensor2_valstep,
214
+ // ...
215
+ // tensorN_val1, tensorN_val2, ..., tensorN_valstep);`
216
+ //
217
+ // See NOTE [ CUDA_tensor_applyN helpers ] above for what Op may look like.
218
+
219
+ template <typename Op,
220
+ typename scalar,
221
+ typename IndexType,
222
+ int ADims,
223
+ int remaining_steps,
224
+ typename... Offsets>
225
+ struct ApplyOp1 {
226
+ __device__ __forceinline__
227
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
228
+ IndexType linearIndex, Offsets... aOffsets) {
229
+ // Convert `linearIndex` into an offset of `a`
230
+ const IndexType aOffset = sizeof...(Offsets) < n ?
231
+ detail::IndexToOffset<scalar, IndexType, ADims>::get(linearIndex, a) : 0;
232
+
233
+ ApplyOp1<Op, scalar, IndexType, ADims, remaining_steps - 1, const IndexType, Offsets...>::apply(
234
+ a, op, n, linearIndex + 1, aOffsets..., aOffset
235
+ );
236
+ }
237
+ };
238
+
239
+ // Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
240
+ // We don't need to pass in how many elements need to processed in this case.
241
+ template <typename Op,
242
+ typename scalar,
243
+ typename IndexType,
244
+ int ADims,
245
+ typename Offset>
246
+ struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offset> {
247
+ __device__ __forceinline__
248
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op,
249
+ int n, IndexType linearIndex, Offset offset) {
250
+ op(a.data[offset]);
251
+ }
252
+ };
253
+
254
+ template <typename Op,
255
+ typename scalar,
256
+ typename IndexType,
257
+ int ADims,
258
+ typename... Offsets>
259
+ struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offsets...> {
260
+ __device__ __forceinline__
261
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
262
+ IndexType linearIndex, Offsets... offsets) {
263
+ op(n, a.data[offsets]...);
264
+ }
265
+ };
266
+
267
+ template <typename Op,
268
+ typename scalar,
269
+ typename IndexType,
270
+ int ADims,
271
+ int step>
272
+ #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
273
+ C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
274
+ #endif
275
+ __global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
276
+ IndexType totalElements, const Op op) {
277
+ for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
278
+ linearIndex < totalElements;
279
+ linearIndex += gridDim.x * blockDim.x * step) {
280
+ ApplyOp1<Op, scalar, IndexType, ADims, step>::apply(
281
+ a, op, ::min(step, static_cast<int>(totalElements - linearIndex)), linearIndex);
282
+ }
283
+ }
284
+
285
+
286
+ template <typename Op,
287
+ typename scalar1,
288
+ typename scalar2,
289
+ typename IndexType,
290
+ int ADims,
291
+ int BDims,
292
+ int remaining_steps,
293
+ typename... Offsets>
294
+ struct ApplyOp2 {
295
+ __device__ __forceinline__
296
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
297
+ detail::TensorInfo<scalar2, IndexType> &b,
298
+ const Op &op, int64_t n, IndexType linearIndex,
299
+ Offsets... aOffsets, Offsets... bOffsets) {
300
+ // Convert `linearIndex` into an offset of `a`
301
+ const IndexType aOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
302
+ detail::IndexToOffset<scalar1, IndexType, ADims>::get(linearIndex, a) : 0;
303
+
304
+ // Convert `linearIndex` into an offset of `b`
305
+ const IndexType bOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
306
+ detail::IndexToOffset<scalar2, IndexType, BDims>::get(linearIndex, b) : 0;
307
+
308
+ ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, remaining_steps - 1, const IndexType, Offsets...>::apply(
309
+ a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset
310
+ );
311
+ }
312
+ };
313
+
314
+ // Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
315
+ // We don't need to pass in how many elements need to processed in this case.
316
+ template <typename Op,
317
+ typename scalar1,
318
+ typename scalar2,
319
+ typename IndexType,
320
+ int ADims,
321
+ int BDims,
322
+ typename Offset>
323
+ struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offset> {
324
+ __device__ __forceinline__
325
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
326
+ detail::TensorInfo<scalar2, IndexType> &b,
327
+ const Op &op, int /*n*/, IndexType /*linearIndex*/,
328
+ Offset aOffset, Offset bOffset) {
329
+ op(a.data[aOffset], b.data[bOffset]);
330
+ }
331
+ };
332
+
333
+ template <typename Op,
334
+ typename scalar1,
335
+ typename scalar2,
336
+ typename IndexType,
337
+ int ADims,
338
+ int BDims,
339
+ typename... Offsets>
340
+ struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offsets...> {
341
+ __device__ __forceinline__
342
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
343
+ detail::TensorInfo<scalar2, IndexType> &b,
344
+ const Op &op, int n, IndexType linearIndex,
345
+ Offsets... aOffsets, Offsets... bOffsets) {
346
+ op(n, a.data[aOffsets]..., b.data[bOffsets]...);
347
+ }
348
+ };
349
+
350
+ template <typename Op,
351
+ typename scalar1,
352
+ typename scalar2,
353
+ typename IndexType,
354
+ int ADims, int BDims,
355
+ int step,
356
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
357
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
358
+ #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
359
+ C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm)
360
+ #endif
361
+ __global__ void
362
+ kernelPointwiseApply2(detail::TensorInfo<scalar1, IndexType> a,
363
+ detail::TensorInfo<scalar2, IndexType> b,
364
+ IndexType totalElements,
365
+ const Op op) {
366
+ for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
367
+ linearIndex < totalElements;
368
+ linearIndex += gridDim.x * blockDim.x * step) {
369
+ ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, step>::apply(
370
+ a, b, op, ::min(step, static_cast<int>(totalElements - linearIndex)),
371
+ linearIndex);
372
+ }
373
+ }
374
+
375
+ } // anonymous namespace
376
+
377
+ template <typename scalar1, typename scalar2, int step, typename Op,
378
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
379
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
380
+ inline bool CUDA_tensor_apply2(at::TensorBase a,
381
+ at::TensorBase b,
382
+ const Op op,
383
+ TensorArgType aType = TensorArgType::ReadWrite,
384
+ TensorArgType bType = TensorArgType::ReadOnly) {
385
+ TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(),
386
+ "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got "
387
+ "tensors with type ", a.device().type(), " and ", b.device().type());
388
+ int64_t totalElements = a.numel();
389
+
390
+ if (totalElements != b.numel()) {
391
+ return false;
392
+ }
393
+
394
+ if (a.dim() > MAX_TENSORINFO_DIMS ||
395
+ b.dim() > MAX_TENSORINFO_DIMS) {
396
+ return false;
397
+ }
398
+
399
+ if (a.numel() == 0) {
400
+ // Empty tensor; do nothing
401
+ return true;
402
+ }
403
+ const dim3 block = getApplyBlock(max_threads_per_block);
404
+
405
+ dim3 grid;
406
+ auto curDevice = current_device();
407
+ if (curDevice == -1) return false;
408
+ if (!getApplyGrid<step>(totalElements, grid, curDevice, max_threads_per_block)) {
409
+ return false;
410
+ }
411
+
412
+ /*
413
+ Expands readable/writable tensors whose indices may be "overlapped."
414
+ This ensures that each element of the tensor is operated on once and only
415
+ once.
416
+ */
417
+ TensorBase oldA;
418
+ TensorBase oldB;
419
+
420
+ if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) {
421
+ // Must perform in contiguous space
422
+ oldA = std::exchange(a, a.contiguous());
423
+ }
424
+ if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) {
425
+ // Must perform in contiguous space
426
+ oldB = std::exchange(b, b.contiguous());
427
+ }
428
+
429
+ // It is possible that the tensor dimensions are able to be collapsed,
430
+ // and thus we can reduce the actual code complexity of the copy by
431
+ // exploiting this knowledge statically, since the div/mod is the
432
+ // most expensive part of the operation, more so than memory accesses.
433
+ // For instance, when copying a non-contiguous to a contiguous tensor
434
+ // (or vice versa), the contiguous tensor can be collapsed to one
435
+ // dimension, and the loop to translate the linear index to the array
436
+ // index can be similarly collapsed. That is what this unrolling is for.
437
+
438
+ #define HANDLE_CASE(TYPE, A, B) \
439
+ kernelPointwiseApply2<Op, \
440
+ scalar1, \
441
+ scalar2, \
442
+ TYPE, A, B, step, \
443
+ max_threads_per_block, \
444
+ min_blocks_per_sm> \
445
+ <<<grid, block, 0, at::cuda::getCurrentCUDAStream(curDevice)>>>( \
446
+ aInfo, bInfo, static_cast<TYPE>(totalElements), op); \
447
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
448
+
449
+ #define HANDLE_B_CASE(TYPE, A, B) { \
450
+ switch (B) { \
451
+ case 1: \
452
+ HANDLE_CASE(TYPE, A, 1); \
453
+ break; \
454
+ case 2: \
455
+ HANDLE_CASE(TYPE, A, 2); \
456
+ break; \
457
+ default: \
458
+ HANDLE_CASE(TYPE, A, -1); \
459
+ break; \
460
+ } \
461
+ }
462
+
463
+ #define HANDLE_A_CASE(TYPE, A, B) { \
464
+ switch (A) { \
465
+ case 1: \
466
+ HANDLE_B_CASE(TYPE, 1, B); \
467
+ break; \
468
+ case 2: \
469
+ HANDLE_B_CASE(TYPE, 2, B); \
470
+ break; \
471
+ default: \
472
+ HANDLE_B_CASE(TYPE, -1, B); \
473
+ break; \
474
+ } \
475
+ }
476
+
477
+ if (detail::canUse32BitIndexMath(a) &&
478
+ detail::canUse32BitIndexMath(b)) {
479
+ detail::TensorInfo<scalar1, unsigned int> aInfo =
480
+ detail::getTensorInfo<scalar1, unsigned int>(a);
481
+
482
+ detail::TensorInfo<scalar2, unsigned int> bInfo =
483
+ detail::getTensorInfo<scalar2, unsigned int>(b);
484
+ rearrangeDims(&aInfo, &bInfo);
485
+ aInfo.collapseDims();
486
+ bInfo.collapseDims();
487
+
488
+ HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims);
489
+ } else {
490
+ detail::TensorInfo<scalar1, uint64_t> aInfo =
491
+ detail::getTensorInfo<scalar1, uint64_t>(a);
492
+
493
+ detail::TensorInfo<scalar2, uint64_t> bInfo =
494
+ detail::getTensorInfo<scalar2, uint64_t>(b);
495
+ rearrangeDims(&aInfo, &bInfo);
496
+ aInfo.collapseDims();
497
+ bInfo.collapseDims();
498
+
499
+ /*
500
+ Only instantiates the all 1D special case and the fallback all nD case for
501
+ large (64-bit indexed) tensors to reduce compilation time.
502
+ */
503
+ if (aInfo.dims == 1 && bInfo.dims == 1) {
504
+ HANDLE_CASE(uint64_t, 1, 1);
505
+ } else {
506
+ HANDLE_CASE(uint64_t, -1, -1);
507
+ }
508
+ }
509
+ #undef HANDLE_CASE
510
+ #undef HANDLE_B_CASE
511
+ #undef HANDLE_A_CASE
512
+
513
+ if (oldA.defined()) {
514
+ at::native::copy_ignoring_overlaps(oldA, a);
515
+ }
516
+
517
+ if (oldB.defined()) {
518
+ at::native::copy_ignoring_overlaps(oldB, b);
519
+ }
520
+
521
+ return true;
522
+ }
523
+
524
+ /* Provides default step = 1 to CUDA_tensor_apply2. */
525
+ template <typename scalar1, typename scalar2, typename Op,
526
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
527
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
528
+ inline bool CUDA_tensor_apply2(const at::TensorBase &a,
529
+ const at::TensorBase &b,
530
+ const Op op,
531
+ TensorArgType aType = TensorArgType::ReadWrite,
532
+ TensorArgType bType = TensorArgType::ReadOnly) {
533
+ return CUDA_tensor_apply2<scalar1, scalar2, 1, Op,
534
+ max_threads_per_block, min_blocks_per_sm>(a, b, op, aType, bType);
535
+ }
536
+
537
+ } // namespace at::cuda
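A minimal caller sketch for CUDA_tensor_apply2 above, mirroring the a = b^2 example from the NOTE. It assumes two float CUDA tensors of equal numel and that extended __device__ lambdas are enabled (nvcc --extended-lambda); the wrapper function itself is illustrative.

#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <c10/util/Exception.h>

void squareInto(const at::TensorBase& a, const at::TensorBase& b) {
  // a defaults to ReadWrite, b to ReadOnly; step defaults to 1.
  bool ok = at::cuda::CUDA_tensor_apply2<float, float>(
      a, b,
      [] __device__(float& a_val, const float& b_val) { a_val = b_val * b_val; });
  TORCH_CHECK(ok, "CUDA_tensor_apply2 bailed out (size mismatch or too many dims)");
}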
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ // Test these using #if AT_CUDNN_ENABLED(), not #ifdef, so that it's
4
+ // obvious if you forgot to include Config.h
5
+ // c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
6
+ //
7
+ // NB: This header MUST NOT be included from other headers; it should
8
+ // only be included from C++ files.
9
+ #define AT_CUDNN_ENABLED() 1
10
+ #define AT_CUSPARSELT_ENABLED() 1
11
+ #define AT_ROCM_ENABLED() 0
12
+ #define AT_MAGMA_ENABLED() 1
13
+
14
+ // Needed for hipMAGMA to correctly identify implementation
15
+ #if (AT_ROCM_ENABLED() && AT_MAGMA_ENABLED())
16
+ #define HAVE_HIP 1
17
+ #endif
18
+
19
+ #define NVCC_FLAGS_EXTRA "-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90"
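A tiny sketch of how these function-style macros are meant to be tested from a .cpp file (never from another header), following the #if-not-#ifdef convention described in the comment above; the wrapper function is illustrative.

#include <ATen/cuda/CUDAConfig.h>

bool builtWithCuDNN() {
#if AT_CUDNN_ENABLED()
  return true;
#else
  return false;
#endif
}

// Using #if AT_CUDNN_ENABLED() rather than #ifdef turns a forgotten include
// into a hard compile error instead of silently behaving as "disabled".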
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h ADDED
@@ -0,0 +1,208 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/ATenCUDAGeneral.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <c10/core/impl/GPUTrace.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <c10/cuda/CUDAGuard.h>
8
+ #include <ATen/cuda/Exceptions.h>
9
+ #include <c10/util/Exception.h>
10
+
11
+ #include <cuda_runtime_api.h>
12
+
13
+ #include <cstdint>
14
+ #include <utility>
15
+
16
+ namespace at::cuda {
17
+
18
+ /*
19
+ * CUDAEvents are movable not copyable wrappers around CUDA's events.
20
+ *
21
+ * CUDAEvents are constructed lazily when first recorded unless it is
22
+ * reconstructed from a cudaIpcEventHandle_t. The event has a device, and this
23
+ * device is acquired from the first recording stream. However, if reconstructed
24
+ * from a handle, the device should be explicitly specified; or if ipc_handle() is
25
+ * called before the event is ever recorded, it will use the current device.
26
+ * Later streams that record the event must match this device.
27
+ */
28
+ struct TORCH_CUDA_CPP_API CUDAEvent {
29
+ // Constructors
30
+ // Default value for `flags` is specified below - it's cudaEventDisableTiming
31
+ CUDAEvent() noexcept = default;
32
+ CUDAEvent(unsigned int flags) noexcept : flags_{flags} {}
33
+
34
+ CUDAEvent(
35
+ DeviceIndex device_index, const cudaIpcEventHandle_t* handle) {
36
+ device_index_ = device_index;
37
+ CUDAGuard guard(device_index_);
38
+
39
+ AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle));
40
+ is_created_ = true;
41
+ }
42
+
43
+ // Note: event destruction done on creating device to avoid creating a
44
+ // CUDA context on other devices.
45
+ ~CUDAEvent() {
46
+ try {
47
+ if (is_created_) {
48
+ CUDAGuard guard(device_index_);
49
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
50
+ if (C10_UNLIKELY(interp)) {
51
+ (*interp)->trace_gpu_event_deletion(reinterpret_cast<uintptr_t>(event_));
52
+ }
53
+ AT_CUDA_CHECK(cudaEventDestroy(event_));
54
+ }
55
+ } catch (...) { /* No throw */ }
56
+ }
57
+
58
+ CUDAEvent(const CUDAEvent&) = delete;
59
+ CUDAEvent& operator=(const CUDAEvent&) = delete;
60
+
61
+ CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); }
62
+ CUDAEvent& operator=(CUDAEvent&& other) noexcept {
63
+ if (this != &other) {
64
+ moveHelper(std::move(other));
65
+ }
66
+ return *this;
67
+ }
68
+
69
+ operator cudaEvent_t() const { return event(); }
70
+
71
+ // Less than operator (to allow use in sets)
72
+ friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) {
73
+ return left.event_ < right.event_;
74
+ }
75
+
76
+ optional<at::Device> device() const {
77
+ if (is_created_) {
78
+ return at::Device(at::kCUDA, device_index_);
79
+ } else {
80
+ return {};
81
+ }
82
+ }
83
+
84
+ bool isCreated() const { return is_created_; }
85
+ DeviceIndex device_index() const {return device_index_;}
86
+ cudaEvent_t event() const { return event_; }
87
+
88
+ // Note: cudaEventQuery can be safely called from any device
89
+ bool query() const {
90
+ if (!is_created_) {
91
+ return true;
92
+ }
93
+
94
+ cudaError_t err = cudaEventQuery(event_);
95
+ if (err == cudaSuccess) {
96
+ return true;
97
+ } else if (err != cudaErrorNotReady) {
98
+ C10_CUDA_CHECK(err);
99
+ } else {
100
+ // ignore and clear the error if not ready
101
+ (void)cudaGetLastError();
102
+ }
103
+
104
+ return false;
105
+ }
106
+
107
+ void record() { record(getCurrentCUDAStream()); }
108
+
109
+ void recordOnce(const CUDAStream& stream) {
110
+ if (!was_recorded_) record(stream);
111
+ }
112
+
113
+ // Note: cudaEventRecord must be called on the same device as the event.
114
+ void record(const CUDAStream& stream) {
115
+ if (!is_created_) {
116
+ createEvent(stream.device_index());
117
+ }
118
+
119
+ TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_,
120
+ " does not match recording stream's device ", stream.device_index(), ".");
121
+ CUDAGuard guard(device_index_);
122
+ AT_CUDA_CHECK(cudaEventRecord(event_, stream));
123
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
124
+ if (C10_UNLIKELY(interp)) {
125
+ (*interp)->trace_gpu_event_record(
126
+ reinterpret_cast<uintptr_t>(event_),
127
+ reinterpret_cast<uintptr_t>(stream.stream())
128
+ );
129
+ }
130
+ was_recorded_ = true;
131
+ }
132
+
133
+ // Note: cudaStreamWaitEvent must be called on the same device as the stream.
134
+ // The event has no actual GPU resources associated with it.
135
+ void block(const CUDAStream& stream) {
136
+ if (is_created_) {
137
+ CUDAGuard guard(stream.device_index());
138
+ AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0));
139
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
140
+ if (C10_UNLIKELY(interp)) {
141
+ (*interp)->trace_gpu_event_wait(
142
+ reinterpret_cast<uintptr_t>(event_),
143
+ reinterpret_cast<uintptr_t>(stream.stream())
144
+ );
145
+ }
146
+ }
147
+ }
148
+
149
+ // Note: cudaEventElapsedTime can be safely called from any device
150
+ float elapsed_time(const CUDAEvent& other) const {
151
+ TORCH_CHECK(is_created_ && other.isCreated(),
152
+ "Both events must be recorded before calculating elapsed time.");
153
+ float time_ms = 0;
154
+ // raise cudaErrorNotReady if either event is recorded but not yet completed
155
+ AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_));
156
+ return time_ms;
157
+ }
158
+
159
+ // Note: cudaEventSynchronize can be safely called from any device
160
+ void synchronize() const {
161
+ if (is_created_) {
162
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
163
+ if (C10_UNLIKELY(interp)) {
164
+ (*interp)->trace_gpu_event_synchronization(reinterpret_cast<uintptr_t>(event_));
165
+ }
166
+ AT_CUDA_CHECK(cudaEventSynchronize(event_));
167
+ }
168
+ }
169
+
170
+ // Note: cudaIpcGetEventHandle must be called on the same device as the event
171
+ void ipc_handle(cudaIpcEventHandle_t * handle) {
172
+ if (!is_created_) {
173
+ // this CUDAEvent object was initially constructed from flags but event_
174
+ // is not created yet.
175
+ createEvent(getCurrentCUDAStream().device_index());
176
+ }
177
+ CUDAGuard guard(device_index_);
178
+ AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_));
179
+ }
180
+
181
+ private:
182
+ unsigned int flags_ = cudaEventDisableTiming;
183
+ bool is_created_ = false;
184
+ bool was_recorded_ = false;
185
+ DeviceIndex device_index_ = -1;
186
+ cudaEvent_t event_{};
187
+
188
+ void createEvent(DeviceIndex device_index) {
189
+ device_index_ = device_index;
190
+ CUDAGuard guard(device_index_);
191
+ AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_));
192
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
193
+ if (C10_UNLIKELY(interp)) {
194
+ (*interp)->trace_gpu_event_creation(reinterpret_cast<uintptr_t>(event_));
195
+ }
196
+ is_created_ = true;
197
+ }
198
+
199
+ void moveHelper(CUDAEvent&& other) {
200
+ std::swap(flags_, other.flags_);
201
+ std::swap(is_created_, other.is_created_);
202
+ std::swap(was_recorded_, other.was_recorded_);
203
+ std::swap(device_index_, other.device_index_);
204
+ std::swap(event_, other.event_);
205
+ }
206
+ };
207
+
208
+ } // namespace at::cuda
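A host-side sketch of the CUDAEvent wrapper above, timing work on the current stream. cudaEventDefault is passed so timing is enabled (the wrapper's default flag, cudaEventDisableTiming, would make elapsed_time unusable); getCurrentCUDAStream() is assumed to come from the stream headers this file already includes.

#include <ATen/cuda/CUDAEvent.h>

float timeRegionMs() {
  at::cuda::CUDAEvent start(cudaEventDefault);
  at::cuda::CUDAEvent stop(cudaEventDefault);

  auto stream = at::cuda::getCurrentCUDAStream();
  start.record(stream);              // lazily creates the event on stream's device
  // ... enqueue kernels on `stream` here ...
  stop.record(stream);

  stop.synchronize();                // block the host until `stop` has completed
  return start.elapsed_time(stop);   // milliseconds between the two records
}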
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h ADDED
@@ -0,0 +1,290 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/cuda/CUDASparse.h>
6
+
7
+ #include <c10/core/ScalarType.h>
8
+
9
+ #if defined(USE_ROCM)
10
+ #include <type_traits>
11
+ #endif
12
+
13
+ namespace at::cuda::sparse {
14
+
15
+ template <typename T, cusparseStatus_t (*destructor)(T*)>
16
+ struct CuSparseDescriptorDeleter {
17
+ void operator()(T* x) {
18
+ if (x != nullptr) {
19
+ TORCH_CUDASPARSE_CHECK(destructor(x));
20
+ }
21
+ }
22
+ };
23
+
24
+ template <typename T, cusparseStatus_t (*destructor)(T*)>
25
+ class CuSparseDescriptor {
26
+ public:
27
+ T* descriptor() const {
28
+ return descriptor_.get();
29
+ }
30
+ T* descriptor() {
31
+ return descriptor_.get();
32
+ }
33
+
34
+ protected:
35
+ std::unique_ptr<T, CuSparseDescriptorDeleter<T, destructor>> descriptor_;
36
+ };
37
+
38
+ #if AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
39
+ template <typename T, cusparseStatus_t (*destructor)(const T*)>
40
+ struct ConstCuSparseDescriptorDeleter {
41
+ void operator()(T* x) {
42
+ if (x != nullptr) {
43
+ TORCH_CUDASPARSE_CHECK(destructor(x));
44
+ }
45
+ }
46
+ };
47
+
48
+ template <typename T, cusparseStatus_t (*destructor)(const T*)>
49
+ class ConstCuSparseDescriptor {
50
+ public:
51
+ T* descriptor() const {
52
+ return descriptor_.get();
53
+ }
54
+ T* descriptor() {
55
+ return descriptor_.get();
56
+ }
57
+
58
+ protected:
59
+ std::unique_ptr<T, ConstCuSparseDescriptorDeleter<T, destructor>> descriptor_;
60
+ };
61
+ #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS
62
+
63
+ #if defined(USE_ROCM)
64
+ using cusparseMatDescr = std::remove_pointer<hipsparseMatDescr_t>::type;
65
+ using cusparseDnMatDescr = std::remove_pointer<hipsparseDnMatDescr_t>::type;
66
+ using cusparseDnVecDescr = std::remove_pointer<hipsparseDnVecDescr_t>::type;
67
+ using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
68
+ using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
69
+ using cusparseSpGEMMDescr = std::remove_pointer<hipsparseSpGEMMDescr_t>::type;
70
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
71
+ using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type;
72
+ using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type;
73
+ #endif
74
+ #endif
75
+
76
+ // NOTE: This is only needed for CUDA 11 and earlier, since CUDA 12 introduced
77
+ // API for const descriptors
78
+ cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr);
79
+
80
+ class TORCH_CUDA_CPP_API CuSparseMatDescriptor
81
+ : public CuSparseDescriptor<cusparseMatDescr, &cusparseDestroyMatDescr> {
82
+ public:
83
+ CuSparseMatDescriptor() {
84
+ cusparseMatDescr_t raw_descriptor;
85
+ TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
86
+ descriptor_.reset(raw_descriptor);
87
+ }
88
+
89
+ CuSparseMatDescriptor(bool upper, bool unit) {
90
+ cusparseFillMode_t fill_mode =
91
+ upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
92
+ cusparseDiagType_t diag_type =
93
+ unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
94
+ cusparseMatDescr_t raw_descriptor;
95
+ TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
96
+ TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode));
97
+ TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type));
98
+ descriptor_.reset(raw_descriptor);
99
+ }
100
+ };
101
+
102
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
103
+
104
+ class TORCH_CUDA_CPP_API CuSparseBsrsv2Info
105
+ : public CuSparseDescriptor<bsrsv2Info, &cusparseDestroyBsrsv2Info> {
106
+ public:
107
+ CuSparseBsrsv2Info() {
108
+ bsrsv2Info_t raw_descriptor;
109
+ TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor));
110
+ descriptor_.reset(raw_descriptor);
111
+ }
112
+ };
113
+
114
+ class TORCH_CUDA_CPP_API CuSparseBsrsm2Info
115
+ : public CuSparseDescriptor<bsrsm2Info, &cusparseDestroyBsrsm2Info> {
116
+ public:
117
+ CuSparseBsrsm2Info() {
118
+ bsrsm2Info_t raw_descriptor;
119
+ TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor));
120
+ descriptor_.reset(raw_descriptor);
121
+ }
122
+ };
123
+
124
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
125
+
126
+ #if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
127
+
128
+ cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type);
129
+
130
+ #if AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS()
131
+ class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
132
+ : public CuSparseDescriptor<cusparseDnMatDescr, &cusparseDestroyDnMat> {
133
+ public:
134
+ explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
135
+ };
136
+
137
+ class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor
138
+ : public CuSparseDescriptor<const cusparseDnMatDescr, &destroyConstDnMat> {
139
+ public:
140
+ explicit CuSparseConstDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
141
+ cusparseDnMatDescr* unsafe_mutable_descriptor() const {
142
+ return const_cast<cusparseDnMatDescr*>(descriptor());
143
+ }
144
+ cusparseDnMatDescr* unsafe_mutable_descriptor() {
145
+ return const_cast<cusparseDnMatDescr*>(descriptor());
146
+ }
147
+ };
148
+
149
+ class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
150
+ : public CuSparseDescriptor<cusparseDnVecDescr, &cusparseDestroyDnVec> {
151
+ public:
152
+ explicit CuSparseDnVecDescriptor(const Tensor& input);
153
+ };
154
+
155
+ class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
156
+ : public CuSparseDescriptor<cusparseSpMatDescr, &cusparseDestroySpMat> {};
157
+
158
+ #elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
159
+ class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
160
+ : public ConstCuSparseDescriptor<
161
+ cusparseDnMatDescr,
162
+ &cusparseDestroyDnMat> {
163
+ public:
164
+ explicit CuSparseDnMatDescriptor(
165
+ const Tensor& input,
166
+ int64_t batch_offset = -1);
167
+ };
168
+
169
+ class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor
170
+ : public ConstCuSparseDescriptor<
171
+ const cusparseDnMatDescr,
172
+ &destroyConstDnMat> {
173
+ public:
174
+ explicit CuSparseConstDnMatDescriptor(
175
+ const Tensor& input,
176
+ int64_t batch_offset = -1);
177
+ cusparseDnMatDescr* unsafe_mutable_descriptor() const {
178
+ return const_cast<cusparseDnMatDescr*>(descriptor());
179
+ }
180
+ cusparseDnMatDescr* unsafe_mutable_descriptor() {
181
+ return const_cast<cusparseDnMatDescr*>(descriptor());
182
+ }
183
+ };
184
+
185
+ class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
186
+ : public ConstCuSparseDescriptor<
187
+ cusparseDnVecDescr,
188
+ &cusparseDestroyDnVec> {
189
+ public:
190
+ explicit CuSparseDnVecDescriptor(const Tensor& input);
191
+ };
192
+
193
+ class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
194
+ : public ConstCuSparseDescriptor<
195
+ cusparseSpMatDescr,
196
+ &cusparseDestroySpMat> {};
197
+ #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
198
+
199
+ class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor
200
+ : public CuSparseSpMatDescriptor {
201
+ public:
202
+ explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1);
203
+
204
+ std::tuple<int64_t, int64_t, int64_t> get_size() {
205
+ int64_t rows, cols, nnz;
206
+ TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize(
207
+ this->descriptor(),
208
+ &rows,
209
+ &cols,
210
+ &nnz));
211
+ return std::make_tuple(rows, cols, nnz);
212
+ }
213
+
214
+ void set_tensor(const Tensor& input) {
215
+ auto crow_indices = input.crow_indices();
216
+ auto col_indices = input.col_indices();
217
+ auto values = input.values();
218
+
219
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous());
220
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous());
221
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous());
222
+ TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers(
223
+ this->descriptor(),
224
+ crow_indices.data_ptr(),
225
+ col_indices.data_ptr(),
226
+ values.data_ptr()));
227
+ }
228
+
229
+ #if AT_USE_CUSPARSE_GENERIC_SPSV()
230
+ void set_mat_fill_mode(bool upper) {
231
+ cusparseFillMode_t fill_mode =
232
+ upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
233
+ TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
234
+ this->descriptor(),
235
+ CUSPARSE_SPMAT_FILL_MODE,
236
+ &fill_mode,
237
+ sizeof(fill_mode)));
238
+ }
239
+
240
+ void set_mat_diag_type(bool unit) {
241
+ cusparseDiagType_t diag_type =
242
+ unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
243
+ TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
244
+ this->descriptor(),
245
+ CUSPARSE_SPMAT_DIAG_TYPE,
246
+ &diag_type,
247
+ sizeof(diag_type)));
248
+ }
249
+ #endif
250
+ };
251
+
252
+ #if AT_USE_CUSPARSE_GENERIC_SPSV()
253
+ class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor
254
+ : public CuSparseDescriptor<cusparseSpSVDescr, &cusparseSpSV_destroyDescr> {
255
+ public:
256
+ CuSparseSpSVDescriptor() {
257
+ cusparseSpSVDescr_t raw_descriptor;
258
+ TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor));
259
+ descriptor_.reset(raw_descriptor);
260
+ }
261
+ };
262
+ #endif
263
+
264
+ #if AT_USE_CUSPARSE_GENERIC_SPSM()
265
+ class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor
266
+ : public CuSparseDescriptor<cusparseSpSMDescr, &cusparseSpSM_destroyDescr> {
267
+ public:
268
+ CuSparseSpSMDescriptor() {
269
+ cusparseSpSMDescr_t raw_descriptor;
270
+ TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor));
271
+ descriptor_.reset(raw_descriptor);
272
+ }
273
+ };
274
+ #endif
275
+
276
+ #if (defined(USE_ROCM) && ROCM_VERSION >= 50200) || !defined(USE_ROCM)
277
+ class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor
278
+ : public CuSparseDescriptor<cusparseSpGEMMDescr, &cusparseSpGEMM_destroyDescr> {
279
+ public:
280
+ CuSparseSpGEMMDescriptor() {
281
+ cusparseSpGEMMDescr_t raw_descriptor;
282
+ TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor));
283
+ descriptor_.reset(raw_descriptor);
284
+ }
285
+ };
286
+ #endif
287
+
288
+ #endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
289
+
290
+ } // namespace at::cuda::sparse
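Every descriptor class in CUDASparseDescriptors.h follows the same shape: a std::unique_ptr whose deleter forwards to the matching cusparse*Destroy* call and routes the returned status through TORCH_CUDASPARSE_CHECK. A hedged usage sketch is below; make_triangular_descr() is a hypothetical helper and relies only on the CuSparseMatDescriptor constructor declared above.

    // Hedged usage sketch for the RAII wrappers declared above.
    // make_triangular_descr() is a hypothetical free function, not part of ATen.
    #include <ATen/cuda/CUDASparseDescriptors.h>

    namespace {

    // Describe an upper-triangular, non-unit-diagonal matrix. The wrapper frees
    // the descriptor via cusparseDestroyMatDescr when it goes out of scope.
    at::cuda::sparse::CuSparseMatDescriptor make_triangular_descr() {
      constexpr bool upper = true;
      constexpr bool unit_diagonal = false;
      return at::cuda::sparse::CuSparseMatDescriptor(upper, unit_diagonal);
    }

    void descriptor_example() {
      auto descr = make_triangular_descr();
      // descriptor() exposes the raw handle expected by legacy cuSPARSE calls.
      cusparseMatDescr_t raw = descr.descriptor();
      (void)raw;
    }  // CuSparseDescriptorDeleter runs here and checks the destroy status

    }  // namespace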
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h ADDED
@@ -0,0 +1,174 @@
1
+ #pragma once
2
+
3
+ #include <cublas_v2.h>
4
+ #include <cusparse.h>
5
+ #include <c10/macros/Export.h>
6
+
7
+ #ifdef CUDART_VERSION
8
+ #include <cusolver_common.h>
9
+ #endif
10
+
11
+ #include <ATen/Context.h>
12
+ #include <c10/util/Exception.h>
13
+ #include <c10/cuda/CUDAException.h>
14
+
15
+
16
+ namespace c10 {
17
+
18
+ class CuDNNError : public c10::Error {
19
+ using Error::Error;
20
+ };
21
+
22
+ } // namespace c10
23
+
24
+ #define AT_CUDNN_FRONTEND_CHECK(EXPR, ...) \
25
+ do { \
26
+ auto error_object = EXPR; \
27
+ if (!error_object.is_good()) { \
28
+ TORCH_CHECK_WITH(CuDNNError, false, \
29
+ "cuDNN Frontend error: ", error_object.get_message()); \
30
+ } \
31
+ } while (0) \
32
+
33
+ #define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__)
34
+
35
+ // See Note [CHECK macro]
36
+ #define AT_CUDNN_CHECK(EXPR, ...) \
37
+ do { \
38
+ cudnnStatus_t status = EXPR; \
39
+ if (status != CUDNN_STATUS_SUCCESS) { \
40
+ if (status == CUDNN_STATUS_NOT_SUPPORTED) { \
41
+ TORCH_CHECK_WITH(CuDNNError, false, \
42
+ "cuDNN error: ", \
43
+ cudnnGetErrorString(status), \
44
+ ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \
45
+ } else { \
46
+ TORCH_CHECK_WITH(CuDNNError, false, \
47
+ "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \
48
+ } \
49
+ } \
50
+ } while (0)
51
+
52
+ namespace at::cuda::blas {
53
+ C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error);
54
+ } // namespace at::cuda::blas
55
+
56
+ #define TORCH_CUDABLAS_CHECK(EXPR) \
57
+ do { \
58
+ cublasStatus_t __err = EXPR; \
59
+ TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \
60
+ "CUDA error: ", \
61
+ at::cuda::blas::_cublasGetErrorEnum(__err), \
62
+ " when calling `" #EXPR "`"); \
63
+ } while (0)
64
+
65
+ const char *cusparseGetErrorString(cusparseStatus_t status);
66
+
67
+ #define TORCH_CUDASPARSE_CHECK(EXPR) \
68
+ do { \
69
+ cusparseStatus_t __err = EXPR; \
70
+ TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \
71
+ "CUDA error: ", \
72
+ cusparseGetErrorString(__err), \
73
+ " when calling `" #EXPR "`"); \
74
+ } while (0)
75
+
76
+ // cusolver related headers are only supported on cuda now
77
+ #ifdef CUDART_VERSION
78
+
79
+ namespace at::cuda::solver {
80
+ C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status);
81
+
82
+ constexpr const char* _cusolver_backend_suggestion = \
83
+ "If you keep seeing this error, you may use " \
84
+ "`torch.backends.cuda.preferred_linalg_library()` to try " \
85
+ "linear algebra operators with other supported backends. " \
86
+ "See https://pytorch.org/docs/stable/backends.html#torch.backends.cuda.preferred_linalg_library";
87
+
88
+ } // namespace at::cuda::solver
89
+
90
+ // When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan.
91
+ // When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue.
92
+ #define TORCH_CUSOLVER_CHECK(EXPR) \
93
+ do { \
94
+ cusolverStatus_t __err = EXPR; \
95
+ if ((CUDA_VERSION < 11500 && \
96
+ __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \
97
+ (CUDA_VERSION >= 11500 && \
98
+ __err == CUSOLVER_STATUS_INVALID_VALUE)) { \
99
+ TORCH_CHECK_LINALG( \
100
+ false, \
101
+ "cusolver error: ", \
102
+ at::cuda::solver::cusolverGetErrorMessage(__err), \
103
+ ", when calling `" #EXPR "`", \
104
+ ". This error may appear if the input matrix contains NaN. ", \
105
+ at::cuda::solver::_cusolver_backend_suggestion); \
106
+ } else { \
107
+ TORCH_CHECK( \
108
+ __err == CUSOLVER_STATUS_SUCCESS, \
109
+ "cusolver error: ", \
110
+ at::cuda::solver::cusolverGetErrorMessage(__err), \
111
+ ", when calling `" #EXPR "`. ", \
112
+ at::cuda::solver::_cusolver_backend_suggestion); \
113
+ } \
114
+ } while (0)
115
+
116
+ #else
117
+ #define TORCH_CUSOLVER_CHECK(EXPR) EXPR
118
+ #endif
119
+
120
+ #define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR)
121
+
122
+ // For CUDA Driver API
123
+ //
124
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
125
+ // in ATen, and we need to use its nvrtcGetErrorString.
126
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
127
+ #if !defined(USE_ROCM)
128
+
129
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
130
+ do { \
131
+ CUresult __err = EXPR; \
132
+ if (__err != CUDA_SUCCESS) { \
133
+ const char* err_str; \
134
+ CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
135
+ if (get_error_str_err != CUDA_SUCCESS) { \
136
+ AT_ERROR("CUDA driver error: unknown error"); \
137
+ } else { \
138
+ AT_ERROR("CUDA driver error: ", err_str); \
139
+ } \
140
+ } \
141
+ } while (0)
142
+
143
+ #else
144
+
145
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
146
+ do { \
147
+ CUresult __err = EXPR; \
148
+ if (__err != CUDA_SUCCESS) { \
149
+ AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \
150
+ } \
151
+ } while (0)
152
+
153
+ #endif
154
+
155
+ // For CUDA NVRTC
156
+ //
157
+ // Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE,
158
+ // incorrectly produces the error string "NVRTC unknown error."
159
+ // The following maps it correctly.
160
+ //
161
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
162
+ // in ATen, and we need to use its nvrtcGetErrorString.
163
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
164
+ #define AT_CUDA_NVRTC_CHECK(EXPR) \
165
+ do { \
166
+ nvrtcResult __err = EXPR; \
167
+ if (__err != NVRTC_SUCCESS) { \
168
+ if (static_cast<int>(__err) != 7) { \
169
+ AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \
170
+ } else { \
171
+ AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \
172
+ } \
173
+ } \
174
+ } while (0)
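The *_CHECK macros in Exceptions.h all share one shape: evaluate the expression once into a local status variable, compare it against the library's success constant, and convert failures into a thrown c10::Error with a readable message via TORCH_CHECK or TORCH_CHECK_WITH. A hedged sketch of a call site using TORCH_CUDASPARSE_CHECK follows; create_mat_descr_or_throw() is a hypothetical helper.

    // Hedged sketch of a call site for the status-check macros above.
    // create_mat_descr_or_throw() is a hypothetical helper, not an ATen API.
    #include <ATen/cuda/Exceptions.h>
    #include <cusparse.h>

    // On failure, TORCH_CUDASPARSE_CHECK expands to a TORCH_CHECK that throws
    // c10::Error with cusparseGetErrorString(status) and the stringified
    // expression in the message, so callers never inspect status codes by hand.
    cusparseMatDescr_t create_mat_descr_or_throw() {
      cusparseMatDescr_t descr = nullptr;
      TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&descr));
      return descr;  // caller owns it; pair with cusparseDestroyMatDescr
    }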
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <ATen/cuda/CachingHostAllocator.h>
5
+
6
+ namespace at::cuda {
7
+
8
+ inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() {
9
+ return getCachingHostAllocator();
10
+ }
11
+ } // namespace at::cuda
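getPinnedMemoryAllocator() simply forwards to the caching host allocator, so pinned (page-locked) host buffers are drawn from a reuse pool rather than a fresh allocation per call. A hedged sketch of allocating through it is below; pinned_buffer_example() is hypothetical.

    // Hedged sketch: allocating a pinned host buffer through the allocator above.
    // pinned_buffer_example() is hypothetical; the allocate()/DataPtr interface
    // comes from c10/core/Allocator.h, which this header already includes.
    #include <ATen/cuda/PinnedMemoryAllocator.h>

    void pinned_buffer_example() {
      at::Allocator* allocator = at::cuda::getPinnedMemoryAllocator();
      // DataPtr is an RAII handle: the pinned block goes back to the caching
      // host allocator when buffer is destroyed.
      c10::DataPtr buffer = allocator->allocate(1024);
      void* host_ptr = buffer.get();
      (void)host_ptr;  // usable as the host side of async H2D/D2H copies
    }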
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #if !defined(USE_ROCM)
4
+ #include <cuda.h> // for CUDA_VERSION
5
+ #endif
6
+
7
+ #if !defined(USE_ROCM)
8
+ #include <cub/version.cuh>
9
+ #else
10
+ #define CUB_VERSION 0
11
+ #endif
12
+
13
+ // cub sort support for __nv_bfloat16 is added to cub 1.13 in:
14
+ // https://github.com/NVIDIA/cub/pull/306
15
+ #if CUB_VERSION >= 101300
16
+ #define CUB_SUPPORTS_NV_BFLOAT16() true
17
+ #else
18
+ #define CUB_SUPPORTS_NV_BFLOAT16() false
19
+ #endif
20
+
21
+ // cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
22
+ // https://github.com/NVIDIA/cub/pull/326
23
+ // CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
24
+ // starting from CUDA 11.5
25
+ #if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
26
+ #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
27
+ #else
28
+ #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
29
+ #endif
30
+
31
+ // cub support for UniqueByKey is added to cub 1.16 in:
32
+ // https://github.com/NVIDIA/cub/pull/405
33
+ #if CUB_VERSION >= 101600
34
+ #define CUB_SUPPORTS_UNIQUE_BY_KEY() true
35
+ #else
36
+ #define CUB_SUPPORTS_UNIQUE_BY_KEY() false
37
+ #endif
38
+
39
+ // cub support for scan by key is added to cub 1.15
40
+ // in https://github.com/NVIDIA/cub/pull/376
41
+ #if CUB_VERSION >= 101500
42
+ #define CUB_SUPPORTS_SCAN_BY_KEY() 1
43
+ #else
44
+ #define CUB_SUPPORTS_SCAN_BY_KEY() 0
45
+ #endif
46
+
47
+ // cub support for cub::FutureValue is added to cub 1.15 in:
48
+ // https://github.com/NVIDIA/cub/pull/305
49
+ #if CUB_VERSION >= 101500
50
+ #define CUB_SUPPORTS_FUTURE_VALUE() true
51
+ #else
52
+ #define CUB_SUPPORTS_FUTURE_VALUE() false
53
+ #endif
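Each macro in cub_definitions.cuh is a compile-time feature probe that collapses to true/false (or 1/0), so .cu files can select a CUB code path with a plain #if instead of repeating CUB_VERSION comparisons. A hedged sketch of the intended consumption pattern follows; sort_keys_bf16() is hypothetical and its branches are only comments.

    // Hedged sketch of how a .cu translation unit consumes the probes above.
    // sort_keys_bf16() is hypothetical; only macro names defined in
    // cub_definitions.cuh are assumed.
    #include <ATen/cuda/cub_definitions.cuh>

    void sort_keys_bf16() {
    #if CUB_SUPPORTS_NV_BFLOAT16()
      // CUB >= 1.13: __nv_bfloat16 keys can be handed to CUB's radix sort
      // directly.
    #else
      // Older CUB, or ROCm where CUB_VERSION is forced to 0: fall back to a
      // different sorting path.
    #endif
    }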
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/AcceleratorHooksInterface.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+
5
+ namespace at {
6
+
7
+ // AcceleratorHooksInterface is a shared interface provided by all
8
+ // accelerators to allow generic code.
9
+ // This interface is hook-based as it corresponds to all the functions
10
+ // that are going to be called in a generic way from the CPU code.
11
+
12
+ struct TORCH_API AcceleratorHooksInterface {
13
+ // This should never actually be implemented, but it is used to
14
+ // squelch -Werror=non-virtual-dtor
15
+ virtual ~AcceleratorHooksInterface() = default;
16
+
17
+ // Whether the device at device_index is fully initialized or not.
18
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const = 0;
19
+ };
20
+
21
+ } // namespace at
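AcceleratorHooksInterface is deliberately tiny: a virtual destructor plus hasPrimaryContext(), which lets device-agnostic CPU code ask whether a device is initialized without linking the device library. A hedged sketch of a backend satisfying it is below; FakeAcceleratorHooks is a hypothetical name used only for illustration.

    // Hedged sketch of a backend satisfying AcceleratorHooksInterface.
    // FakeAcceleratorHooks is hypothetical and exists only for illustration.
    #include <ATen/detail/AcceleratorHooksInterface.h>

    struct FakeAcceleratorHooks final : at::AcceleratorHooksInterface {
      // Pretend only device 0 has a live primary context; a real backend would
      // query its driver (for CUDA, cuDevicePrimaryCtxGetState) instead.
      bool hasPrimaryContext(c10::DeviceIndex device_index) const override {
        return device_index == 0;
      }
    };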
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h ADDED
@@ -0,0 +1,201 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/Registry.h>
6
+
7
+ #include <ATen/detail/AcceleratorHooksInterface.h>
8
+
9
+ // Forward-declares at::Generator and at::cuda::NVRTC
10
+ namespace at {
11
+ struct Generator;
12
+ namespace cuda {
13
+ struct NVRTC;
14
+ } // namespace cuda
15
+ } // namespace at
16
+
17
+ // NB: Class must live in `at` due to limitations of Registry.h.
18
+ namespace at {
19
+
20
+ #ifdef _MSC_VER
21
+ constexpr const char* CUDA_HELP =
22
+ "PyTorch splits its backend into two shared libraries: a CPU library "
23
+ "and a CUDA library; this error has occurred because you are trying "
24
+ "to use some CUDA functionality, but the CUDA library has not been "
25
+ "loaded by the dynamic linker for some reason. The CUDA library MUST "
26
+ "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
27
+ "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ "
28
+ "in your link arguments; many dynamic linkers will delete dynamic library "
29
+ "dependencies if you don't depend on any of their symbols. You can check "
30
+ "if this has occurred by using link on your binary to see if there is a "
31
+ "dependency on *_cuda.dll library.";
32
+ #else
33
+ constexpr const char* CUDA_HELP =
34
+ "PyTorch splits its backend into two shared libraries: a CPU library "
35
+ "and a CUDA library; this error has occurred because you are trying "
36
+ "to use some CUDA functionality, but the CUDA library has not been "
37
+ "loaded by the dynamic linker for some reason. The CUDA library MUST "
38
+ "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
39
+ "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many "
40
+ "dynamic linkers will delete dynamic library dependencies if you don't "
41
+ "depend on any of their symbols. You can check if this has occurred by "
42
+ "using ldd on your binary to see if there is a dependency on *_cuda.so "
43
+ "library.";
44
+ #endif
45
+
46
+ // The CUDAHooksInterface is an omnibus interface for any CUDA functionality
47
+ // which we may want to call into from CPU code (and thus must be dynamically
48
+ // dispatched, to allow for separate compilation of CUDA code). How do I
49
+ // decide if a function should live in this class? There are two tests:
50
+ //
51
+ // 1. Does the *implementation* of this function require linking against
52
+ // CUDA libraries?
53
+ //
54
+ // 2. Is this function *called* from non-CUDA ATen code?
55
+ //
56
+ // (2) should filter out many ostensible use-cases, since many times a CUDA
57
+ // function provided by ATen is only really ever used by actual CUDA code.
58
+ //
59
+ // TODO: Consider putting the stub definitions in another class, so that one
60
+ // never forgets to implement each virtual function in the real implementation
61
+ // in CUDAHooks. This probably doesn't buy us much though.
62
+ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
63
+ // This should never actually be implemented, but it is used to
64
+ // squelch -Werror=non-virtual-dtor
65
+ virtual ~CUDAHooksInterface() override = default;
66
+
67
+ // Initialize THCState and, transitively, the CUDA state
68
+ virtual void initCUDA() const {
69
+ TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
70
+ }
71
+
72
+ virtual const Generator& getDefaultCUDAGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
73
+ TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP);
74
+ }
75
+
76
+ virtual Device getDeviceFromPtr(void* /*data*/) const {
77
+ TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
78
+ }
79
+
80
+ virtual bool isPinnedPtr(const void* /*data*/) const {
81
+ return false;
82
+ }
83
+
84
+ virtual bool hasCUDA() const {
85
+ return false;
86
+ }
87
+
88
+ virtual bool hasCUDART() const {
89
+ return false;
90
+ }
91
+
92
+ virtual bool hasMAGMA() const {
93
+ return false;
94
+ }
95
+
96
+ virtual bool hasCuDNN() const {
97
+ return false;
98
+ }
99
+
100
+ virtual bool hasCuSOLVER() const {
101
+ return false;
102
+ }
103
+
104
+ virtual bool hasROCM() const {
105
+ return false;
106
+ }
107
+
108
+ virtual const at::cuda::NVRTC& nvrtc() const {
109
+ TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP);
110
+ }
111
+
112
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
113
+ TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP);
114
+ }
115
+
116
+ virtual DeviceIndex current_device() const {
117
+ return -1;
118
+ }
119
+
120
+ virtual Allocator* getPinnedMemoryAllocator() const {
121
+ TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP);
122
+ }
123
+
124
+ virtual Allocator* getCUDADeviceAllocator() const {
125
+ TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP);
126
+ }
127
+
128
+ virtual bool compiledWithCuDNN() const {
129
+ return false;
130
+ }
131
+
132
+ virtual bool compiledWithMIOpen() const {
133
+ return false;
134
+ }
135
+
136
+ virtual bool supportsDilatedConvolutionWithCuDNN() const {
137
+ return false;
138
+ }
139
+
140
+ virtual bool supportsDepthwiseConvolutionWithCuDNN() const {
141
+ return false;
142
+ }
143
+
144
+ virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const {
145
+ return false;
146
+ }
147
+
148
+ virtual long versionCuDNN() const {
149
+ TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
150
+ }
151
+
152
+ virtual long versionCUDART() const {
153
+ TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP);
154
+ }
155
+
156
+ virtual std::string showConfig() const {
157
+ TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP);
158
+ }
159
+
160
+ virtual double batchnormMinEpsilonCuDNN() const {
161
+ TORCH_CHECK(false,
162
+ "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP);
163
+ }
164
+
165
+ virtual int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex /*device_index*/) const {
166
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
167
+ }
168
+
169
+ virtual void cuFFTSetPlanCacheMaxSize(DeviceIndex /*device_index*/, int64_t /*max_size*/) const {
170
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
171
+ }
172
+
173
+ virtual int64_t cuFFTGetPlanCacheSize(DeviceIndex /*device_index*/) const {
174
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
175
+ }
176
+
177
+ virtual void cuFFTClearPlanCache(DeviceIndex /*device_index*/) const {
178
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
179
+ }
180
+
181
+ virtual int getNumGPUs() const {
182
+ return 0;
183
+ }
184
+
185
+ virtual void deviceSynchronize(DeviceIndex /*device_index*/) const {
186
+ TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP);
187
+ }
188
+ };
189
+
190
+ // NB: dummy argument to suppress "ISO C++11 requires at least one argument
191
+ // for the "..." in a variadic macro"
192
+ struct TORCH_API CUDAHooksArgs {};
193
+
194
+ TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
195
+ #define REGISTER_CUDA_HOOKS(clsname) \
196
+ C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)
197
+
198
+ namespace detail {
199
+ TORCH_API const CUDAHooksInterface& getCUDAHooks();
200
+ } // namespace detail
201
+ } // namespace at
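There are two sides to CUDAHooksInterface in practice: the ATen_cuda library registers its concrete subclass through REGISTER_CUDA_HOOKS, and CPU-side code only ever reaches it through at::detail::getCUDAHooks(), so the CPU build never links CUDA directly. A hedged sketch of the consumer side is below; report_cuda_availability() is a hypothetical helper that uses only methods declared above.

    // Hedged sketch of the consumer side: CPU-only code queries CUDA
    // capabilities through the registered hooks and receives the safe fallback
    // answers (false / 0) when ATen_cuda was never loaded.
    // report_cuda_availability() is a hypothetical helper.
    #include <ATen/detail/CUDAHooksInterface.h>
    #include <iostream>

    void report_cuda_availability() {
      const at::CUDAHooksInterface& hooks = at::detail::getCUDAHooks();
      std::cout << "hasCUDA:  " << hooks.hasCUDA() << '\n'
                << "hasCuDNN: " << hooks.hasCuDNN() << '\n'
                << "GPUs:     " << hooks.getNumGPUs() << '\n';
      // Calls like hooks.initCUDA() TORCH_CHECK-fail with the CUDA_HELP text
      // when only the CPU library is present, so guard them behind hasCUDA().
    }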
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ #include <tuple>
4
+
5
+ // Modified from https://stackoverflow.com/questions/7943525/is-it-possible-to-figure-out-the-parameter-type-and-return-type-of-a-lambda
6
+
7
+ // Fallback, anything with an operator()
8
+ template <typename T>
9
+ struct function_traits : public function_traits<decltype(&T::operator())> {
10
+ };
11
+
12
+ // Pointers to class members that are themselves functors.
13
+ // For example, in the following code:
14
+ // template <typename func_t>
15
+ // struct S {
16
+ // func_t f;
17
+ // };
18
+ // template <typename func_t>
19
+ // S<func_t> make_s(func_t f) {
20
+ // return S<func_t> { .f = f };
21
+ // }
22
+ //
23
+ // auto s = make_s([] (int, float) -> double { /* ... */ });
24
+ //
25
+ // function_traits<decltype(&s::f)> traits;
26
+ template <typename ClassType, typename T>
27
+ struct function_traits<T ClassType::*> : public function_traits<T> {
28
+ };
29
+
30
+ // Const class member functions
31
+ template <typename ClassType, typename ReturnType, typename... Args>
32
+ struct function_traits<ReturnType(ClassType::*)(Args...) const> : public function_traits<ReturnType(Args...)> {
33
+ };
34
+
35
+ // Reference types
36
+ template <typename T>
37
+ struct function_traits<T&> : public function_traits<T> {};
38
+ template <typename T>
39
+ struct function_traits<T*> : public function_traits<T> {};
40
+
41
+ // Free functions
42
+ template <typename ReturnType, typename... Args>
43
+ struct function_traits<ReturnType(Args...)> {
44
+ // arity is the number of arguments.
45
+ enum { arity = sizeof...(Args) };
46
+
47
+ typedef std::tuple<Args...> ArgsTuple;
48
+ typedef ReturnType result_type;
49
+
50
+ template <size_t i>
51
+ struct arg
52
+ {
53
+ typedef typename std::tuple_element<i, std::tuple<Args...>>::type type;
54
+ // the i-th argument is equivalent to the i-th tuple element of a tuple
55
+ // composed of those arguments.
56
+ };
57
+ };
58
+
59
+ template <typename T>
60
+ struct nullary_function_traits {
61
+ using traits = function_traits<T>;
62
+ using result_type = typename traits::result_type;
63
+ };
64
+
65
+ template <typename T>
66
+ struct unary_function_traits {
67
+ using traits = function_traits<T>;
68
+ using result_type = typename traits::result_type;
69
+ using arg1_t = typename traits::template arg<0>::type;
70
+ };
71
+
72
+ template <typename T>
73
+ struct binary_function_traits {
74
+ using traits = function_traits<T>;
75
+ using result_type = typename traits::result_type;
76
+ using arg1_t = typename traits::template arg<0>::type;
77
+ using arg2_t = typename traits::template arg<1>::type;
78
+ };
79
+
80
+
81
+ // Traits for calling with c10::guts::invoke, where member_functions have a first argument of ClassType
82
+ template <typename T>
83
+ struct invoke_traits : public function_traits<T>{
84
+ };
85
+
86
+ template <typename T>
87
+ struct invoke_traits<T&> : public invoke_traits<T>{
88
+ };
89
+
90
+ template <typename T>
91
+ struct invoke_traits<T&&> : public invoke_traits<T>{
92
+ };
93
+
94
+ template <typename ClassType, typename ReturnType, typename... Args>
95
+ struct invoke_traits<ReturnType(ClassType::*)(Args...)> :
96
+ public function_traits<ReturnType(ClassType&, Args...)> {
97
+ };
98
+
99
+ template <typename ClassType, typename ReturnType, typename... Args>
100
+ struct invoke_traits<ReturnType(ClassType::*)(Args...) const> :
101
+ public function_traits<ReturnType(const ClassType&, Args...)> {
102
+ };
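function_traits peels apart any callable: free function types match the ReturnType(Args...) base case directly, while lambdas and functors go through the decltype(&T::operator()) fallback and the const-member-function specialization before landing on that same base case, which exposes arity, result_type, and arg<i>::type. A hedged compile-time sketch is below; everything it asserts follows from the specializations above.

    // Hedged compile-time sketch: exercising function_traits on a lambda.
    // The static_asserts document what the traits yield for this callable.
    #include <ATen/detail/FunctionTraits.h>
    #include <type_traits>

    void function_traits_example() {
      auto scale = [](int n, float factor) -> double { return n * factor; };
      using traits = function_traits<decltype(scale)>;

      static_assert(traits::arity == 2, "lambda takes two arguments");
      static_assert(std::is_same<traits::result_type, double>::value, "");
      static_assert(std::is_same<traits::arg<0>::type, int>::value, "");
      static_assert(std::is_same<traits::arg<1>::type, float>::value, "");
    }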
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h ADDED
@@ -0,0 +1,70 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/GeneratorImpl.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <c10/util/Registry.h>
8
+
9
+ #include <cstddef>
10
+ #include <memory>
11
+
12
+ namespace at {
13
+ class Context;
14
+ }
15
+
16
+ // NB: Class must live in `at` due to limitations of Registry.h.
17
+ namespace at {
18
+
19
+ // The HIPHooksInterface is an omnibus interface for any HIP functionality
20
+ // which we may want to call into from CPU code (and thus must be dynamically
21
+ // dispatched, to allow for separate compilation of HIP code). See
22
+ // CUDAHooksInterface for more detailed motivation.
23
+ struct TORCH_API HIPHooksInterface {
24
+ // This should never actually be implemented, but it is used to
25
+ // squelch -Werror=non-virtual-dtor
26
+ virtual ~HIPHooksInterface() = default;
27
+
28
+ // Initialize the HIP library state
29
+ virtual void initHIP() const {
30
+ AT_ERROR("Cannot initialize HIP without ATen_hip library.");
31
+ }
32
+
33
+ virtual std::unique_ptr<c10::GeneratorImpl> initHIPGenerator(Context*) const {
34
+ AT_ERROR("Cannot initialize HIP generator without ATen_hip library.");
35
+ }
36
+
37
+ virtual bool hasHIP() const {
38
+ return false;
39
+ }
40
+
41
+ virtual c10::DeviceIndex current_device() const {
42
+ return -1;
43
+ }
44
+
45
+ virtual Allocator* getPinnedMemoryAllocator() const {
46
+ AT_ERROR("Pinned memory requires HIP.");
47
+ }
48
+
49
+ virtual void registerHIPTypes(Context*) const {
50
+ AT_ERROR("Cannot registerHIPTypes() without ATen_hip library.");
51
+ }
52
+
53
+ virtual int getNumGPUs() const {
54
+ return 0;
55
+ }
56
+ };
57
+
58
+ // NB: dummy argument to suppress "ISO C++11 requires at least one argument
59
+ // for the "..." in a variadic macro"
60
+ struct TORCH_API HIPHooksArgs {};
61
+
62
+ TORCH_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs);
63
+ #define REGISTER_HIP_HOOKS(clsname) \
64
+ C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname)
65
+
66
+ namespace detail {
67
+ TORCH_API const HIPHooksInterface& getHIPHooks();
68
+
69
+ } // namespace detail
70
+ } // namespace at
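Registration mirrors the CUDA case: a backend defines a concrete subclass of HIPHooksInterface and wires it into HIPHooksRegistry with REGISTER_HIP_HOOKS, and generic code retrieves the registered hooks through detail::getHIPHooks(). The sketch below shows what such a registration can look like; MinimalHIPHooks is hypothetical, overrides only the cheapest queries, and is not the real ATen_hip implementation.

    // Hedged sketch of the provider side: a subclass registered into
    // HIPHooksRegistry. MinimalHIPHooks is a hypothetical illustration, not
    // the real ATen_hip implementation.
    #include <ATen/detail/HIPHooksInterface.h>

    namespace {

    struct MinimalHIPHooks : at::HIPHooksInterface {
      // The registry constructs hooks from HIPHooksArgs, so the subclass needs
      // a matching constructor.
      explicit MinimalHIPHooks(at::HIPHooksArgs) {}
      bool hasHIP() const override {
        return true;  // a real implementation would consult the HIP runtime
      }
      int getNumGPUs() const override {
        return 1;  // likewise, normally a hipGetDeviceCount() query
      }
    };

    // Wires MinimalHIPHooks into HIPHooksRegistry under its class name; that
    // registry is what detail::getHIPHooks() draws from at first use.
    REGISTER_HIP_HOOKS(MinimalHIPHooks);

    }  // namespace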