applied-ai-018 committed on
Commit c139e06 · verified · 1 Parent(s): 61224af

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h +147 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h +344 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h +29 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h +24 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h +29 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h +29 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h +29 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Config.h +22 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h +41 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DimVector.h +2 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h +34 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h +124 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h +392 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Functions.h +1405 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Generator.h +2 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedTensorImpl.h +161 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h +183 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h +139 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h +441 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h +1 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h +194 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Operators.h +1336 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h +83 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h +19 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h +58 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h +34 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SavedTensorHooks.h +52 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Scalar.h +3 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h +4 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Storage.h +2 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Tensor.h +3 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h +2 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h +987 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h +137 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h +75 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h +2 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h +87 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h +186 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h +114 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TypeDefault.h +30 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/autocast_mode.h +647 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/code_template.h +245 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpp_custom_type_hack.h +112 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h +7 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h +33 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAllocatorConfig.h +116 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h +450 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h +98 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h +158 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h ADDED
@@ -0,0 +1,147 @@
+ #pragma once
+ #include <ATen/Config.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/util/BFloat16.h>
+ #include <c10/util/Float8_e4m3fn.h>
+ #include <c10/util/Float8_e5m2.h>
+ #include <c10/util/Half.h>
+
+ // Defines the accumulation type for a scalar type.
+ // Example:
+ // using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
+ //
+ // Accumulation types are an important concept in numeric computing
+ // because you frequently want to perform intermediate computations
+ // at a higher precision than the input and output precision, to avoid
+ // compounding internal rounding errors. Accumulation is the most
+ // well-known intermediate computation (it is of great importance for
+ // sum reduction and matrix multiply, for example), but in PyTorch
+ // acc_type ends up getting used for all sorts of other intermediate
+ // computations, so it perhaps would be more accurately (ahem) called an
+ // "accurate" type. acc_type is especially important for reduced
+ // precision operations like float16 and bfloat16, where relatively
+ // benign looking inputs can easily end up overflowing/underflowing.
+ //
+ // acc_type is parametrized by whether or not you are running on CUDA
+ // or not, because on CUDA double precision operations are expensive
+ // and so by default, we don't actually want to use double as an
+ // acc_type on CUDA. A lot of things are typed out below, but
+ // basically, the table is generated by a few rules:
+ //
+ // If bool:
+ // Use 'bool' as acc_type.
+ // If floating point:
+ // If CUDA, use 'float' as acc_type (unless scalar_t is double),
+ // otherwise (CPU) use 'double'
+ // If integral:
+ // Use 'int64_t' as acc_type
+ //
+ // You're not forced to use this template; if you happen to know
+ // something specific about your use case, you can specify your own
+ // desired behavior. This template, however, will give you a reasonable
+ // default that will work for all dtypes supported in PyTorch.
+
+ #if defined(__CUDACC__)
+ #include <cuda.h>
+ #include <cuda_fp16.h>
+ #elif defined(__HIPCC__)
+ #include <hip/hip_fp16.h>
+ #include <hip/hip_runtime.h>
+ #endif
+
+ namespace at {
+
+ template <typename T, c10::DeviceType D>
+ struct AccumulateTypeDevice {};
+
+ template <typename T, bool>
+ struct AccumulateType {};
+
+ template <typename T>
+ struct AccumulateType<T, false> {
+ using type = typename AccumulateTypeDevice<T, c10::DeviceType::CPU>::type;
+ };
+
+ template <typename T>
+ struct AccumulateType<T, true> {
+ using type = typename AccumulateTypeDevice<T, c10::DeviceType::CUDA>::type;
+ };
+
+ template <typename T, c10::DeviceType device>
+ using acc_type_device = typename AccumulateTypeDevice<T, device>::type;
+
+ template <typename T, bool is_cuda>
+ using acc_type = typename AccumulateType<T, is_cuda>::type;
+
+ #define ACC_TYPE(t, acc_t, device_type) \
+ template <> \
+ struct AccumulateTypeDevice<t, device_type> { \
+ using type = acc_t; \
+ };
+ #define MPS_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::MPS)
+ #define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA)
+ #define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU)
+
+ MPS_ACC_TYPE(BFloat16, float);
+ MPS_ACC_TYPE(Half, float);
+ MPS_ACC_TYPE(Float8_e5m2, float);
+ MPS_ACC_TYPE(Float8_e4m3fn, float);
+ MPS_ACC_TYPE(float, float);
+ MPS_ACC_TYPE(double, float);
+ MPS_ACC_TYPE(int8_t, int64_t);
+ MPS_ACC_TYPE(uint8_t, int64_t);
+ MPS_ACC_TYPE(char, int64_t);
+ MPS_ACC_TYPE(int16_t, int64_t);
+ MPS_ACC_TYPE(int32_t, int64_t);
+ MPS_ACC_TYPE(int64_t, int64_t);
+ MPS_ACC_TYPE(bool, bool);
+ MPS_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+ MPS_ACC_TYPE(c10::complex<float>, c10::complex<float>);
+ MPS_ACC_TYPE(c10::complex<double>, c10::complex<float>);
+
+ #if defined(__CUDACC__) || defined(__HIPCC__)
+ CUDA_ACC_TYPE(half, float);
+ #endif
+ CUDA_ACC_TYPE(BFloat16, float);
+ CUDA_ACC_TYPE(Half, float);
+ CUDA_ACC_TYPE(Float8_e5m2, float);
+ CUDA_ACC_TYPE(Float8_e4m3fn, float);
+ CUDA_ACC_TYPE(float, float);
+ CUDA_ACC_TYPE(double, double);
+ CUDA_ACC_TYPE(int8_t, int64_t);
+ CUDA_ACC_TYPE(uint8_t, int64_t);
+ CUDA_ACC_TYPE(char, int64_t);
+ CUDA_ACC_TYPE(int16_t, int64_t);
+ CUDA_ACC_TYPE(int32_t, int64_t);
+ CUDA_ACC_TYPE(int64_t, int64_t);
+ CUDA_ACC_TYPE(bool, bool);
+ CUDA_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+ CUDA_ACC_TYPE(c10::complex<float>, c10::complex<float>);
+ CUDA_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+
+ CPU_ACC_TYPE(BFloat16, float);
+ CPU_ACC_TYPE(Half, float);
+ CPU_ACC_TYPE(Float8_e5m2, float);
+ CPU_ACC_TYPE(Float8_e5m2fnuz, float);
+ CPU_ACC_TYPE(Float8_e4m3fn, float);
+ CPU_ACC_TYPE(Float8_e4m3fnuz, float);
+ CPU_ACC_TYPE(float, double);
+ CPU_ACC_TYPE(double, double);
+ CPU_ACC_TYPE(int8_t, int64_t);
+ CPU_ACC_TYPE(uint8_t, int64_t);
+ CPU_ACC_TYPE(char, int64_t);
+ CPU_ACC_TYPE(int16_t, int64_t);
+ CPU_ACC_TYPE(int32_t, int64_t);
+ CPU_ACC_TYPE(int64_t, int64_t);
+ CPU_ACC_TYPE(bool, bool);
+ CPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+ CPU_ACC_TYPE(c10::complex<float>, c10::complex<double>);
+ CPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+
+ TORCH_API c10::ScalarType toAccumulateType(
+ c10::ScalarType type,
+ c10::DeviceType device);
+ TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);
+
+ } // namespace at
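
A minimal usage sketch of the acc_type alias declared above (the stable_sum_cpu helper and its loop are illustrative, not part of the uploaded file). Per the CPU table, acc_type<float, false> is double and acc_type<at::Half, false> is float, so the accumulation runs in the wider type and is narrowed only at the end.

#include <ATen/AccumulateType.h>
#include <cstdint>

// Illustrative sketch only: accumulate in the wider type, narrow at the end.
template <typename scalar_t>
scalar_t stable_sum_cpu(const scalar_t* data, int64_t n) {
  using acc_t = at::acc_type<scalar_t, /*is_cuda=*/false>;
  acc_t acc = acc_t(0);
  for (int64_t i = 0; i < n; ++i) {
    acc += static_cast<acc_t>(data[i]);
  }
  return static_cast<scalar_t>(acc);
}
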
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h ADDED
@@ -0,0 +1,344 @@
+ #pragma once
+
+ #include <ATen/CollapseDims.h>
+ #include <ATen/Parallel.h>
+ #include <ATen/TensorUtils.h>
+ #include <c10/util/irange.h>
+ #include <cstring>
+ #include <limits>
+ #include <utility>
+
+ namespace at {
+
+ /*
+ * The basic strategy for apply is as follows:
+ *
+ * 1. Starting with the outermost index, loop until we reach a dimension where
+ * the data is no longer contiguous, i.e. the stride at that dimension is not
+ * equal to the size of the tensor defined by the outer dimensions. Let's call
+ * this outer (contiguous) tensor A. Note that if the Tensor is contiguous, then
+ * A is equal to the entire Tensor. Let's call the inner tensor B.
+ *
+ * 2. We loop through the indices in B, starting at its outermost dimension. For
+ * example, if B is a 2x2 matrix, then we do:
+ *
+ * B[0][0]
+ * B[0][1]
+ * B[1][0]
+ * B[1][1]
+ *
+ * We set the offset into the underlying storage as (storageOffset + stride_B *
+ * index_B), i.e. basically we compute the offset into the storage as we would
+ * normally for a Tensor. But because we are guaranteed the subsequent data is
+ * contiguous in memory, we can simply loop for sizeof(A) iterations and perform
+ * the operation, without having to follow the order described by the strides of
+ * A.
+ *
+ * 3. As an optimization, we merge dimensions of A that are contiguous in
+ * memory. For example, if A is a 3x3x3x3 tensor narrowed from a 3x3x4x3 tensor,
+ * then the first two dimensions can be merged for the purposes of APPLY,
+ * reducing the number of nested loops.
+ */
+
+ inline Tensor sort_strides(Tensor& tensor_) {
+ IntArrayRef strides = tensor_.strides();
+ std::vector<int64_t> indices;
+ indices.reserve(tensor_.ndimension());
+ for (const auto i : c10::irange(tensor_.ndimension())) {
+ indices.push_back(i);
+ }
+ std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) {
+ return strides[i1] > strides[i2];
+ });
+ Tensor tensor = tensor_.permute(indices);
+ return tensor;
+ }
+
+ template <typename T, int N>
+ struct strided_tensor_iter_fixed {
+ public:
+ T* data_ = NULL;
+ int64_t dim_ = 0;
+
+ int64_t counter_[N] = {0};
+ int64_t sizes_[N] = {0};
+ int64_t strides_[N] = {0};
+
+ strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete;
+ void operator=(strided_tensor_iter_fixed const& x) = delete;
+ strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
+ strided_tensor_iter_fixed(
+ Tensor& tensor,
+ C10_UNUSED bool sort_strides = false)
+ : data_(tensor.data_ptr<T>()) {
+ std::memset(counter_, 0, sizeof(int64_t) * N);
+ if (tensor.dim() > 0) {
+ std::memcpy(
+ sizes_, tensor.sizes().data(), tensor.dim() * sizeof(int64_t));
+ std::memcpy(
+ strides_, tensor.strides().data(), tensor.dim() * sizeof(int64_t));
+ }
+ dim_ = std::get<1>(collapse_dims(sizes_, strides_, tensor.ndimension()));
+ }
+ };
+
+ template <typename T>
+ struct strided_tensor_iter {
+ private:
+ public:
+ T* data_ = NULL;
+ int64_t dim_;
+
+ std::vector<int64_t> counter_;
+ std::vector<int64_t> sizes_;
+ std::vector<int64_t> strides_;
+
+ strided_tensor_iter(strided_tensor_iter const&) = delete;
+ void operator=(strided_tensor_iter const& x) = delete;
+ strided_tensor_iter(strided_tensor_iter&&) = default;
+ strided_tensor_iter(Tensor& tensor)
+ : data_(tensor.data_ptr<T>()),
+ dim_(tensor.ndimension()),
+ counter_(dim_, 0),
+ sizes_(tensor.sizes().vec()),
+ strides_(tensor.strides().vec()) {
+ dim_ = std::get<1>(collapse_dims(sizes_.data(), strides_.data(), dim_));
+ }
+ };
+
+ inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
+ if (tensors.empty())
+ return true;
+ int64_t all_numel = tensors[0].numel();
+ for (const auto i : c10::irange(1, tensors.size())) {
+ if (tensors[i].numel() != all_numel)
+ return false;
+ }
+ return true;
+ }
+
+ inline std::string _all_equal_numel_error(at::ArrayRef<Tensor> tensors) {
+ std::ostringstream oss;
+ oss << "inconsistent tensor size, expected ";
+ for (size_t i = 0; i < tensors.size() - 1; i++) {
+ oss << tensors[i].sizes() << ", ";
+ }
+ oss << "and " << tensors[tensors.size() - 1].sizes()
+ << " to have the same number of elements, but got ";
+ for (size_t i = 0; i < tensors.size() - 1; i++) {
+ oss << tensors[i].numel() << ", ";
+ }
+ oss << "and " << tensors[tensors.size() - 1].numel()
+ << " elements respectively";
+ return oss.str();
+ }
+
+ inline bool _apply_preamble(ArrayRef<Tensor> tensors) {
+ checkDeviceType("CPU_tensor_apply", tensors, kCPU);
+ checkLayout("CPU_tensor_apply", tensors, kStrided);
+ if (!_all_equal_numel(tensors))
+ AT_ERROR(_all_equal_numel_error(tensors));
+ // An empty tensor has no elements
+ for (auto& t : tensors)
+ if (t.numel() == 0)
+ return false;
+ return true;
+ }
+
+ inline int64_t _max_dim_tensors(ArrayRef<Tensor> tensors) {
+ int64_t dim = 0;
+ for (auto& t : tensors)
+ dim = std::max(dim, t.ndimension());
+ return dim;
+ }
+
+ inline void iterate(int64_t /*size*/){};
+
+ template <typename Arg, typename... Args>
+ inline void iterate(int64_t size, Arg& iter, Args&... iter_tail) {
+ iter.counter_[iter.dim_ - 1] += size;
+ iter.data_ = iter.data_ + size * iter.strides_[iter.dim_ - 1];
+ iterate(size, iter_tail...);
+ }
+
+ inline bool iterate_continue() {
+ return true;
+ };
+
+ template <typename Arg, typename... Args>
+ inline bool iterate_continue(Arg& iter, Args&... iter_tail) {
+ return iter.counter_[iter.dim_ - 1] < iter.sizes_[iter.dim_ - 1] &&
+ iterate_continue(iter_tail...);
+ }
+
+ inline int64_t max_iterate_size() {
+ return std::numeric_limits<int64_t>::max();
+ };
+
+ template <typename Arg, typename... Args>
+ inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) {
+ return std::min(
+ (iter.sizes_[iter.dim_ - 1] - iter.counter_[iter.dim_ - 1]),
+ max_iterate_size(iter_tail...));
+ }
+
+ inline void iterate_overflow(){};
+
+ template <typename Arg, typename... Args>
+ inline void iterate_overflow(Arg& iter, Args&... iter_tail) {
+ if (iter.counter_[iter.dim_ - 1] == iter.sizes_[iter.dim_ - 1]) {
+ for (int64_t i = iter.dim_ - 1; i > 0; i--) {
+ if (iter.counter_[i] == iter.sizes_[i]) {
+ iter.counter_[i] = 0;
+ iter.counter_[i - 1]++;
+ iter.data_ = iter.data_ - (iter.sizes_[i] * iter.strides_[i]) +
+ iter.strides_[i - 1];
+ }
+ }
+ }
+ iterate_overflow(iter_tail...);
+ }
+
+ inline void forward(int64_t /*offset*/){};
+
+ template <typename Arg, typename... Args>
+ inline void forward(int64_t offset, Arg& iter, Args&... iter_tail) {
+ int64_t multi = offset;
+ for (int64_t i = iter.dim_ - 1; i >= 0; i--) {
+ int64_t inc = multi % iter.sizes_[i];
+ multi = multi / iter.sizes_[i];
+ iter.data_ = iter.data_ + inc * iter.strides_[i];
+ iter.counter_[i] += inc;
+ }
+ forward(offset, iter_tail...);
+ }
+
+ inline int64_t max_dim() {
+ return 0;
+ }
+
+ template <typename Arg, typename... Args>
+ inline int64_t max_dim(Arg& iter, Args&... iter_tail) {
+ return std::max(iter.dim_, max_dim(iter_tail...));
+ }
+
+ inline void apply_op(){};
+
+ template <typename Op, typename... Args>
+ inline void apply_op(
+ int64_t numel,
+ int64_t offset,
+ const Op& op,
+ Args... iters) {
+ // For 0-dim tensors
+ if (numel == 1 && max_dim(iters...) == 0) {
+ op(*iters.data_...);
+ return;
+ }
+ if (offset > 0)
+ forward(offset, iters...);
+ // Splitting this into chunks helps the compiler create faster assembly
+ for (int64_t i = 0; i < numel;) {
+ for (; iterate_continue(iters...) && i < numel;) {
+ op(*iters.data_...);
+ iterate(1, iters...);
+ i++;
+ }
+ iterate_overflow(iters...);
+ }
+ }
+
+ /*
+ Apply a pointwise operator to sequence of tensors
+
+ The calling convention for op is a function/functor that takes the same
+ number of pointers of type scalar as the number of given tensors. For example,
+ to compute a = b * c, op would be of the form:
+ [](scalar* a_val, const scalar* b_val, const scalar* c_val) { a_val[0] =
+ b_val[0] * c_val[0]; };
+ */
+
+ template <typename scalar1, typename scalar2, typename Op>
+ inline void CPU_tensor_apply2(Tensor tensor1, Tensor tensor2, const Op op) {
+ if (!_apply_preamble({tensor1, tensor2}))
+ return;
+ if (_max_dim_tensors({tensor1, tensor2}) <= 8) {
+ apply_op(
+ tensor1.numel(),
+ 0,
+ op,
+ strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+ strided_tensor_iter_fixed<scalar2, 8>(tensor2));
+ } else {
+ apply_op(
+ tensor1.numel(),
+ 0,
+ op,
+ strided_tensor_iter<scalar1>(tensor1),
+ strided_tensor_iter<scalar2>(tensor2));
+ }
+ }
+
+ template <typename scalar1, typename scalar2, typename scalar3, typename Op>
+ inline void CPU_tensor_apply3(
+ Tensor tensor1,
+ Tensor tensor2,
+ Tensor tensor3,
+ const Op op) {
+ if (!_apply_preamble({tensor1, tensor2, tensor3}))
+ return;
+ if (_max_dim_tensors({tensor1, tensor2, tensor3}) <= 8) {
+ apply_op(
+ tensor1.numel(),
+ 0,
+ op,
+ strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+ strided_tensor_iter_fixed<scalar2, 8>(tensor2),
+ strided_tensor_iter_fixed<scalar3, 8>(tensor3));
+ } else {
+ apply_op(
+ tensor1.numel(),
+ 0,
+ op,
+ strided_tensor_iter<scalar1>(tensor1),
+ strided_tensor_iter<scalar2>(tensor2),
+ strided_tensor_iter<scalar3>(tensor3));
+ }
+ }
+
+ template <
+ typename scalar1,
+ typename scalar2,
+ typename scalar3,
+ typename scalar4,
+ typename Op>
+ inline void CPU_tensor_apply4(
+ Tensor tensor1,
+ Tensor tensor2,
+ Tensor tensor3,
+ Tensor tensor4,
+ const Op op) {
+ if (!_apply_preamble({tensor1, tensor2, tensor3, tensor4}))
+ return;
+ if (_max_dim_tensors({tensor1, tensor2, tensor3, tensor4}) <= 8) {
+ apply_op(
+ tensor1.numel(),
+ 0,
+ op,
+ strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+ strided_tensor_iter_fixed<scalar2, 8>(tensor2),
+ strided_tensor_iter_fixed<scalar3, 8>(tensor3),
+ strided_tensor_iter_fixed<scalar4, 8>(tensor4));
+ } else {
+ apply_op(
+ tensor1.numel(),
+ 0,
+ op,
+ strided_tensor_iter<scalar1>(tensor1),
+ strided_tensor_iter<scalar2>(tensor2),
+ strided_tensor_iter<scalar3>(tensor3),
+ strided_tensor_iter<scalar4>(tensor4));
+ }
+ }
+
+ } // namespace at
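
A minimal usage sketch of CPU_tensor_apply3 as declared above (the mul_into wrapper is illustrative, not part of the uploaded file). Note that apply_op invokes the functor on dereferenced iterators, so in practice the functor takes scalar references:

#include <ATen/ATen.h>
#include <ATen/CPUApplyUtils.h>

// Illustrative sketch only: out = a * b over equally sized float CPU tensors.
void mul_into(at::Tensor out, at::Tensor a, at::Tensor b) {
  at::CPU_tensor_apply3<float, float, float>(
      out, a, b, [](float& out_val, float& a_val, float& b_val) {
        out_val = a_val * b_val;
      });
}
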
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CUDAFunctions_inl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+
+ namespace at::caching {
+
+ // Some systems (just cudagraphs currently) will persist a static tensor output
+ // whose TensorImpl does not change across iterations. For these tensors caching
+ // dtype conversions is invalid. Additionally, there will be an extra reference
+ // count to these cached tensors that would prevent buffer inplacing and other
+ // checks on tensor uniqueness. If we are not using these systems the enabled
+ // flag will be false and we will avoid the hash lookup.
+
+ TORCH_API bool is_cached_tensor(const at::Tensor& t);
+ TORCH_API void add_cached_tensor(const at::Tensor& t);
+ TORCH_API void remove_cached_tensor(const at::Tensor& t);
+ TORCH_API void set_cached_tensors_enabled(bool enable);
+
+ // For gradient buffer stealing we will adjust the use count of tensors
+ // which are persisted by cudagraphs, just as we need to adjust reference
+ // count of tensors with hooks.
+ TORCH_API size_t adjusted_use_count(const at::Tensor& t);
+
+ } // namespace at::caching
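
An illustrative sketch of how a caller might use the cached-tensor hooks declared above (the persist_static_output function and its surrounding workflow are assumptions for illustration, not part of the uploaded file):

#include <ATen/ATen.h>
#include <ATen/CachedTensorUtils.h>

// Illustrative sketch only: register a persistent graph output so later
// uniqueness checks consult the adjusted use count instead of the raw one.
void persist_static_output(const at::Tensor& out) {
  at::caching::set_cached_tensors_enabled(true);
  at::caching::add_cached_tensor(out);
  size_t uses = at::caching::adjusted_use_count(out);
  (void)uses; // e.g. compare against 1 to decide whether inplacing is safe
}
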
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CompositeExplicitAutogradFunctions_inl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CompositeImplicitAutogradFunctions_inl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Config.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's
+ // obvious if you forgot to include Config.h
+ // c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
+ //
+ // DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h
+
+ #define AT_MKLDNN_ENABLED() 1
+ #define AT_MKLDNN_ACL_ENABLED() 0
+ #define AT_MKL_ENABLED() 1
+ #define AT_MKL_SEQUENTIAL() 0
+ #define AT_POCKETFFT_ENABLED() 0
+ #define AT_NNPACK_ENABLED() 1
+ #define CAFFE2_STATIC_LINK_CUDA() 0
+ #define AT_BUILD_WITH_BLAS() 1
+ #define AT_BUILD_WITH_LAPACK() 1
+ #define AT_PARALLEL_OPENMP 1
+ #define AT_PARALLEL_NATIVE 0
+ #define AT_PARALLEL_NATIVE_TBB 0
+ #define AT_BLAS_F2C() 0
+ #define AT_BLAS_USE_CBLAS_DOT() 0
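
A short sketch of the #if pattern the comment above asks for (the mkl_is_available wrapper is illustrative, not part of the uploaded file):

#include <ATen/Config.h>

// Illustrative sketch only: using the function-like macro with #if means a
// missing Config.h include shows up as a compile error, not a silently false branch.
bool mkl_is_available() {
#if AT_MKL_ENABLED()
  return true;
#else
  return false;
#endif
}
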
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h ADDED
@@ -0,0 +1,41 @@
+ #pragma once
+
+ #include <ATen/core/IListRef.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/DeviceGuard.h>
+ #include <c10/core/ScalarType.h> // TensorList whyyyyy
+
+ namespace at {
+
+ // Are you here because you're wondering why DeviceGuard(tensor) no
+ // longer works? For code organization reasons, we have temporarily(?)
+ // removed this constructor from DeviceGuard. The new way to
+ // spell it is:
+ //
+ // OptionalDeviceGuard guard(device_of(tensor));
+
+ /// Return the Device of a Tensor, if the Tensor is defined.
+ inline c10::optional<Device> device_of(const Tensor& t) {
+ if (t.defined()) {
+ return c10::make_optional(t.device());
+ } else {
+ return c10::nullopt;
+ }
+ }
+
+ inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
+ return t.has_value() ? device_of(t.value()) : c10::nullopt;
+ }
+
+ /// Return the Device of a TensorList, if the list is non-empty and
+ /// the first Tensor is defined. (This function implicitly assumes
+ /// that all tensors in the list have the same device.)
+ inline c10::optional<Device> device_of(ITensorListRef t) {
+ if (!t.empty()) {
+ return device_of(t.front());
+ } else {
+ return c10::nullopt;
+ }
+ }
+
+ } // namespace at
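
A minimal sketch of the spelling recommended in the comment above (the run_on_device_of wrapper is illustrative, not part of the uploaded file):

#include <ATen/DeviceGuard.h>

// Illustrative sketch only: switch the current device to the tensor's device
// for the duration of the scope; an undefined tensor leaves the device unchanged.
void run_on_device_of(const at::Tensor& t) {
  c10::OptionalDeviceGuard guard(at::device_of(t));
  // ... code that assumes the current device matches t ...
}
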
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DimVector.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <ATen/core/DimVector.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/Utils.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/Exception.h>
+
+ namespace c10 {
+
+ class DynamicLibraryError : public Error {
+ using Error::Error;
+ };
+
+ } // namespace c10
+
+ namespace at {
+
+ struct DynamicLibrary {
+ AT_DISALLOW_COPY_AND_ASSIGN(DynamicLibrary);
+
+ TORCH_API DynamicLibrary(
+ const char* name,
+ const char* alt_name = nullptr,
+ bool leak_handle = false);
+
+ TORCH_API void* sym(const char* name);
+
+ TORCH_API ~DynamicLibrary();
+
+ private:
+ bool leak_handle;
+ void* handle = nullptr;
+ };
+
+ } // namespace at
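
A minimal sketch of the DynamicLibrary interface declared above (the library name "libexample.so" and the symbol "example_add" are made up for illustration):

#include <ATen/DynamicLibrary.h>

// Illustrative sketch only. The constructor throws c10::DynamicLibraryError if
// the library cannot be opened; the handle is closed in the destructor unless
// leak_handle was set.
int call_example_add(int a, int b) {
  at::DynamicLibrary lib("libexample.so");
  auto fn = reinterpret_cast<int (*)(int, int)>(lib.sym("example_add"));
  return fn(a, b);
}
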
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h ADDED
@@ -0,0 +1,124 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+
+ namespace at::functionalization {
+
+ // See Note [Functionalization Pass In Core]
+
+ // ViewMeta is a class used by the functionalization pass to navigate between
+ // a base tensor and a view tensor.
+ // For example, if I call `b = a.view1(...)`
+ // the functionalization pass will generate and store a ViewMeta on b that looks
+ // like:
+ //
+ // ViewMeta(
+ // [<captures>](const Tensor& base, int64_t mutated_view_idx) {
+ // return base.view1(...);
+ // },
+ // [<captures>](const at::Tensor& base, const at::Tensor& mutated_view,
+ // int64_t mutated_view_idx) -> at::Tensor {
+ // return at::functionalization::impl::view1_inverse(base, mutated_view,
+ // ...);
+ // }
+ //
+ // The forward_fn lambda describes how to replay view1 on a tensor.
+ //
+ // The reverse_fn lambda describes how, given a tensor that is already a view,
+ // how to get the corresponding base tensor. See Note [Functionalization Pass:
+ // View Inverses] for details.
+ struct ViewMeta {
+ ViewMeta(
+ std::function<Tensor(const Tensor&, int64_t)> forward,
+ std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse,
+ bool is_multi_output = false,
+ int64_t out_idx = 0)
+ : forward_fn(std::move(forward)),
+ reverse_fn(std::move(reverse)),
+ out_index(out_idx),
+ is_multi_output(is_multi_output) {}
+
+ std::function<Tensor(const Tensor&, int64_t)> forward_fn;
+ std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse_fn;
+ // See Note [out_idx in ViewMeta]
+ int64_t out_index;
+
+ // Tells us if this is a multi-output view
+ bool is_multi_output;
+
+ // Returns a copy of the current ViewMeta, if out_idx matches the current
+ // out_index. Otherwise, returns a new ViewMeta with the same forward/reverse
+ // functions, but a new out index.
+ ViewMeta to_out_idx(int64_t out_idx);
+ };
+
+ // FunctionalStorageImpl is a subclass of StorageImpl used by the
+ // functionalization pass. It has no underlying data (similar to meta storage).
+ // It also knows how to reflect mutations to tensors in the absence of a valid
+ // data pointer.
+ //
+ // A storage represents the state shared by (potentially multiple) views of the
+ // same tensor. For example, in the following code:
+ //
+ // b = a.view1(...)
+ // c = b.view2(...)
+ // b.add_(1)
+ // --> storage.add_update(b, {view1_meta})
+ //
+ // The call to add_(1) will result in a call to alias.add_update(b,
+ // {view1_meta}), queueing up the mutation from b onto the alias. Later, suppose
+ // c is used in an expression (e.g. you try to print c, or pass it to an
+ // operator). Doing so will involve "syncing" c. First we apply any pending
+ // updates to the alias, and then we regenerate c by replaying its views off of
+ // the updated alias. E.g:
+ //
+ // print(str(c))
+ // --> c.sync_()
+ // --> alias.apply_updates() // after this, the alias will be updated to
+ // reflect the mutation to b
+ struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl {
+ public:
+ struct Update {
+ const at::Tensor new_val;
+ const std::vector<ViewMeta> view_metas;
+ };
+
+ explicit FunctionalStorageImpl(const Tensor& value);
+
+ void add_update(
+ const Tensor& updated_val,
+ const std::vector<ViewMeta>& view_metas);
+ bool apply_updates();
+ const Tensor& base() {
+ return base_;
+ }
+ size_t generation() const {
+ return generation_;
+ }
+ void freeze() {
+ frozen_ = true;
+ }
+
+ ~FunctionalStorageImpl() override = default;
+
+ private:
+ // NB: base_ should always point to a tensor BELOW the current
+ // functionalization layer. This is mainly to avoid reference cycles. e.g.
+ // given `b = a.view(...)` Both a.storage_ and b.storage_ are a
+ // FunctionStorageImpl containing an Walualias, with contains a Tensor
+ // `base_`. In this case (where a and b are FunctionalTensorWrapper's), base_
+ // should point not to a, but to a's unwrapped value, a.value_` See Note
+ // [Functionalization: Walualias Removal] for a diagram that shows this
+ // visually.
+ at::Tensor base_;
+ std::vector<Update> updates_;
+ // generation_ gets incremented every time a mutation is queued onto the
+ // alias. It is used to determine if a given tensor is "up to date", or if it
+ // needs to be regenerated from the alias.
+ size_t generation_ = 0;
+ // If frozen, no more mutations are allowed on this storage. Once frozen, a
+ // storage cannot be unfrozen.
+ bool frozen_ = false;
+ };
+
+ } // namespace at::functionalization
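
An illustrative sketch of the ViewMeta calling convention described above, using narrow as the view op and slice_scatter as a stand-in inverse (the real inverses are generated elsewhere; this only shows the lambda signatures and is not part of the uploaded file):

#include <ATen/ATen.h>
#include <ATen/FunctionalStorageImpl.h>

// Illustrative sketch only: a ViewMeta for `b = a.narrow(0, 0, 2)`.
at::functionalization::ViewMeta make_narrow_view_meta() {
  return at::functionalization::ViewMeta(
      /*forward=*/[](const at::Tensor& base, int64_t /*mutated_view_idx*/) {
        return base.narrow(0, 0, 2);
      },
      /*reverse=*/[](const at::Tensor& base,
                     const at::Tensor& mutated_view,
                     int64_t /*mutated_view_idx*/) {
        // Write the mutated view back into the corresponding slice of base.
        return base.slice_scatter(mutated_view, /*dim=*/0, /*start=*/0, /*end=*/2);
      });
}
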
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h ADDED
@@ -0,0 +1,392 @@
1
+
2
+ #pragma once
3
+
4
+ #include <ATen/ArrayRef.h>
5
+ #include <ATen/FunctionalStorageImpl.h>
6
+ #include <ATen/core/IListRef.h>
7
+ #include <ATen/core/List.h>
8
+ #include <ATen/core/boxing/BoxedKernel.h>
9
+ #include <ATen/core/boxing/impl/boxing.h>
10
+ #include <ATen/core/dispatch/Dispatcher.h>
11
+
12
+ #include <c10/core/DispatchKey.h>
13
+
14
+ namespace at {
15
+
16
+ // Note [Functionalization Pass In Core]
17
+ // The Functionalization pass is used to remove aliasing from a pytorch program.
18
+ //
19
+ // This is useful for backends that don't support aliasing, like XLA and Vulkan.
20
+ // It's also necessary in order to remove mutation from a program, which is
21
+ // needed in Functorch.
22
+ //
23
+ // Consider this program:
24
+ // a = torch.ones(...)
25
+ // b = a.view(...)
26
+ // b.add_(1)
27
+ //
28
+ // In this program, b is meant to alias with a due to the use of view(). At the
29
+ // end of the program, both a and b are full of 2's. However, backends that
30
+ // don't support aliasing aren't able to correctly implement the view()
31
+ // operator. Instead, they can opt into the Functionalization pass, which will
32
+ // sit between the user and the backend, and provide the necessary aliasing
33
+ // logic.
34
+ //
35
+ // The functionalization pass will turn the above program into a slightly
36
+ // different program that has the same semantics, transparently to the user,
37
+ // that backends like XLA/Vulkan are able to implement a = torch.ones(...) b =
38
+ // a.view_copy(...) # view() replaced with view_copy(). Backends like
39
+ // XLA/Vulkan can implement this! b.add_(1) a.add_(1) # Our functionalization
40
+ // pass machinery knows that a and b are aliased - it applies b's mutation to a
41
+ // too.
42
+ //
43
+ // So, how does the functionalization pass keep track of which tensors are
44
+ // aliased? The pass works by wrapping EVERY tensor in the program inside of a
45
+ // FunctionalTensorWrapper, which knows about its alias'd tensors.
46
+ //
47
+ // See Note [Functionalization: Alias Removal] for details on the aliasing
48
+ // machinery. See Note [Functionalization: Mutation Removal] for details on
49
+ // mutation removal.
50
+ struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl {
51
+ explicit FunctionalTensorWrapper(const Tensor& value);
52
+ // Additional constructor to create a FunctionalTensorWrapper directly from an
53
+ // underlying tensor that was created from a view. For example, the code b =
54
+ // a.view1() will generate a constructor call to FunctionalTensorWrapper(b, a,
55
+ // view1_meta)
56
+ explicit FunctionalTensorWrapper(
57
+ const Tensor& view_value,
58
+ const FunctionalTensorWrapper* base,
59
+ functionalization::ViewMeta meta);
60
+
61
+ // Get the underlying, actual tensor, that doesn't know anything about
62
+ // functionalization.
63
+ const Tensor& value() const {
64
+ return value_;
65
+ };
66
+ // The concept of "level" is only ever important to functorch; it's exposed
67
+ // here as more of a hook for functorch to use.
68
+ int64_t level() const {
69
+ return level_;
70
+ };
71
+ void set_level(int64_t level) {
72
+ level_ = level;
73
+ }
74
+ bool has_metadata_mutation() const {
75
+ return has_metadata_mutation_;
76
+ };
77
+
78
+ // Denotes a mutation that's hidden from autograd,
79
+ // e.g. for the purposes of passing a tensor to a triton kernel
80
+ void mark_mutation_hidden_from_autograd() {
81
+ mutation_hidden_from_autograd_counter_++;
82
+ }
83
+ void mark_mutation_during_no_grad_or_inference_mode() {
84
+ mutation_during_no_grad_or_inference_mode_++;
85
+ }
86
+ // Are all the mutations happening to the tensor hidden from autograd
87
+ bool are_all_mutations_hidden_from_autograd() const {
88
+ return mutation_hidden_from_autograd_counter_ == mutation_counter_;
89
+ }
90
+ // Did all mutations happen under no_grad or inference_mode
91
+ // (We also need to ignore mutations fully hidden from autograd here)
92
+ bool are_all_mutations_under_no_grad_or_inference_mode() const {
93
+ return mutation_hidden_from_autograd_counter_ +
94
+ mutation_during_no_grad_or_inference_mode_ ==
95
+ mutation_counter_;
96
+ }
97
+
98
+ // Sync's the underlying tensor with its alias, if it's out of date. This
99
+ // involves two steps: 1) Apply any pending updates/mutations to the alias 2)
100
+ // Replay the views (if any) to regenerate the current tensor off of the
101
+ // updated alias.
102
+ void sync_();
103
+ // Performs step (1) of the sync. This is its own public API because it's
104
+ // needed by view_inplace ops like transpose_. See Note [Functionalization
105
+ // Pass - Inplace View Ops]
106
+ void regenerate_from_base();
107
+ // Performs step (2) of the sync. This is its own public API because it's
108
+ // needed by functorch. functorch wants to make sure that all input tensors to
109
+ // a functionalized program have been properly synced so it can properly
110
+ // propagate mutations to inputs. It can't just call sync_(), because the
111
+ // FunctionalTensorWrapper will look like it has no aliases and sync_ will be
112
+ // a noop. We use the reference count on storage_ to determine if the wrapper
113
+ // is aliased, and by the time functorch is ready to propagate updates to
114
+ // inputs, any intermediate views of the input created by the program will
115
+ // have been deallocated. This function also returns whether or not the base
116
+ // actually had any updates to apply.
117
+ bool apply_updates();
118
+ // Takes the current state of value_ and snapshots it, sending it as a pending
119
+ // update to the alias.
120
+ void commit_update();
121
+ // When any tensor is mutated, the tensor increments its alias's "generation".
122
+ // Separately, each tensor maintains its own "generation" counter, which is
123
+ // used to determine if it's up-to-date with its alias. The act of syncing a
124
+ // tensor will set a tensor's generation equal to its alias's generation.
125
+ bool is_up_to_date() const;
126
+ // Freezes the storage of this tensor, preventing subsequent mutations
127
+ void freeze_storage() const;
128
+ // Every FunctionalTensorWrapper contains a vector<ViewMeta> objects
129
+ // describing the series of view ops that ran to generate the current tensor
130
+ // from the base tensor. This method is used by inplace-view ops like
131
+ // transpose_. It appends a ViewMeta to the existing stack, and refreshes the
132
+ // tensor by replaying the views off of the alias.
133
+ void mutate_view_meta(at::functionalization::ViewMeta meta);
134
+
135
+ // Custom implementation of self.set_(src)
136
+ void set__impl(const FunctionalTensorWrapper* other);
137
+
138
+ // Returns whether the current tensor's data was ever mutated
139
+ bool has_data_mutation();
140
+ //
141
+ // Returns whether the current FunctionalTensorWrapper
142
+ // experienced a set_() call.
143
+ bool was_storage_changed() {
144
+ return was_storage_changed_;
145
+ }
146
+
147
+ // The functionalization pass can be used to remove mutations.
148
+ // It does so by replacing any mutation op with it's corresponding
149
+ // out-of-place op, followed by a call to replace_(). e.g:
150
+ //
151
+ // a.add_(1)
152
+ //
153
+ // will turn into:
154
+ //
155
+ // tmp = a.add(1)
156
+ // a.replace_(tmp)
157
+ //
158
+ // replace_() swaps out the wrapped tensor, value_, with tmp.
159
+ void replace_(const Tensor& other);
160
+
161
+ bool is_multi_output_view() {
162
+ return is_multi_output_view_;
163
+ }
164
+
165
+ // See Note[resize_() in functionalization pass]
166
+ void maybe_replace_storage(const Tensor& other);
167
+
168
+ // Replaces the storage with a new functional storage,
169
+ // and clears the view_metas_ stack.
170
+ // WARNING: Calling this function will sever the aliasing relationship between
171
+ // the current FunctionalTensorWrapper and any of its outstanding aliases.
172
+ // Please only call if you know what you're doing.
173
+ void _unsafe_reset_storage();
174
+
175
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
176
+ const c10::VariableVersion& version_counter,
177
+ bool allow_tensor_metadata_change) const override;
178
+
179
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
180
+ c10::VariableVersion&& version_counter,
181
+ bool allow_tensor_metadata_change) const override;
182
+
183
+ ~FunctionalTensorWrapper() override = default;
184
+
185
+ // FunctionalTensorWrapper overrides all custom size/stride function,
186
+ // so that if the inner tensor has a custom implementation
187
+ // we make sure to call that implementation.
188
+ at::IntArrayRef sizes_custom() const override;
189
+ at::IntArrayRef strides_custom() const override;
190
+ int64_t dim_custom() const override;
191
+ int64_t numel_custom() const override;
192
+ bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
193
+ c10::SymIntArrayRef sym_sizes_custom() const override;
194
+ c10::SymInt sym_size_custom(int64_t d) const override;
195
+ c10::SymIntArrayRef sym_strides_custom() const override;
196
+ c10::SymInt sym_storage_offset_custom() const override;
197
+ c10::Device device_custom() const override;
198
+
199
+ private:
200
+ const char* tensorimpl_type_name() const override;
201
+ void set_constructor_metadata();
202
+ functionalization::FunctionalStorageImpl* functional_storage_impl() const;
203
+
204
+ // This is used to re-implement shallow_copy_and_detach for
205
+ // FunctionalTensorWrapper. The implementation is identical, but we just need
206
+ // to return a subclass instead of a plain TensorImpl.
207
+ // TODO: maybe it's possible to arrange for that to happen automatically
208
+ // without an override here?
209
+ template <typename VariableVersion>
210
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
211
+ VariableVersion&& version_counter,
212
+ bool allow_tensor_metadata_change) const;
213
+
214
+ // Note that value is not taken by reference: internally, the wrapper will
215
+ // change the value tensor that it points to over time.
216
+ Tensor value_;
217
+ int64_t level_;
218
+ // These two counters are used for identifying
219
+ // whether all the mutations on a given tensor are hidden from autograd or
+ // not. If we have an input mutation that is hidden from autograd, then once
+ // we convert the input mutation to a copy_() we know it will be safe to hide
+ // the copy_() from autograd as well.
+ uint64_t mutation_counter_ = 0;
+ uint64_t mutation_hidden_from_autograd_counter_ = 0;
+ uint64_t mutation_during_no_grad_or_inference_mode_ = 0;
+ bool has_metadata_mutation_ = false;
+ bool is_multi_output_view_ = false;
+ // Did the tensor experience a set_() call.
+ bool was_storage_changed_ = false;
+
+ size_t generation_ = 0;
+ std::vector<at::functionalization::ViewMeta> view_metas_;
+ };
+
+ // Utility functions for the functionalization pass.
+
+ namespace functionalization {
+ namespace impl {
+
+ TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
+ const Tensor& tensor) {
+ auto functional_impl =
+ static_cast<FunctionalTensorWrapper*>(tensor.unsafeGetTensorImpl());
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_impl != nullptr);
+ return functional_impl;
+ }
+
+ TORCH_API bool isFunctionalTensor(const at::Tensor& tensor);
+ TORCH_API bool isFunctionalTensor(const c10::optional<Tensor>& t);
+ TORCH_API bool isFunctionalTensor(
+ const c10::List<c10::optional<Tensor>>& t_list);
+ TORCH_API bool isFunctionalTensor(ITensorListRef list);
+
+ TORCH_API Tensor to_functional_tensor(const Tensor& tensor);
+ TORCH_API c10::optional<Tensor> to_functional_tensor(
+ const c10::optional<Tensor>& tensor);
+ TORCH_API c10::List<c10::optional<Tensor>> to_functional_tensor(
+ const c10::List<c10::optional<Tensor>>& t_list);
+ TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list);
+
+ TORCH_API void freeze_functional_tensor(const Tensor& tensor);
+
+ TORCH_API Tensor
+ from_functional_tensor(const Tensor& tensor, bool assert_functional = true);
+ TORCH_API c10::optional<Tensor> from_functional_tensor(
+ const c10::optional<Tensor>& t,
+ bool assert_functional = true);
+ TORCH_API c10::List<c10::optional<Tensor>> from_functional_tensor(
+ const c10::List<c10::optional<Tensor>>& t_list);
+ TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list);
+
+ TORCH_API void sync(const at::Tensor& t);
+ TORCH_API void sync(const c10::optional<Tensor>& t);
+ TORCH_API void sync(const c10::List<c10::optional<Tensor>>& t_list);
+ TORCH_API void sync(ITensorListRef t_list);
+
+ TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other);
+ TORCH_API void replace_(
+ const ITensorListRef functional_tensor,
+ ITensorListRef other);
+
+ TORCH_API void commit_update(const Tensor& functional_tensor);
+ TORCH_API void commit_update(ITensorListRef functional_tensor);
+
+ TORCH_API void unsafe_reset_storage(const Tensor& functional_tensor);
+
+ TORCH_API void mark_mutation_hidden_from_autograd(
+ const Tensor& functional_tensor);
+
+ TORCH_API bool are_all_mutations_hidden_from_autograd(
+ const Tensor& functional_tensor);
+
+ TORCH_API bool are_all_mutations_under_no_grad_or_inference_mode(
+ const Tensor& functional_tensor);
+
+ // These two methods are XLA-specific logic and are no-ops
+ // for the normal functionalization flow.
+ TORCH_API void propagate_xla_data(
+ const Tensor& functional_tensor,
+ const Tensor& other);
+ TORCH_API void propagate_xla_data(
+ const ITensorListRef functional_tensor,
+ ITensorListRef other);
+
+ Tensor create_functional_tensor_with_view_meta(
+ const Tensor& view_to_wrap,
+ const Tensor& base,
+ functionalization::ViewMeta meta,
+ int64_t out_idx = 0);
+ std::vector<Tensor> create_functional_tensor_with_view_meta(
+ ITensorListRef view_to_wrap,
+ const Tensor& base,
+ functionalization::ViewMeta meta);
+
+ void mutate_view_meta(const Tensor& self, functionalization::ViewMeta meta);
+
+ void set_sizes_strides_offset(const Tensor& out, const Tensor& meta_out);
+ void set_sizes_strides_offset(
+ const std::vector<Tensor>& outs,
+ const std::vector<Tensor>& meta_outs);
+
+ // ~~~~~ TLS used in functionalization ~~~~~
+
+ TORCH_API bool getFunctionalizationReapplyViewsTLS();
+ TORCH_API void setFunctionalizationReapplyViewsTLS(bool reapply_views);
+
+ class TORCH_API FunctionalizationReapplyViewsGuard {
+ public:
+ FunctionalizationReapplyViewsGuard(bool reapply_views)
+ : prev_(getFunctionalizationReapplyViewsTLS()) {
+ setFunctionalizationReapplyViewsTLS(reapply_views);
+ }
+
+ ~FunctionalizationReapplyViewsGuard() {
+ setFunctionalizationReapplyViewsTLS(prev_);
+ }
+
+ FunctionalizationReapplyViewsGuard(
+ const FunctionalizationReapplyViewsGuard&) = delete;
+ FunctionalizationReapplyViewsGuard operator=(
+ const FunctionalizationReapplyViewsGuard&) = delete;
+ FunctionalizationReapplyViewsGuard(FunctionalizationReapplyViewsGuard&&) =
+ delete;
+ FunctionalizationReapplyViewsGuard operator=(
+ FunctionalizationReapplyViewsGuard&&) = delete;
+
+ private:
+ bool prev_;
+ };
+
+ } // namespace impl
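As an aside from the diff itself: the helpers declared in the impl namespace above are normally used together, in a wrap / run ops / sync / unwrap sequence. The following is a minimal editor sketch of that flow, not part of the header; the explicit Functionalize dispatch-key guard is an assumption about how a standalone caller might route ops through the pass, and functionalize_example is a hypothetical function name.

// Minimal sketch (editor illustration, not part of FunctionalTensorWrapper.h).
#include <ATen/ATen.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <c10/core/impl/LocalDispatchKeySet.h>

at::Tensor functionalize_example(const at::Tensor& input) {
  namespace fimpl = at::functionalization::impl;

  // Wrap a regular tensor so mutations are recorded functionally.
  at::Tensor wrapped = fimpl::to_functional_tensor(input);
  TORCH_INTERNAL_ASSERT(fimpl::isFunctionalTensor(wrapped));

  {
    // Assumption: route ops through the Functionalize key while in scope.
    c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    wrapped.add_(1);  // recorded as a functional update, not a raw in-place write
  }

  // Apply any pending updates, then unwrap back to a plain tensor.
  fimpl::sync(wrapped);
  return fimpl::from_functional_tensor(wrapped);
}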
+
+ // Helper function to call an out-of-place composite aten kernel that may use
+ // mutations / views internally, and functionalize them.
+ TORCH_API void functionalize_op_helper(
+ const c10::OperatorHandle& op,
+ torch::jit::Stack* stack);
+
+ template <class Op, bool symint, class ReturnType, class... ParameterTypes>
+ struct _functionalize_aten_op final {};
+
+ template <class Op, bool symint, class ReturnType, class... ParameterTypes>
+ struct _functionalize_aten_op<Op, symint, ReturnType(ParameterTypes...)> final {
+ static ReturnType call(
+ typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) {
+ using FuncType = ReturnType(
+ typename c10::maybe_keep_symint<symint, ParameterTypes>::type...);
+ auto op = c10::Dispatcher::singleton()
+ .findSchemaOrThrow(
+ (const char*)Op::name, (const char*)Op::overload_name)
+ .typed<FuncType>();
+
+ return c10::impl::BoxedKernelWrapper<FuncType>::call(
+ c10::BoxedKernel::makeFromFunction<functionalize_op_helper>(),
+ op,
+ // BoxedKernelWrapper knows to ignore this keyset argument,
+ // because functionalize_op_helper doesn't take in a DispatchKeySet
+ c10::DispatchKeySet(),
+ args...);
+ }
+ };
+
+ template <class Op>
+ using functionalize_aten_op =
+ _functionalize_aten_op<Op, false, typename Op::schema>;
+
+ template <class Op>
+ using functionalize_aten_op_symint =
+ _functionalize_aten_op<Op, true, typename Op::schema>;
+
+ } // namespace functionalization
+ } // namespace at
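The functionalize_aten_op aliases at the end of this header are meant to be instantiated with a generated operator class. Below is a hedged editor sketch of one possible call site; it assumes the ATEN_OP macro and the at::_ops::constant_pad_nd schema from ATen/Operators.h, and pad_functionalized is a hypothetical wrapper name, not something defined in the diff.

// Editor sketch: calling one composite op through the functionalization helper.
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/Operators.h>  // ATEN_OP / at::_ops::* (assumed available)

at::Tensor pad_functionalized(const at::Tensor& self,
                              c10::SymIntArrayRef pad,
                              const c10::Scalar& value) {
  // Dispatches aten::constant_pad_nd through functionalize_op_helper, so any
  // views/mutations inside the composite kernel are functionalized; the
  // _symint variant keeps SymInt arguments symbolic.
  return at::functionalization::functionalize_aten_op_symint<
      ATEN_OP(constant_pad_nd)>::call(self, pad, value);
}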
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Functions.h ADDED
@@ -0,0 +1,1405 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Functions.h
+
+ #ifdef TORCH_ASSERT_NO_OPERATORS
+ #error This change adds a dependency on native_functions.yaml, \
+ meaning the file will need to be re-compiled every time an operator \
+ is changed or added. Consider if your change would be better placed in \
+ another file, or if a more specific header might achieve the same goal. \
+ See NOTE: [Tensor vs. TensorBase]
+ #endif
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
+ see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ // NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
+ //
+ // In ATen, certain generated header files include the definitions of
+ // every single operator in PyTorch. Unfortunately this means every
+ // time an operator signature is updated or changed in
+ // native_functions.yaml, you (and every other PyTorch developer) need
+ // to recompile every source file that includes any of these headers.
+ //
+ // To break up these header dependencies and improve incremental
+ // build times for all PyTorch developers, these headers are split
+ // into per-operator headers in the `ATen/ops` folder. This limits
+ // incremental builds to only changes to methods of `Tensor`, or files
+ // that use the specific operator being changed. With `at::sum` as an
+ // example, you should include
+ //
+ // <ATen/ops/sum.h> // instead of ATen/Functions.h
+ // <ATen/ops/sum_native.h> // instead of ATen/NativeFunctions.h
+ // <ATen/ops/sum_ops.h> // instead of ATen/Operators.h
+ // <ATen/ops/sum_cpu_dispatch.h> // instead of ATen/CPUFunctions.h
+ //
+ // However, even if you're careful to use this in your own code,
+ // `Functions.h` might be included indirectly through another header
+ // without you realising. To avoid this, you can add
+ //
+ // #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+ //
+ // to the top of your source file. This way, any time the non-specific
+ // headers are included, the compiler will error out.
+ //
+ // Also, be aware that `ops` are not available in all build
+ // configurations (namely fb-internal), so you must guard these
+ // includes with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
+ //
+ // #ifndef AT_PER_OPERATOR_HEADERS
+ // #include <ATen/Functions.h>
+ // #else
+ // #include <ATen/ops/sum.h>
+ // #endif
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <c10/core/SymInt.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/OptionalArrayRef.h>
+
+ #include <ATen/ops/from_blob.h>
+ #include <ATen/ops/tensor.h>
+
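To make the include discipline described in the NOTE above concrete, a consumer translation unit might look like the sketch below. This is an editor illustration, not part of the generated Functions.h; sum_all is a hypothetical function name, and the include pattern is taken directly from the NOTE.

// Editor illustration of the guard pattern from the NOTE above (a consumer .cpp).
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/sum.h>
#endif

at::Tensor sum_all(const at::Tensor& t) {
  return at::sum(t);  // only the sum operator header is needed in per-op builds
}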
77
+ #include <ATen/ops/_adaptive_avg_pool2d.h>
78
+ #include <ATen/ops/_adaptive_avg_pool2d_backward.h>
79
+ #include <ATen/ops/_adaptive_avg_pool3d.h>
80
+ #include <ATen/ops/_adaptive_avg_pool3d_backward.h>
81
+ #include <ATen/ops/_add_batch_dim.h>
82
+ #include <ATen/ops/_add_relu.h>
83
+ #include <ATen/ops/_addmm_activation.h>
84
+ #include <ATen/ops/_aminmax.h>
85
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
86
+ #include <ATen/ops/_amp_update_scale.h>
87
+ #include <ATen/ops/_assert_async.h>
88
+ #include <ATen/ops/_assert_tensor_metadata.h>
89
+ #include <ATen/ops/_autocast_to_full_precision.h>
90
+ #include <ATen/ops/_autocast_to_reduced_precision.h>
91
+ #include <ATen/ops/_backward.h>
92
+ #include <ATen/ops/_batch_norm_impl_index.h>
93
+ #include <ATen/ops/_batch_norm_impl_index_backward.h>
94
+ #include <ATen/ops/_cast_Byte.h>
95
+ #include <ATen/ops/_cast_Char.h>
96
+ #include <ATen/ops/_cast_Double.h>
97
+ #include <ATen/ops/_cast_Float.h>
98
+ #include <ATen/ops/_cast_Half.h>
99
+ #include <ATen/ops/_cast_Int.h>
100
+ #include <ATen/ops/_cast_Long.h>
101
+ #include <ATen/ops/_cast_Short.h>
102
+ #include <ATen/ops/_cdist_backward.h>
103
+ #include <ATen/ops/_cdist_forward.h>
104
+ #include <ATen/ops/_cholesky_solve_helper.h>
105
+ #include <ATen/ops/_choose_qparams_per_tensor.h>
106
+ #include <ATen/ops/_coalesce.h>
107
+ #include <ATen/ops/_coalesced.h>
108
+ #include <ATen/ops/_compute_linear_combination.h>
109
+ #include <ATen/ops/_conj.h>
110
+ #include <ATen/ops/_conj_copy.h>
111
+ #include <ATen/ops/_conj_physical.h>
112
+ #include <ATen/ops/_conv_depthwise2d.h>
113
+ #include <ATen/ops/_convert_indices_from_coo_to_csr.h>
114
+ #include <ATen/ops/_convert_indices_from_csr_to_coo.h>
115
+ #include <ATen/ops/_convert_weight_to_int4pack.h>
116
+ #include <ATen/ops/_convolution.h>
117
+ #include <ATen/ops/_convolution_double_backward.h>
118
+ #include <ATen/ops/_convolution_mode.h>
119
+ #include <ATen/ops/_copy_from.h>
120
+ #include <ATen/ops/_copy_from_and_resize.h>
121
+ #include <ATen/ops/_cslt_compress.h>
122
+ #include <ATen/ops/_cslt_sparse_mm.h>
123
+ #include <ATen/ops/_ctc_loss.h>
124
+ #include <ATen/ops/_ctc_loss_backward.h>
125
+ #include <ATen/ops/_cudnn_ctc_loss.h>
126
+ #include <ATen/ops/_cudnn_init_dropout_state.h>
127
+ #include <ATen/ops/_cudnn_rnn.h>
128
+ #include <ATen/ops/_cudnn_rnn_backward.h>
129
+ #include <ATen/ops/_cudnn_rnn_flatten_weight.h>
130
+ #include <ATen/ops/_cufft_clear_plan_cache.h>
131
+ #include <ATen/ops/_cufft_get_plan_cache_max_size.h>
132
+ #include <ATen/ops/_cufft_get_plan_cache_size.h>
133
+ #include <ATen/ops/_cufft_set_plan_cache_max_size.h>
134
+ #include <ATen/ops/_cummax_helper.h>
135
+ #include <ATen/ops/_cummin_helper.h>
136
+ #include <ATen/ops/_debug_has_internal_overlap.h>
137
+ #include <ATen/ops/_dimI.h>
138
+ #include <ATen/ops/_dimV.h>
139
+ #include <ATen/ops/_dim_arange.h>
140
+ #include <ATen/ops/_dirichlet_grad.h>
141
+ #include <ATen/ops/_efficient_attention_backward.h>
142
+ #include <ATen/ops/_efficient_attention_forward.h>
143
+ #include <ATen/ops/_efficientzerotensor.h>
144
+ #include <ATen/ops/_embedding_bag.h>
145
+ #include <ATen/ops/_embedding_bag_backward.h>
146
+ #include <ATen/ops/_embedding_bag_dense_backward.h>
147
+ #include <ATen/ops/_embedding_bag_forward_only.h>
148
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
149
+ #include <ATen/ops/_embedding_bag_sparse_backward.h>
150
+ #include <ATen/ops/_empty_affine_quantized.h>
151
+ #include <ATen/ops/_empty_per_channel_affine_quantized.h>
152
+ #include <ATen/ops/_euclidean_dist.h>
153
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
154
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h>
155
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
156
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h>
157
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
158
+ #include <ATen/ops/_fft_c2c.h>
159
+ #include <ATen/ops/_fft_c2r.h>
160
+ #include <ATen/ops/_fft_r2c.h>
161
+ #include <ATen/ops/_fill_mem_eff_dropout_mask.h>
162
+ #include <ATen/ops/_flash_attention_backward.h>
163
+ #include <ATen/ops/_flash_attention_forward.h>
164
+ #include <ATen/ops/_foobar.h>
165
+ #include <ATen/ops/_foreach_abs.h>
166
+ #include <ATen/ops/_foreach_acos.h>
167
+ #include <ATen/ops/_foreach_add.h>
168
+ #include <ATen/ops/_foreach_addcdiv.h>
169
+ #include <ATen/ops/_foreach_addcmul.h>
170
+ #include <ATen/ops/_foreach_asin.h>
171
+ #include <ATen/ops/_foreach_atan.h>
172
+ #include <ATen/ops/_foreach_ceil.h>
173
+ #include <ATen/ops/_foreach_clamp_max.h>
174
+ #include <ATen/ops/_foreach_clamp_min.h>
175
+ #include <ATen/ops/_foreach_copy.h>
176
+ #include <ATen/ops/_foreach_cos.h>
177
+ #include <ATen/ops/_foreach_cosh.h>
178
+ #include <ATen/ops/_foreach_div.h>
179
+ #include <ATen/ops/_foreach_erf.h>
180
+ #include <ATen/ops/_foreach_erfc.h>
181
+ #include <ATen/ops/_foreach_exp.h>
182
+ #include <ATen/ops/_foreach_expm1.h>
183
+ #include <ATen/ops/_foreach_floor.h>
184
+ #include <ATen/ops/_foreach_frac.h>
185
+ #include <ATen/ops/_foreach_lerp.h>
186
+ #include <ATen/ops/_foreach_lgamma.h>
187
+ #include <ATen/ops/_foreach_log.h>
188
+ #include <ATen/ops/_foreach_log10.h>
189
+ #include <ATen/ops/_foreach_log1p.h>
190
+ #include <ATen/ops/_foreach_log2.h>
191
+ #include <ATen/ops/_foreach_maximum.h>
192
+ #include <ATen/ops/_foreach_minimum.h>
193
+ #include <ATen/ops/_foreach_mul.h>
194
+ #include <ATen/ops/_foreach_neg.h>
195
+ #include <ATen/ops/_foreach_norm.h>
196
+ #include <ATen/ops/_foreach_pow.h>
197
+ #include <ATen/ops/_foreach_reciprocal.h>
198
+ #include <ATen/ops/_foreach_round.h>
199
+ #include <ATen/ops/_foreach_sigmoid.h>
200
+ #include <ATen/ops/_foreach_sign.h>
201
+ #include <ATen/ops/_foreach_sin.h>
202
+ #include <ATen/ops/_foreach_sinh.h>
203
+ #include <ATen/ops/_foreach_sqrt.h>
204
+ #include <ATen/ops/_foreach_sub.h>
205
+ #include <ATen/ops/_foreach_tan.h>
206
+ #include <ATen/ops/_foreach_tanh.h>
207
+ #include <ATen/ops/_foreach_trunc.h>
208
+ #include <ATen/ops/_foreach_zero.h>
209
+ #include <ATen/ops/_functional_assert_async.h>
210
+ #include <ATen/ops/_functional_sym_constrain_range.h>
211
+ #include <ATen/ops/_functional_sym_constrain_range_for_size.h>
212
+ #include <ATen/ops/_fused_adam.h>
213
+ #include <ATen/ops/_fused_adamw.h>
214
+ #include <ATen/ops/_fused_dropout.h>
215
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
216
+ #include <ATen/ops/_fused_sdp_choice.h>
217
+ #include <ATen/ops/_fw_primal.h>
218
+ #include <ATen/ops/_fw_primal_copy.h>
219
+ #include <ATen/ops/_gather_sparse_backward.h>
220
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
221
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h>
222
+ #include <ATen/ops/_has_compatible_shallow_copy_type.h>
223
+ #include <ATen/ops/_has_same_storage_numel.h>
224
+ #include <ATen/ops/_histogramdd_bin_edges.h>
225
+ #include <ATen/ops/_histogramdd_from_bin_cts.h>
226
+ #include <ATen/ops/_histogramdd_from_bin_tensors.h>
227
+ #include <ATen/ops/_index_put_impl.h>
228
+ #include <ATen/ops/_indices.h>
229
+ #include <ATen/ops/_indices_copy.h>
230
+ #include <ATen/ops/_int_mm.h>
231
+ #include <ATen/ops/_is_all_true.h>
232
+ #include <ATen/ops/_is_any_true.h>
233
+ #include <ATen/ops/_is_zerotensor.h>
234
+ #include <ATen/ops/_linalg_check_errors.h>
235
+ #include <ATen/ops/_linalg_det.h>
236
+ #include <ATen/ops/_linalg_eigh.h>
237
+ #include <ATen/ops/_linalg_slogdet.h>
238
+ #include <ATen/ops/_linalg_solve_ex.h>
239
+ #include <ATen/ops/_linalg_svd.h>
240
+ #include <ATen/ops/_local_scalar_dense.h>
241
+ #include <ATen/ops/_log_softmax.h>
242
+ #include <ATen/ops/_log_softmax_backward_data.h>
243
+ #include <ATen/ops/_logcumsumexp.h>
244
+ #include <ATen/ops/_lstm_mps.h>
245
+ #include <ATen/ops/_lu_with_info.h>
246
+ #include <ATen/ops/_make_dep_token.h>
247
+ #include <ATen/ops/_make_dual.h>
248
+ #include <ATen/ops/_make_dual_copy.h>
249
+ #include <ATen/ops/_make_per_channel_quantized_tensor.h>
250
+ #include <ATen/ops/_make_per_tensor_quantized_tensor.h>
251
+ #include <ATen/ops/_masked_scale.h>
252
+ #include <ATen/ops/_masked_softmax.h>
253
+ #include <ATen/ops/_masked_softmax_backward.h>
254
+ #include <ATen/ops/_mixed_dtypes_linear.h>
255
+ #include <ATen/ops/_mkldnn_reshape.h>
256
+ #include <ATen/ops/_mkldnn_transpose.h>
257
+ #include <ATen/ops/_mps_convolution.h>
258
+ #include <ATen/ops/_mps_convolution_transpose.h>
259
+ #include <ATen/ops/_native_batch_norm_legit.h>
260
+ #include <ATen/ops/_native_batch_norm_legit_no_training.h>
261
+ #include <ATen/ops/_native_multi_head_attention.h>
262
+ #include <ATen/ops/_neg_view.h>
263
+ #include <ATen/ops/_neg_view_copy.h>
264
+ #include <ATen/ops/_nested_from_padded.h>
265
+ #include <ATen/ops/_nested_from_padded_and_nested_example.h>
266
+ #include <ATen/ops/_nested_select_backward.h>
267
+ #include <ATen/ops/_nested_sum_backward.h>
268
+ #include <ATen/ops/_nested_tensor_from_mask.h>
269
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned.h>
270
+ #include <ATen/ops/_nested_tensor_from_tensor_list.h>
271
+ #include <ATen/ops/_nested_tensor_size.h>
272
+ #include <ATen/ops/_nested_tensor_softmax_with_shape.h>
273
+ #include <ATen/ops/_nested_tensor_storage_offsets.h>
274
+ #include <ATen/ops/_nested_tensor_strides.h>
275
+ #include <ATen/ops/_nested_view_from_buffer.h>
276
+ #include <ATen/ops/_nested_view_from_buffer_copy.h>
277
+ #include <ATen/ops/_new_zeros_with_same_feature_meta.h>
278
+ #include <ATen/ops/_nnpack_available.h>
279
+ #include <ATen/ops/_nnpack_spatial_convolution.h>
280
+ #include <ATen/ops/_nnz.h>
281
+ #include <ATen/ops/_pack_padded_sequence.h>
282
+ #include <ATen/ops/_pack_padded_sequence_backward.h>
283
+ #include <ATen/ops/_pad_circular.h>
284
+ #include <ATen/ops/_pad_enum.h>
285
+ #include <ATen/ops/_pad_packed_sequence.h>
286
+ #include <ATen/ops/_pdist_backward.h>
287
+ #include <ATen/ops/_pdist_forward.h>
288
+ #include <ATen/ops/_pin_memory.h>
289
+ #include <ATen/ops/_prelu_kernel.h>
290
+ #include <ATen/ops/_prelu_kernel_backward.h>
291
+ #include <ATen/ops/_propagate_xla_data.h>
292
+ #include <ATen/ops/_remove_batch_dim.h>
293
+ #include <ATen/ops/_reshape_alias.h>
294
+ #include <ATen/ops/_reshape_alias_copy.h>
295
+ #include <ATen/ops/_reshape_copy.h>
296
+ #include <ATen/ops/_reshape_from_tensor.h>
297
+ #include <ATen/ops/_resize_output.h>
298
+ #include <ATen/ops/_rowwise_prune.h>
299
+ #include <ATen/ops/_sample_dirichlet.h>
300
+ #include <ATen/ops/_saturate_weight_to_fp16.h>
301
+ #include <ATen/ops/_scaled_dot_product_attention_math.h>
302
+ #include <ATen/ops/_scaled_dot_product_efficient_attention.h>
303
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_backward.h>
304
+ #include <ATen/ops/_scaled_dot_product_flash_attention.h>
305
+ #include <ATen/ops/_scaled_dot_product_flash_attention_backward.h>
306
+ #include <ATen/ops/_scaled_mm.h>
307
+ #include <ATen/ops/_segment_reduce_backward.h>
308
+ #include <ATen/ops/_shape_as_tensor.h>
309
+ #include <ATen/ops/_slow_conv2d_backward.h>
310
+ #include <ATen/ops/_slow_conv2d_forward.h>
311
+ #include <ATen/ops/_sobol_engine_draw.h>
312
+ #include <ATen/ops/_sobol_engine_ff.h>
313
+ #include <ATen/ops/_sobol_engine_initialize_state.h>
314
+ #include <ATen/ops/_sobol_engine_scramble.h>
315
+ #include <ATen/ops/_softmax.h>
316
+ #include <ATen/ops/_softmax_backward_data.h>
317
+ #include <ATen/ops/_sparse_addmm.h>
318
+ #include <ATen/ops/_sparse_broadcast_to.h>
319
+ #include <ATen/ops/_sparse_broadcast_to_copy.h>
320
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
321
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
322
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
323
+ #include <ATen/ops/_sparse_coo_tensor_unsafe.h>
324
+ #include <ATen/ops/_sparse_coo_tensor_with_dims.h>
325
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
326
+ #include <ATen/ops/_sparse_csc_tensor_unsafe.h>
327
+ #include <ATen/ops/_sparse_csr_prod.h>
328
+ #include <ATen/ops/_sparse_csr_sum.h>
329
+ #include <ATen/ops/_sparse_csr_tensor_unsafe.h>
330
+ #include <ATen/ops/_sparse_log_softmax.h>
331
+ #include <ATen/ops/_sparse_log_softmax_backward_data.h>
332
+ #include <ATen/ops/_sparse_mask_projection.h>
333
+ #include <ATen/ops/_sparse_mm.h>
334
+ #include <ATen/ops/_sparse_mm_reduce_impl.h>
335
+ #include <ATen/ops/_sparse_mm_reduce_impl_backward.h>
336
+ #include <ATen/ops/_sparse_semi_structured_linear.h>
337
+ #include <ATen/ops/_sparse_softmax.h>
338
+ #include <ATen/ops/_sparse_softmax_backward_data.h>
339
+ #include <ATen/ops/_sparse_sparse_matmul.h>
340
+ #include <ATen/ops/_sparse_sum.h>
341
+ #include <ATen/ops/_sparse_sum_backward.h>
342
+ #include <ATen/ops/_spdiags.h>
343
+ #include <ATen/ops/_stack.h>
344
+ #include <ATen/ops/_standard_gamma.h>
345
+ #include <ATen/ops/_standard_gamma_grad.h>
346
+ #include <ATen/ops/_test_ambiguous_defaults.h>
347
+ #include <ATen/ops/_test_autograd_multiple_dispatch.h>
348
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view.h>
349
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
350
+ #include <ATen/ops/_test_check_tensor.h>
351
+ #include <ATen/ops/_test_functorch_fallback.h>
352
+ #include <ATen/ops/_test_optional_filled_intlist.h>
353
+ #include <ATen/ops/_test_optional_floatlist.h>
354
+ #include <ATen/ops/_test_optional_intlist.h>
355
+ #include <ATen/ops/_test_serialization_subcmul.h>
356
+ #include <ATen/ops/_test_string_default.h>
357
+ #include <ATen/ops/_test_warn_in_autograd.h>
358
+ #include <ATen/ops/_thnn_differentiable_gru_cell_backward.h>
359
+ #include <ATen/ops/_thnn_differentiable_lstm_cell_backward.h>
360
+ #include <ATen/ops/_thnn_fused_gru_cell.h>
361
+ #include <ATen/ops/_thnn_fused_gru_cell_backward.h>
362
+ #include <ATen/ops/_thnn_fused_lstm_cell.h>
363
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward.h>
364
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
365
+ #include <ATen/ops/_to_copy.h>
366
+ #include <ATen/ops/_to_cpu.h>
367
+ #include <ATen/ops/_to_dense.h>
368
+ #include <ATen/ops/_to_sparse.h>
369
+ #include <ATen/ops/_to_sparse_bsc.h>
370
+ #include <ATen/ops/_to_sparse_bsr.h>
371
+ #include <ATen/ops/_to_sparse_csc.h>
372
+ #include <ATen/ops/_to_sparse_csr.h>
373
+ #include <ATen/ops/_to_sparse_semi_structured.h>
374
+ #include <ATen/ops/_transform_bias_rescale_qkv.h>
375
+ #include <ATen/ops/_transformer_encoder_layer_fwd.h>
376
+ #include <ATen/ops/_trilinear.h>
377
+ #include <ATen/ops/_triton_multi_head_attention.h>
378
+ #include <ATen/ops/_triton_scaled_dot_attention.h>
379
+ #include <ATen/ops/_unique.h>
380
+ #include <ATen/ops/_unique2.h>
381
+ #include <ATen/ops/_unpack_dual.h>
382
+ #include <ATen/ops/_unsafe_index.h>
383
+ #include <ATen/ops/_unsafe_index_put.h>
384
+ #include <ATen/ops/_unsafe_view.h>
385
+ #include <ATen/ops/_upsample_bicubic2d_aa.h>
386
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
387
+ #include <ATen/ops/_upsample_bilinear2d_aa.h>
388
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
389
+ #include <ATen/ops/_upsample_nearest_exact1d.h>
390
+ #include <ATen/ops/_upsample_nearest_exact1d_backward.h>
391
+ #include <ATen/ops/_upsample_nearest_exact2d.h>
392
+ #include <ATen/ops/_upsample_nearest_exact2d_backward.h>
393
+ #include <ATen/ops/_upsample_nearest_exact3d.h>
394
+ #include <ATen/ops/_upsample_nearest_exact3d_backward.h>
395
+ #include <ATen/ops/_use_cudnn_ctc_loss.h>
396
+ #include <ATen/ops/_use_cudnn_rnn_flatten_weight.h>
397
+ #include <ATen/ops/_validate_compressed_sparse_indices.h>
398
+ #include <ATen/ops/_validate_sparse_bsc_tensor_args.h>
399
+ #include <ATen/ops/_validate_sparse_bsr_tensor_args.h>
400
+ #include <ATen/ops/_validate_sparse_compressed_tensor_args.h>
401
+ #include <ATen/ops/_validate_sparse_coo_tensor_args.h>
402
+ #include <ATen/ops/_validate_sparse_csc_tensor_args.h>
403
+ #include <ATen/ops/_validate_sparse_csr_tensor_args.h>
404
+ #include <ATen/ops/_values.h>
405
+ #include <ATen/ops/_values_copy.h>
406
+ #include <ATen/ops/_version.h>
407
+ #include <ATen/ops/_weight_int4pack_mm.h>
408
+ #include <ATen/ops/_weight_norm.h>
409
+ #include <ATen/ops/_weight_norm_differentiable_backward.h>
410
+ #include <ATen/ops/_weight_norm_interface.h>
411
+ #include <ATen/ops/_weight_norm_interface_backward.h>
412
+ #include <ATen/ops/abs.h>
413
+ #include <ATen/ops/absolute.h>
414
+ #include <ATen/ops/acos.h>
415
+ #include <ATen/ops/acosh.h>
416
+ #include <ATen/ops/adaptive_avg_pool1d.h>
417
+ #include <ATen/ops/adaptive_avg_pool2d.h>
418
+ #include <ATen/ops/adaptive_avg_pool3d.h>
419
+ #include <ATen/ops/adaptive_avg_pool3d_backward.h>
420
+ #include <ATen/ops/adaptive_max_pool1d.h>
421
+ #include <ATen/ops/adaptive_max_pool2d.h>
422
+ #include <ATen/ops/adaptive_max_pool2d_backward.h>
423
+ #include <ATen/ops/adaptive_max_pool3d.h>
424
+ #include <ATen/ops/adaptive_max_pool3d_backward.h>
425
+ #include <ATen/ops/add.h>
426
+ #include <ATen/ops/addbmm.h>
427
+ #include <ATen/ops/addcdiv.h>
428
+ #include <ATen/ops/addcmul.h>
429
+ #include <ATen/ops/addmm.h>
430
+ #include <ATen/ops/addmv.h>
431
+ #include <ATen/ops/addr.h>
432
+ #include <ATen/ops/adjoint.h>
433
+ #include <ATen/ops/affine_grid_generator.h>
434
+ #include <ATen/ops/affine_grid_generator_backward.h>
435
+ #include <ATen/ops/alias.h>
436
+ #include <ATen/ops/alias_copy.h>
437
+ #include <ATen/ops/align_as.h>
438
+ #include <ATen/ops/align_tensors.h>
439
+ #include <ATen/ops/align_to.h>
440
+ #include <ATen/ops/all.h>
441
+ #include <ATen/ops/allclose.h>
442
+ #include <ATen/ops/alpha_dropout.h>
443
+ #include <ATen/ops/amax.h>
444
+ #include <ATen/ops/amin.h>
445
+ #include <ATen/ops/aminmax.h>
446
+ #include <ATen/ops/and.h>
447
+ #include <ATen/ops/angle.h>
448
+ #include <ATen/ops/any.h>
449
+ #include <ATen/ops/arange.h>
450
+ #include <ATen/ops/arccos.h>
451
+ #include <ATen/ops/arccosh.h>
452
+ #include <ATen/ops/arcsin.h>
453
+ #include <ATen/ops/arcsinh.h>
454
+ #include <ATen/ops/arctan.h>
455
+ #include <ATen/ops/arctan2.h>
456
+ #include <ATen/ops/arctanh.h>
457
+ #include <ATen/ops/argmax.h>
458
+ #include <ATen/ops/argmin.h>
459
+ #include <ATen/ops/argsort.h>
460
+ #include <ATen/ops/argwhere.h>
461
+ #include <ATen/ops/as_strided.h>
462
+ #include <ATen/ops/as_strided_copy.h>
463
+ #include <ATen/ops/as_strided_scatter.h>
464
+ #include <ATen/ops/asin.h>
465
+ #include <ATen/ops/asinh.h>
466
+ #include <ATen/ops/atan.h>
467
+ #include <ATen/ops/atan2.h>
468
+ #include <ATen/ops/atanh.h>
469
+ #include <ATen/ops/atleast_1d.h>
470
+ #include <ATen/ops/atleast_2d.h>
471
+ #include <ATen/ops/atleast_3d.h>
472
+ #include <ATen/ops/avg_pool1d.h>
473
+ #include <ATen/ops/avg_pool2d.h>
474
+ #include <ATen/ops/avg_pool2d_backward.h>
475
+ #include <ATen/ops/avg_pool3d.h>
476
+ #include <ATen/ops/avg_pool3d_backward.h>
477
+ #include <ATen/ops/baddbmm.h>
478
+ #include <ATen/ops/bartlett_window.h>
479
+ #include <ATen/ops/batch_norm.h>
480
+ #include <ATen/ops/batch_norm_backward_elemt.h>
481
+ #include <ATen/ops/batch_norm_backward_reduce.h>
482
+ #include <ATen/ops/batch_norm_elemt.h>
483
+ #include <ATen/ops/batch_norm_gather_stats.h>
484
+ #include <ATen/ops/batch_norm_gather_stats_with_counts.h>
485
+ #include <ATen/ops/batch_norm_stats.h>
486
+ #include <ATen/ops/batch_norm_update_stats.h>
487
+ #include <ATen/ops/bernoulli.h>
488
+ #include <ATen/ops/bilinear.h>
489
+ #include <ATen/ops/binary_cross_entropy.h>
490
+ #include <ATen/ops/binary_cross_entropy_backward.h>
491
+ #include <ATen/ops/binary_cross_entropy_with_logits.h>
492
+ #include <ATen/ops/bincount.h>
493
+ #include <ATen/ops/binomial.h>
494
+ #include <ATen/ops/bitwise_and.h>
495
+ #include <ATen/ops/bitwise_left_shift.h>
496
+ #include <ATen/ops/bitwise_not.h>
497
+ #include <ATen/ops/bitwise_or.h>
498
+ #include <ATen/ops/bitwise_right_shift.h>
499
+ #include <ATen/ops/bitwise_xor.h>
500
+ #include <ATen/ops/blackman_window.h>
501
+ #include <ATen/ops/block_diag.h>
502
+ #include <ATen/ops/bmm.h>
503
+ #include <ATen/ops/broadcast_tensors.h>
504
+ #include <ATen/ops/broadcast_to.h>
505
+ #include <ATen/ops/bucketize.h>
506
+ #include <ATen/ops/can_cast.h>
507
+ #include <ATen/ops/cartesian_prod.h>
508
+ #include <ATen/ops/cat.h>
509
+ #include <ATen/ops/cauchy.h>
510
+ #include <ATen/ops/ccol_indices.h>
511
+ #include <ATen/ops/ccol_indices_copy.h>
512
+ #include <ATen/ops/cdist.h>
513
+ #include <ATen/ops/ceil.h>
514
+ #include <ATen/ops/celu.h>
515
+ #include <ATen/ops/chain_matmul.h>
516
+ #include <ATen/ops/chalf.h>
517
+ #include <ATen/ops/channel_shuffle.h>
518
+ #include <ATen/ops/cholesky.h>
519
+ #include <ATen/ops/cholesky_inverse.h>
520
+ #include <ATen/ops/cholesky_solve.h>
521
+ #include <ATen/ops/choose_qparams_optimized.h>
522
+ #include <ATen/ops/chunk.h>
523
+ #include <ATen/ops/clamp.h>
524
+ #include <ATen/ops/clamp_max.h>
525
+ #include <ATen/ops/clamp_min.h>
526
+ #include <ATen/ops/clip.h>
527
+ #include <ATen/ops/clone.h>
528
+ #include <ATen/ops/coalesce.h>
529
+ #include <ATen/ops/col2im.h>
530
+ #include <ATen/ops/col_indices.h>
531
+ #include <ATen/ops/col_indices_copy.h>
532
+ #include <ATen/ops/column_stack.h>
533
+ #include <ATen/ops/combinations.h>
534
+ #include <ATen/ops/complex.h>
535
+ #include <ATen/ops/concat.h>
536
+ #include <ATen/ops/concatenate.h>
537
+ #include <ATen/ops/conj.h>
538
+ #include <ATen/ops/conj_physical.h>
539
+ #include <ATen/ops/constant_pad_nd.h>
540
+ #include <ATen/ops/contiguous.h>
541
+ #include <ATen/ops/conv1d.h>
542
+ #include <ATen/ops/conv2d.h>
543
+ #include <ATen/ops/conv3d.h>
544
+ #include <ATen/ops/conv_depthwise3d.h>
545
+ #include <ATen/ops/conv_tbc.h>
546
+ #include <ATen/ops/conv_tbc_backward.h>
547
+ #include <ATen/ops/conv_transpose1d.h>
548
+ #include <ATen/ops/conv_transpose2d.h>
549
+ #include <ATen/ops/conv_transpose3d.h>
550
+ #include <ATen/ops/convolution.h>
551
+ #include <ATen/ops/convolution_backward.h>
552
+ #include <ATen/ops/convolution_backward_overrideable.h>
553
+ #include <ATen/ops/convolution_overrideable.h>
554
+ #include <ATen/ops/copy.h>
555
+ #include <ATen/ops/copy_sparse_to_sparse.h>
556
+ #include <ATen/ops/copysign.h>
557
+ #include <ATen/ops/corrcoef.h>
558
+ #include <ATen/ops/cos.h>
559
+ #include <ATen/ops/cosh.h>
560
+ #include <ATen/ops/cosine_embedding_loss.h>
561
+ #include <ATen/ops/cosine_similarity.h>
562
+ #include <ATen/ops/count_nonzero.h>
563
+ #include <ATen/ops/cov.h>
564
+ #include <ATen/ops/cross.h>
565
+ #include <ATen/ops/cross_entropy_loss.h>
566
+ #include <ATen/ops/crow_indices.h>
567
+ #include <ATen/ops/crow_indices_copy.h>
568
+ #include <ATen/ops/ctc_loss.h>
569
+ #include <ATen/ops/cudnn_affine_grid_generator.h>
570
+ #include <ATen/ops/cudnn_affine_grid_generator_backward.h>
571
+ #include <ATen/ops/cudnn_batch_norm.h>
572
+ #include <ATen/ops/cudnn_batch_norm_backward.h>
573
+ #include <ATen/ops/cudnn_convolution.h>
574
+ #include <ATen/ops/cudnn_convolution_add_relu.h>
575
+ #include <ATen/ops/cudnn_convolution_relu.h>
576
+ #include <ATen/ops/cudnn_convolution_transpose.h>
577
+ #include <ATen/ops/cudnn_grid_sampler.h>
578
+ #include <ATen/ops/cudnn_grid_sampler_backward.h>
579
+ #include <ATen/ops/cudnn_is_acceptable.h>
580
+ #include <ATen/ops/cummax.h>
581
+ #include <ATen/ops/cummaxmin_backward.h>
582
+ #include <ATen/ops/cummin.h>
583
+ #include <ATen/ops/cumprod.h>
584
+ #include <ATen/ops/cumprod_backward.h>
585
+ #include <ATen/ops/cumsum.h>
586
+ #include <ATen/ops/cumulative_trapezoid.h>
587
+ #include <ATen/ops/data.h>
588
+ #include <ATen/ops/deg2rad.h>
589
+ #include <ATen/ops/dense_dim.h>
590
+ #include <ATen/ops/dequantize.h>
591
+ #include <ATen/ops/det.h>
592
+ #include <ATen/ops/detach.h>
593
+ #include <ATen/ops/detach_copy.h>
594
+ #include <ATen/ops/diag.h>
595
+ #include <ATen/ops/diag_embed.h>
596
+ #include <ATen/ops/diagflat.h>
597
+ #include <ATen/ops/diagonal.h>
598
+ #include <ATen/ops/diagonal_backward.h>
599
+ #include <ATen/ops/diagonal_copy.h>
600
+ #include <ATen/ops/diagonal_scatter.h>
601
+ #include <ATen/ops/diff.h>
602
+ #include <ATen/ops/digamma.h>
603
+ #include <ATen/ops/dist.h>
604
+ #include <ATen/ops/div.h>
605
+ #include <ATen/ops/divide.h>
606
+ #include <ATen/ops/dot.h>
607
+ #include <ATen/ops/dropout.h>
608
+ #include <ATen/ops/dsplit.h>
609
+ #include <ATen/ops/dstack.h>
610
+ #include <ATen/ops/einsum.h>
611
+ #include <ATen/ops/elu.h>
612
+ #include <ATen/ops/elu_backward.h>
613
+ #include <ATen/ops/embedding.h>
614
+ #include <ATen/ops/embedding_backward.h>
615
+ #include <ATen/ops/embedding_bag.h>
616
+ #include <ATen/ops/embedding_dense_backward.h>
617
+ #include <ATen/ops/embedding_renorm.h>
618
+ #include <ATen/ops/embedding_sparse_backward.h>
619
+ #include <ATen/ops/empty.h>
620
+ #include <ATen/ops/empty_like.h>
621
+ #include <ATen/ops/empty_permuted.h>
622
+ #include <ATen/ops/empty_quantized.h>
623
+ #include <ATen/ops/empty_strided.h>
624
+ #include <ATen/ops/eq.h>
625
+ #include <ATen/ops/equal.h>
626
+ #include <ATen/ops/erf.h>
627
+ #include <ATen/ops/erfc.h>
628
+ #include <ATen/ops/erfinv.h>
629
+ #include <ATen/ops/exp.h>
630
+ #include <ATen/ops/exp2.h>
631
+ #include <ATen/ops/expand.h>
632
+ #include <ATen/ops/expand_as.h>
633
+ #include <ATen/ops/expand_copy.h>
634
+ #include <ATen/ops/expm1.h>
635
+ #include <ATen/ops/exponential.h>
636
+ #include <ATen/ops/eye.h>
637
+ #include <ATen/ops/fake_quantize_per_channel_affine.h>
638
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
639
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h>
640
+ #include <ATen/ops/fake_quantize_per_tensor_affine.h>
641
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
642
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h>
643
+ #include <ATen/ops/fbgemm_linear_fp16_weight.h>
644
+ #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h>
645
+ #include <ATen/ops/fbgemm_linear_int8_weight.h>
646
+ #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h>
647
+ #include <ATen/ops/fbgemm_linear_quantize_weight.h>
648
+ #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16.h>
649
+ #include <ATen/ops/fbgemm_pack_quantized_matrix.h>
650
+ #include <ATen/ops/feature_alpha_dropout.h>
651
+ #include <ATen/ops/feature_dropout.h>
652
+ #include <ATen/ops/fft_fft.h>
653
+ #include <ATen/ops/fft_fft2.h>
654
+ #include <ATen/ops/fft_fftfreq.h>
655
+ #include <ATen/ops/fft_fftn.h>
656
+ #include <ATen/ops/fft_fftshift.h>
657
+ #include <ATen/ops/fft_hfft.h>
658
+ #include <ATen/ops/fft_hfft2.h>
659
+ #include <ATen/ops/fft_hfftn.h>
660
+ #include <ATen/ops/fft_ifft.h>
661
+ #include <ATen/ops/fft_ifft2.h>
662
+ #include <ATen/ops/fft_ifftn.h>
663
+ #include <ATen/ops/fft_ifftshift.h>
664
+ #include <ATen/ops/fft_ihfft.h>
665
+ #include <ATen/ops/fft_ihfft2.h>
666
+ #include <ATen/ops/fft_ihfftn.h>
667
+ #include <ATen/ops/fft_irfft.h>
668
+ #include <ATen/ops/fft_irfft2.h>
669
+ #include <ATen/ops/fft_irfftn.h>
670
+ #include <ATen/ops/fft_rfft.h>
671
+ #include <ATen/ops/fft_rfft2.h>
672
+ #include <ATen/ops/fft_rfftfreq.h>
673
+ #include <ATen/ops/fft_rfftn.h>
674
+ #include <ATen/ops/fill.h>
675
+ #include <ATen/ops/fill_diagonal.h>
676
+ #include <ATen/ops/fix.h>
677
+ #include <ATen/ops/flatten.h>
678
+ #include <ATen/ops/flatten_dense_tensors.h>
679
+ #include <ATen/ops/flip.h>
680
+ #include <ATen/ops/fliplr.h>
681
+ #include <ATen/ops/flipud.h>
682
+ #include <ATen/ops/float_power.h>
683
+ #include <ATen/ops/floor.h>
684
+ #include <ATen/ops/floor_divide.h>
685
+ #include <ATen/ops/fmax.h>
686
+ #include <ATen/ops/fmin.h>
687
+ #include <ATen/ops/fmod.h>
688
+ #include <ATen/ops/frac.h>
689
+ #include <ATen/ops/fractional_max_pool2d.h>
690
+ #include <ATen/ops/fractional_max_pool2d_backward.h>
691
+ #include <ATen/ops/fractional_max_pool3d.h>
692
+ #include <ATen/ops/fractional_max_pool3d_backward.h>
693
+ #include <ATen/ops/frexp.h>
694
+ #include <ATen/ops/frobenius_norm.h>
695
+ #include <ATen/ops/from_file.h>
696
+ #include <ATen/ops/full.h>
697
+ #include <ATen/ops/full_like.h>
698
+ #include <ATen/ops/fused_moving_avg_obs_fake_quant.h>
699
+ #include <ATen/ops/gather.h>
700
+ #include <ATen/ops/gather_backward.h>
701
+ #include <ATen/ops/gcd.h>
702
+ #include <ATen/ops/ge.h>
703
+ #include <ATen/ops/gelu.h>
704
+ #include <ATen/ops/gelu_backward.h>
705
+ #include <ATen/ops/geometric.h>
706
+ #include <ATen/ops/geqrf.h>
707
+ #include <ATen/ops/ger.h>
708
+ #include <ATen/ops/glu.h>
709
+ #include <ATen/ops/glu_backward.h>
710
+ #include <ATen/ops/glu_backward_jvp.h>
711
+ #include <ATen/ops/glu_jvp.h>
712
+ #include <ATen/ops/gradient.h>
713
+ #include <ATen/ops/greater.h>
714
+ #include <ATen/ops/greater_equal.h>
715
+ #include <ATen/ops/grid_sampler.h>
716
+ #include <ATen/ops/grid_sampler_2d.h>
717
+ #include <ATen/ops/grid_sampler_2d_backward.h>
718
+ #include <ATen/ops/grid_sampler_3d.h>
719
+ #include <ATen/ops/grid_sampler_3d_backward.h>
720
+ #include <ATen/ops/group_norm.h>
721
+ #include <ATen/ops/gru.h>
722
+ #include <ATen/ops/gru_cell.h>
723
+ #include <ATen/ops/gt.h>
724
+ #include <ATen/ops/hamming_window.h>
725
+ #include <ATen/ops/hann_window.h>
726
+ #include <ATen/ops/hardshrink.h>
727
+ #include <ATen/ops/hardshrink_backward.h>
728
+ #include <ATen/ops/hardsigmoid.h>
729
+ #include <ATen/ops/hardsigmoid_backward.h>
730
+ #include <ATen/ops/hardswish.h>
731
+ #include <ATen/ops/hardswish_backward.h>
732
+ #include <ATen/ops/hardtanh.h>
733
+ #include <ATen/ops/hardtanh_backward.h>
734
+ #include <ATen/ops/heaviside.h>
735
+ #include <ATen/ops/hinge_embedding_loss.h>
736
+ #include <ATen/ops/histc.h>
737
+ #include <ATen/ops/histogram.h>
738
+ #include <ATen/ops/histogramdd.h>
739
+ #include <ATen/ops/hsplit.h>
740
+ #include <ATen/ops/hspmm.h>
741
+ #include <ATen/ops/hstack.h>
742
+ #include <ATen/ops/huber_loss.h>
743
+ #include <ATen/ops/huber_loss_backward.h>
744
+ #include <ATen/ops/hypot.h>
745
+ #include <ATen/ops/i0.h>
746
+ #include <ATen/ops/igamma.h>
747
+ #include <ATen/ops/igammac.h>
748
+ #include <ATen/ops/im2col.h>
749
+ #include <ATen/ops/imag.h>
750
+ #include <ATen/ops/index.h>
751
+ #include <ATen/ops/index_add.h>
752
+ #include <ATen/ops/index_copy.h>
753
+ #include <ATen/ops/index_fill.h>
754
+ #include <ATen/ops/index_put.h>
755
+ #include <ATen/ops/index_reduce.h>
756
+ #include <ATen/ops/index_select.h>
757
+ #include <ATen/ops/index_select_backward.h>
758
+ #include <ATen/ops/indices.h>
759
+ #include <ATen/ops/indices_copy.h>
760
+ #include <ATen/ops/infinitely_differentiable_gelu_backward.h>
761
+ #include <ATen/ops/inner.h>
762
+ #include <ATen/ops/instance_norm.h>
763
+ #include <ATen/ops/int_repr.h>
764
+ #include <ATen/ops/inverse.h>
765
+ #include <ATen/ops/is_coalesced.h>
766
+ #include <ATen/ops/is_complex.h>
767
+ #include <ATen/ops/is_conj.h>
768
+ #include <ATen/ops/is_distributed.h>
769
+ #include <ATen/ops/is_floating_point.h>
770
+ #include <ATen/ops/is_inference.h>
771
+ #include <ATen/ops/is_leaf.h>
772
+ #include <ATen/ops/is_neg.h>
773
+ #include <ATen/ops/is_nonzero.h>
774
+ #include <ATen/ops/is_pinned.h>
775
+ #include <ATen/ops/is_same_size.h>
776
+ #include <ATen/ops/is_set_to.h>
777
+ #include <ATen/ops/is_signed.h>
778
+ #include <ATen/ops/is_vulkan_available.h>
779
+ #include <ATen/ops/isclose.h>
780
+ #include <ATen/ops/isfinite.h>
781
+ #include <ATen/ops/isin.h>
782
+ #include <ATen/ops/isinf.h>
783
+ #include <ATen/ops/isnan.h>
784
+ #include <ATen/ops/isneginf.h>
785
+ #include <ATen/ops/isposinf.h>
786
+ #include <ATen/ops/isreal.h>
787
+ #include <ATen/ops/istft.h>
788
+ #include <ATen/ops/item.h>
789
+ #include <ATen/ops/kaiser_window.h>
790
+ #include <ATen/ops/kl_div.h>
791
+ #include <ATen/ops/kron.h>
792
+ #include <ATen/ops/kthvalue.h>
793
+ #include <ATen/ops/l1_loss.h>
794
+ #include <ATen/ops/layer_norm.h>
795
+ #include <ATen/ops/lcm.h>
796
+ #include <ATen/ops/ldexp.h>
797
+ #include <ATen/ops/le.h>
798
+ #include <ATen/ops/leaky_relu.h>
799
+ #include <ATen/ops/leaky_relu_backward.h>
800
+ #include <ATen/ops/lerp.h>
801
+ #include <ATen/ops/less.h>
802
+ #include <ATen/ops/less_equal.h>
803
+ #include <ATen/ops/lgamma.h>
804
+ #include <ATen/ops/lift.h>
805
+ #include <ATen/ops/lift_fresh.h>
806
+ #include <ATen/ops/lift_fresh_copy.h>
807
+ #include <ATen/ops/linalg_cholesky.h>
808
+ #include <ATen/ops/linalg_cholesky_ex.h>
809
+ #include <ATen/ops/linalg_cond.h>
810
+ #include <ATen/ops/linalg_cross.h>
811
+ #include <ATen/ops/linalg_det.h>
812
+ #include <ATen/ops/linalg_diagonal.h>
813
+ #include <ATen/ops/linalg_eig.h>
814
+ #include <ATen/ops/linalg_eigh.h>
815
+ #include <ATen/ops/linalg_eigvals.h>
816
+ #include <ATen/ops/linalg_eigvalsh.h>
817
+ #include <ATen/ops/linalg_householder_product.h>
818
+ #include <ATen/ops/linalg_inv.h>
819
+ #include <ATen/ops/linalg_inv_ex.h>
820
+ #include <ATen/ops/linalg_ldl_factor.h>
821
+ #include <ATen/ops/linalg_ldl_factor_ex.h>
822
+ #include <ATen/ops/linalg_ldl_solve.h>
823
+ #include <ATen/ops/linalg_lstsq.h>
824
+ #include <ATen/ops/linalg_lu.h>
825
+ #include <ATen/ops/linalg_lu_factor.h>
826
+ #include <ATen/ops/linalg_lu_factor_ex.h>
827
+ #include <ATen/ops/linalg_lu_solve.h>
828
+ #include <ATen/ops/linalg_matmul.h>
829
+ #include <ATen/ops/linalg_matrix_exp.h>
830
+ #include <ATen/ops/linalg_matrix_norm.h>
831
+ #include <ATen/ops/linalg_matrix_power.h>
832
+ #include <ATen/ops/linalg_matrix_rank.h>
833
+ #include <ATen/ops/linalg_multi_dot.h>
834
+ #include <ATen/ops/linalg_norm.h>
835
+ #include <ATen/ops/linalg_pinv.h>
836
+ #include <ATen/ops/linalg_qr.h>
837
+ #include <ATen/ops/linalg_slogdet.h>
838
+ #include <ATen/ops/linalg_solve.h>
839
+ #include <ATen/ops/linalg_solve_ex.h>
840
+ #include <ATen/ops/linalg_solve_triangular.h>
841
+ #include <ATen/ops/linalg_svd.h>
842
+ #include <ATen/ops/linalg_svdvals.h>
843
+ #include <ATen/ops/linalg_tensorinv.h>
844
+ #include <ATen/ops/linalg_tensorsolve.h>
845
+ #include <ATen/ops/linalg_vander.h>
846
+ #include <ATen/ops/linalg_vecdot.h>
847
+ #include <ATen/ops/linalg_vector_norm.h>
848
+ #include <ATen/ops/linear.h>
849
+ #include <ATen/ops/linear_backward.h>
850
+ #include <ATen/ops/linspace.h>
851
+ #include <ATen/ops/log.h>
852
+ #include <ATen/ops/log10.h>
853
+ #include <ATen/ops/log1p.h>
854
+ #include <ATen/ops/log2.h>
855
+ #include <ATen/ops/log_normal.h>
856
+ #include <ATen/ops/log_sigmoid.h>
857
+ #include <ATen/ops/log_sigmoid_backward.h>
858
+ #include <ATen/ops/log_sigmoid_forward.h>
859
+ #include <ATen/ops/log_softmax.h>
860
+ #include <ATen/ops/logaddexp.h>
861
+ #include <ATen/ops/logaddexp2.h>
862
+ #include <ATen/ops/logcumsumexp.h>
863
+ #include <ATen/ops/logdet.h>
864
+ #include <ATen/ops/logical_and.h>
865
+ #include <ATen/ops/logical_not.h>
866
+ #include <ATen/ops/logical_or.h>
867
+ #include <ATen/ops/logical_xor.h>
868
+ #include <ATen/ops/logit.h>
869
+ #include <ATen/ops/logit_backward.h>
870
+ #include <ATen/ops/logspace.h>
871
+ #include <ATen/ops/logsumexp.h>
872
+ #include <ATen/ops/lshift.h>
873
+ #include <ATen/ops/lstm.h>
874
+ #include <ATen/ops/lstm_cell.h>
875
+ #include <ATen/ops/lstm_mps_backward.h>
876
+ #include <ATen/ops/lt.h>
877
+ #include <ATen/ops/lu_solve.h>
878
+ #include <ATen/ops/lu_unpack.h>
879
+ #include <ATen/ops/mH.h>
880
+ #include <ATen/ops/mT.h>
881
+ #include <ATen/ops/margin_ranking_loss.h>
882
+ #include <ATen/ops/masked_fill.h>
883
+ #include <ATen/ops/masked_scatter.h>
884
+ #include <ATen/ops/masked_scatter_backward.h>
885
+ #include <ATen/ops/masked_select.h>
886
+ #include <ATen/ops/masked_select_backward.h>
887
+ #include <ATen/ops/matmul.h>
888
+ #include <ATen/ops/matmul_backward.h>
889
+ #include <ATen/ops/matrix_H.h>
890
+ #include <ATen/ops/matrix_exp.h>
891
+ #include <ATen/ops/matrix_exp_backward.h>
892
+ #include <ATen/ops/matrix_power.h>
893
+ #include <ATen/ops/max.h>
894
+ #include <ATen/ops/max_pool1d.h>
895
+ #include <ATen/ops/max_pool1d_with_indices.h>
896
+ #include <ATen/ops/max_pool2d.h>
897
+ #include <ATen/ops/max_pool2d_backward.h>
898
+ #include <ATen/ops/max_pool2d_with_indices.h>
899
+ #include <ATen/ops/max_pool2d_with_indices_backward.h>
900
+ #include <ATen/ops/max_pool3d.h>
901
+ #include <ATen/ops/max_pool3d_with_indices.h>
902
+ #include <ATen/ops/max_pool3d_with_indices_backward.h>
903
+ #include <ATen/ops/max_unpool2d.h>
904
+ #include <ATen/ops/max_unpool3d.h>
905
+ #include <ATen/ops/maximum.h>
906
+ #include <ATen/ops/mean.h>
907
+ #include <ATen/ops/median.h>
908
+ #include <ATen/ops/meshgrid.h>
909
+ #include <ATen/ops/min.h>
910
+ #include <ATen/ops/minimum.h>
911
+ #include <ATen/ops/miopen_batch_norm.h>
912
+ #include <ATen/ops/miopen_batch_norm_backward.h>
913
+ #include <ATen/ops/miopen_convolution.h>
914
+ #include <ATen/ops/miopen_convolution_add_relu.h>
915
+ #include <ATen/ops/miopen_convolution_relu.h>
916
+ #include <ATen/ops/miopen_convolution_transpose.h>
917
+ #include <ATen/ops/miopen_depthwise_convolution.h>
918
+ #include <ATen/ops/miopen_rnn.h>
919
+ #include <ATen/ops/miopen_rnn_backward.h>
920
+ #include <ATen/ops/mish.h>
921
+ #include <ATen/ops/mish_backward.h>
922
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
923
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
924
+ #include <ATen/ops/mkldnn_convolution.h>
925
+ #include <ATen/ops/mkldnn_linear.h>
926
+ #include <ATen/ops/mkldnn_linear_backward.h>
927
+ #include <ATen/ops/mkldnn_linear_backward_input.h>
928
+ #include <ATen/ops/mkldnn_linear_backward_weights.h>
929
+ #include <ATen/ops/mkldnn_max_pool2d.h>
930
+ #include <ATen/ops/mkldnn_max_pool2d_backward.h>
931
+ #include <ATen/ops/mkldnn_max_pool3d.h>
932
+ #include <ATen/ops/mkldnn_max_pool3d_backward.h>
933
+ #include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
934
+ #include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
935
+ #include <ATen/ops/mkldnn_rnn_layer.h>
936
+ #include <ATen/ops/mkldnn_rnn_layer_backward.h>
937
+ #include <ATen/ops/mm.h>
938
+ #include <ATen/ops/mode.h>
939
+ #include <ATen/ops/moveaxis.h>
940
+ #include <ATen/ops/movedim.h>
941
+ #include <ATen/ops/mps_convolution_backward.h>
942
+ #include <ATen/ops/mps_convolution_transpose_backward.h>
943
+ #include <ATen/ops/mse_loss.h>
944
+ #include <ATen/ops/mse_loss_backward.h>
945
+ #include <ATen/ops/msort.h>
946
+ #include <ATen/ops/mul.h>
947
+ #include <ATen/ops/multi_margin_loss.h>
948
+ #include <ATen/ops/multi_margin_loss_backward.h>
949
+ #include <ATen/ops/multilabel_margin_loss.h>
950
+ #include <ATen/ops/multilabel_margin_loss_backward.h>
951
+ #include <ATen/ops/multilabel_margin_loss_forward.h>
952
+ #include <ATen/ops/multinomial.h>
953
+ #include <ATen/ops/multiply.h>
954
+ #include <ATen/ops/mv.h>
955
+ #include <ATen/ops/mvlgamma.h>
956
+ #include <ATen/ops/nan_to_num.h>
957
+ #include <ATen/ops/nanmean.h>
958
+ #include <ATen/ops/nanmedian.h>
959
+ #include <ATen/ops/nanquantile.h>
960
+ #include <ATen/ops/nansum.h>
961
+ #include <ATen/ops/narrow.h>
962
+ #include <ATen/ops/narrow_copy.h>
963
+ #include <ATen/ops/native_batch_norm.h>
964
+ #include <ATen/ops/native_batch_norm_backward.h>
965
+ #include <ATen/ops/native_channel_shuffle.h>
966
+ #include <ATen/ops/native_dropout.h>
967
+ #include <ATen/ops/native_dropout_backward.h>
968
+ #include <ATen/ops/native_group_norm.h>
969
+ #include <ATen/ops/native_group_norm_backward.h>
970
+ #include <ATen/ops/native_layer_norm.h>
971
+ #include <ATen/ops/native_layer_norm_backward.h>
972
+ #include <ATen/ops/native_norm.h>
973
+ #include <ATen/ops/ne.h>
974
+ #include <ATen/ops/neg.h>
975
+ #include <ATen/ops/negative.h>
976
+ #include <ATen/ops/nested_to_padded_tensor.h>
977
+ #include <ATen/ops/new_empty.h>
978
+ #include <ATen/ops/new_empty_strided.h>
979
+ #include <ATen/ops/new_full.h>
980
+ #include <ATen/ops/new_ones.h>
981
+ #include <ATen/ops/new_zeros.h>
982
+ #include <ATen/ops/nextafter.h>
983
+ #include <ATen/ops/nll_loss.h>
984
+ #include <ATen/ops/nll_loss2d.h>
985
+ #include <ATen/ops/nll_loss2d_backward.h>
986
+ #include <ATen/ops/nll_loss2d_forward.h>
987
+ #include <ATen/ops/nll_loss_backward.h>
988
+ #include <ATen/ops/nll_loss_forward.h>
989
+ #include <ATen/ops/nll_loss_nd.h>
990
+ #include <ATen/ops/nonzero.h>
991
+ #include <ATen/ops/nonzero_numpy.h>
992
+ #include <ATen/ops/nonzero_static.h>
993
+ #include <ATen/ops/norm.h>
994
+ #include <ATen/ops/norm_except_dim.h>
995
+ #include <ATen/ops/normal.h>
996
+ #include <ATen/ops/not_equal.h>
997
+ #include <ATen/ops/nuclear_norm.h>
998
+ #include <ATen/ops/numpy_T.h>
999
+ #include <ATen/ops/one_hot.h>
1000
+ #include <ATen/ops/ones.h>
1001
+ #include <ATen/ops/ones_like.h>
1002
+ #include <ATen/ops/or.h>
1003
+ #include <ATen/ops/orgqr.h>
1004
+ #include <ATen/ops/ormqr.h>
1005
+ #include <ATen/ops/outer.h>
1006
+ #include <ATen/ops/output_nr.h>
1007
+ #include <ATen/ops/pad.h>
1008
+ #include <ATen/ops/pad_sequence.h>
1009
+ #include <ATen/ops/pairwise_distance.h>
1010
+ #include <ATen/ops/pdist.h>
1011
+ #include <ATen/ops/permute.h>
1012
+ #include <ATen/ops/permute_copy.h>
1013
+ #include <ATen/ops/pin_memory.h>
1014
+ #include <ATen/ops/pinverse.h>
1015
+ #include <ATen/ops/pixel_shuffle.h>
1016
+ #include <ATen/ops/pixel_unshuffle.h>
1017
+ #include <ATen/ops/poisson.h>
1018
+ #include <ATen/ops/poisson_nll_loss.h>
1019
+ #include <ATen/ops/polar.h>
1020
+ #include <ATen/ops/polygamma.h>
1021
+ #include <ATen/ops/positive.h>
1022
+ #include <ATen/ops/pow.h>
1023
+ #include <ATen/ops/prelu.h>
1024
+ #include <ATen/ops/prod.h>
1025
+ #include <ATen/ops/promote_types.h>
1026
+ #include <ATen/ops/put.h>
1027
+ #include <ATen/ops/q_per_channel_axis.h>
1028
+ #include <ATen/ops/q_per_channel_scales.h>
1029
+ #include <ATen/ops/q_per_channel_zero_points.h>
1030
+ #include <ATen/ops/q_scale.h>
1031
+ #include <ATen/ops/q_zero_point.h>
1032
+ #include <ATen/ops/qr.h>
1033
+ #include <ATen/ops/qscheme.h>
1034
+ #include <ATen/ops/quantile.h>
1035
+ #include <ATen/ops/quantize_per_channel.h>
1036
+ #include <ATen/ops/quantize_per_tensor.h>
1037
+ #include <ATen/ops/quantize_per_tensor_dynamic.h>
1038
+ #include <ATen/ops/quantized_batch_norm.h>
1039
+ #include <ATen/ops/quantized_gru_cell.h>
1040
+ #include <ATen/ops/quantized_lstm_cell.h>
1041
+ #include <ATen/ops/quantized_max_pool1d.h>
1042
+ #include <ATen/ops/quantized_max_pool2d.h>
1043
+ #include <ATen/ops/quantized_max_pool3d.h>
1044
+ #include <ATen/ops/quantized_rnn_relu_cell.h>
1045
+ #include <ATen/ops/quantized_rnn_tanh_cell.h>
1046
+ #include <ATen/ops/rad2deg.h>
1047
+ #include <ATen/ops/rand.h>
1048
+ #include <ATen/ops/rand_like.h>
1049
+ #include <ATen/ops/randint.h>
1050
+ #include <ATen/ops/randint_like.h>
1051
+ #include <ATen/ops/randn.h>
1052
+ #include <ATen/ops/randn_like.h>
1053
+ #include <ATen/ops/random.h>
1054
+ #include <ATen/ops/randperm.h>
1055
+ #include <ATen/ops/range.h>
1056
+ #include <ATen/ops/ravel.h>
1057
+ #include <ATen/ops/real.h>
1058
+ #include <ATen/ops/reciprocal.h>
1059
+ #include <ATen/ops/record_stream.h>
1060
+ #include <ATen/ops/refine_names.h>
1061
+ #include <ATen/ops/reflection_pad1d.h>
1062
+ #include <ATen/ops/reflection_pad1d_backward.h>
1063
+ #include <ATen/ops/reflection_pad2d.h>
1064
+ #include <ATen/ops/reflection_pad2d_backward.h>
1065
+ #include <ATen/ops/reflection_pad3d.h>
1066
+ #include <ATen/ops/reflection_pad3d_backward.h>
1067
+ #include <ATen/ops/relu.h>
1068
+ #include <ATen/ops/relu6.h>
1069
+ #include <ATen/ops/remainder.h>
1070
+ #include <ATen/ops/rename.h>
1071
+ #include <ATen/ops/renorm.h>
1072
+ #include <ATen/ops/repeat.h>
1073
+ #include <ATen/ops/repeat_interleave.h>
1074
+ #include <ATen/ops/replication_pad1d.h>
1075
+ #include <ATen/ops/replication_pad1d_backward.h>
1076
+ #include <ATen/ops/replication_pad2d.h>
1077
+ #include <ATen/ops/replication_pad2d_backward.h>
1078
+ #include <ATen/ops/replication_pad3d.h>
1079
+ #include <ATen/ops/replication_pad3d_backward.h>
1080
+ #include <ATen/ops/requires_grad.h>
1081
+ #include <ATen/ops/reshape.h>
1082
+ #include <ATen/ops/reshape_as.h>
1083
+ #include <ATen/ops/resize.h>
1084
+ #include <ATen/ops/resize_as.h>
1085
+ #include <ATen/ops/resize_as_sparse.h>
1086
+ #include <ATen/ops/resolve_conj.h>
1087
+ #include <ATen/ops/resolve_neg.h>
1088
+ #include <ATen/ops/result_type.h>
1089
+ #include <ATen/ops/retain_grad.h>
1090
+ #include <ATen/ops/retains_grad.h>
1091
+ #include <ATen/ops/rnn_relu.h>
1092
+ #include <ATen/ops/rnn_relu_cell.h>
1093
+ #include <ATen/ops/rnn_tanh.h>
1094
+ #include <ATen/ops/rnn_tanh_cell.h>
1095
+ #include <ATen/ops/roll.h>
1096
+ #include <ATen/ops/rot90.h>
1097
+ #include <ATen/ops/round.h>
1098
+ #include <ATen/ops/row_indices.h>
1099
+ #include <ATen/ops/row_indices_copy.h>
1100
+ #include <ATen/ops/row_stack.h>
1101
+ #include <ATen/ops/rrelu.h>
1102
+ #include <ATen/ops/rrelu_with_noise.h>
1103
+ #include <ATen/ops/rrelu_with_noise_backward.h>
1104
+ #include <ATen/ops/rshift.h>
1105
+ #include <ATen/ops/rsqrt.h>
1106
+ #include <ATen/ops/rsub.h>
1107
+ #include <ATen/ops/scalar_tensor.h>
1108
+ #include <ATen/ops/scaled_dot_product_attention.h>
1109
+ #include <ATen/ops/scatter.h>
1110
+ #include <ATen/ops/scatter_add.h>
1111
+ #include <ATen/ops/scatter_reduce.h>
1112
+ #include <ATen/ops/searchsorted.h>
1113
+ #include <ATen/ops/segment_reduce.h>
1114
+ #include <ATen/ops/select.h>
1115
+ #include <ATen/ops/select_backward.h>
1116
+ #include <ATen/ops/select_copy.h>
1117
+ #include <ATen/ops/select_scatter.h>
1118
+ #include <ATen/ops/selu.h>
1119
+ #include <ATen/ops/set.h>
1120
+ #include <ATen/ops/set_data.h>
1121
+ #include <ATen/ops/sgn.h>
1122
+ #include <ATen/ops/sigmoid.h>
1123
+ #include <ATen/ops/sigmoid_backward.h>
1124
+ #include <ATen/ops/sign.h>
1125
+ #include <ATen/ops/signbit.h>
1126
+ #include <ATen/ops/silu.h>
1127
+ #include <ATen/ops/silu_backward.h>
1128
+ #include <ATen/ops/sin.h>
1129
+ #include <ATen/ops/sinc.h>
1130
+ #include <ATen/ops/sinh.h>
1131
+ #include <ATen/ops/size.h>
1132
+ #include <ATen/ops/slice.h>
1133
+ #include <ATen/ops/slice_backward.h>
1134
+ #include <ATen/ops/slice_copy.h>
1135
+ #include <ATen/ops/slice_scatter.h>
1136
+ #include <ATen/ops/slogdet.h>
1137
+ #include <ATen/ops/slow_conv3d.h>
1138
+ #include <ATen/ops/slow_conv3d_forward.h>
1139
+ #include <ATen/ops/slow_conv_dilated2d.h>
1140
+ #include <ATen/ops/slow_conv_dilated3d.h>
1141
+ #include <ATen/ops/slow_conv_transpose2d.h>
1142
+ #include <ATen/ops/slow_conv_transpose3d.h>
1143
+ #include <ATen/ops/smm.h>
1144
+ #include <ATen/ops/smooth_l1_loss.h>
1145
+ #include <ATen/ops/smooth_l1_loss_backward.h>
1146
+ #include <ATen/ops/soft_margin_loss.h>
1147
+ #include <ATen/ops/soft_margin_loss_backward.h>
1148
+ #include <ATen/ops/softmax.h>
1149
+ #include <ATen/ops/softplus.h>
1150
+ #include <ATen/ops/softplus_backward.h>
1151
+ #include <ATen/ops/softshrink.h>
1152
+ #include <ATen/ops/softshrink_backward.h>
1153
+ #include <ATen/ops/sort.h>
1154
+ #include <ATen/ops/sparse_bsc_tensor.h>
1155
+ #include <ATen/ops/sparse_bsr_tensor.h>
1156
+ #include <ATen/ops/sparse_compressed_tensor.h>
1157
+ #include <ATen/ops/sparse_coo_tensor.h>
1158
+ #include <ATen/ops/sparse_csc_tensor.h>
1159
+ #include <ATen/ops/sparse_csr_tensor.h>
1160
+ #include <ATen/ops/sparse_dim.h>
1161
+ #include <ATen/ops/sparse_mask.h>
1162
+ #include <ATen/ops/sparse_resize.h>
1163
+ #include <ATen/ops/sparse_resize_and_clear.h>
1164
+ #include <ATen/ops/sparse_sampled_addmm.h>
1165
+ #include <ATen/ops/special_airy_ai.h>
1166
+ #include <ATen/ops/special_bessel_j0.h>
1167
+ #include <ATen/ops/special_bessel_j1.h>
1168
+ #include <ATen/ops/special_bessel_y0.h>
1169
+ #include <ATen/ops/special_bessel_y1.h>
1170
+ #include <ATen/ops/special_chebyshev_polynomial_t.h>
1171
+ #include <ATen/ops/special_chebyshev_polynomial_u.h>
1172
+ #include <ATen/ops/special_chebyshev_polynomial_v.h>
1173
+ #include <ATen/ops/special_chebyshev_polynomial_w.h>
1174
+ #include <ATen/ops/special_digamma.h>
1175
+ #include <ATen/ops/special_entr.h>
1176
+ #include <ATen/ops/special_erf.h>
1177
+ #include <ATen/ops/special_erfc.h>
1178
+ #include <ATen/ops/special_erfcx.h>
1179
+ #include <ATen/ops/special_erfinv.h>
1180
+ #include <ATen/ops/special_exp2.h>
1181
+ #include <ATen/ops/special_expit.h>
1182
+ #include <ATen/ops/special_expm1.h>
1183
+ #include <ATen/ops/special_gammainc.h>
1184
+ #include <ATen/ops/special_gammaincc.h>
1185
+ #include <ATen/ops/special_gammaln.h>
1186
+ #include <ATen/ops/special_hermite_polynomial_h.h>
1187
+ #include <ATen/ops/special_hermite_polynomial_he.h>
1188
+ #include <ATen/ops/special_i0.h>
1189
+ #include <ATen/ops/special_i0e.h>
1190
+ #include <ATen/ops/special_i1.h>
1191
+ #include <ATen/ops/special_i1e.h>
1192
+ #include <ATen/ops/special_laguerre_polynomial_l.h>
1193
+ #include <ATen/ops/special_legendre_polynomial_p.h>
1194
+ #include <ATen/ops/special_log1p.h>
1195
+ #include <ATen/ops/special_log_ndtr.h>
1196
+ #include <ATen/ops/special_log_softmax.h>
1197
+ #include <ATen/ops/special_logit.h>
1198
+ #include <ATen/ops/special_logsumexp.h>
1199
+ #include <ATen/ops/special_modified_bessel_i0.h>
1200
+ #include <ATen/ops/special_modified_bessel_i1.h>
1201
+ #include <ATen/ops/special_modified_bessel_k0.h>
1202
+ #include <ATen/ops/special_modified_bessel_k1.h>
1203
+ #include <ATen/ops/special_multigammaln.h>
1204
+ #include <ATen/ops/special_ndtr.h>
1205
+ #include <ATen/ops/special_ndtri.h>
1206
+ #include <ATen/ops/special_polygamma.h>
1207
+ #include <ATen/ops/special_psi.h>
1208
+ #include <ATen/ops/special_round.h>
1209
+ #include <ATen/ops/special_scaled_modified_bessel_k0.h>
1210
+ #include <ATen/ops/special_scaled_modified_bessel_k1.h>
1211
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
1212
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
1213
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
1214
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
1215
+ #include <ATen/ops/special_sinc.h>
1216
+ #include <ATen/ops/special_softmax.h>
1217
+ #include <ATen/ops/special_spherical_bessel_j0.h>
1218
+ #include <ATen/ops/special_xlog1py.h>
1219
+ #include <ATen/ops/special_xlogy.h>
1220
+ #include <ATen/ops/special_zeta.h>
1221
+ #include <ATen/ops/split.h>
1222
+ #include <ATen/ops/split_copy.h>
1223
+ #include <ATen/ops/split_with_sizes.h>
1224
+ #include <ATen/ops/split_with_sizes_copy.h>
1225
+ #include <ATen/ops/sqrt.h>
1226
+ #include <ATen/ops/square.h>
1227
+ #include <ATen/ops/squeeze.h>
1228
+ #include <ATen/ops/squeeze_copy.h>
1229
+ #include <ATen/ops/sspaddmm.h>
1230
+ #include <ATen/ops/stack.h>
1231
+ #include <ATen/ops/std.h>
1232
+ #include <ATen/ops/std_mean.h>
1233
+ #include <ATen/ops/stft.h>
1234
+ #include <ATen/ops/stride.h>
1235
+ #include <ATen/ops/sub.h>
1236
+ #include <ATen/ops/subtract.h>
1237
+ #include <ATen/ops/sum.h>
1238
+ #include <ATen/ops/sum_to_size.h>
1239
+ #include <ATen/ops/svd.h>
1240
+ #include <ATen/ops/swapaxes.h>
1241
+ #include <ATen/ops/swapdims.h>
1242
+ #include <ATen/ops/sym_constrain_range.h>
1243
+ #include <ATen/ops/sym_constrain_range_for_size.h>
1244
+ #include <ATen/ops/sym_numel.h>
1245
+ #include <ATen/ops/sym_size.h>
1246
+ #include <ATen/ops/sym_storage_offset.h>
1247
+ #include <ATen/ops/sym_stride.h>
1248
+ #include <ATen/ops/t.h>
1249
+ #include <ATen/ops/t_copy.h>
1250
+ #include <ATen/ops/take.h>
1251
+ #include <ATen/ops/take_along_dim.h>
1252
+ #include <ATen/ops/tan.h>
1253
+ #include <ATen/ops/tanh.h>
1254
+ #include <ATen/ops/tanh_backward.h>
1255
+ #include <ATen/ops/tensor_split.h>
1256
+ #include <ATen/ops/tensordot.h>
1257
+ #include <ATen/ops/thnn_conv2d.h>
1258
+ #include <ATen/ops/threshold.h>
1259
+ #include <ATen/ops/threshold_backward.h>
1260
+ #include <ATen/ops/tile.h>
1261
+ #include <ATen/ops/to.h>
1262
+ #include <ATen/ops/to_dense.h>
1263
+ #include <ATen/ops/to_dense_backward.h>
1264
+ #include <ATen/ops/to_mkldnn.h>
1265
+ #include <ATen/ops/to_mkldnn_backward.h>
1266
+ #include <ATen/ops/to_padded_tensor.h>
1267
+ #include <ATen/ops/to_sparse.h>
1268
+ #include <ATen/ops/to_sparse_bsc.h>
1269
+ #include <ATen/ops/to_sparse_bsr.h>
1270
+ #include <ATen/ops/to_sparse_csc.h>
1271
+ #include <ATen/ops/to_sparse_csr.h>
1272
+ #include <ATen/ops/topk.h>
1273
+ #include <ATen/ops/trace.h>
1274
+ #include <ATen/ops/trace_backward.h>
1275
+ #include <ATen/ops/transpose.h>
1276
+ #include <ATen/ops/transpose_copy.h>
1277
+ #include <ATen/ops/trapezoid.h>
1278
+ #include <ATen/ops/trapz.h>
1279
+ #include <ATen/ops/triangular_solve.h>
1280
+ #include <ATen/ops/tril.h>
1281
+ #include <ATen/ops/tril_indices.h>
1282
+ #include <ATen/ops/triplet_margin_loss.h>
1283
+ #include <ATen/ops/triu.h>
1284
+ #include <ATen/ops/triu_indices.h>
1285
+ #include <ATen/ops/true_divide.h>
1286
+ #include <ATen/ops/trunc.h>
1287
+ #include <ATen/ops/type_as.h>
1288
+ #include <ATen/ops/unbind.h>
1289
+ #include <ATen/ops/unbind_copy.h>
1290
+ #include <ATen/ops/unflatten.h>
1291
+ #include <ATen/ops/unflatten_dense_tensors.h>
1292
+ #include <ATen/ops/unfold.h>
1293
+ #include <ATen/ops/unfold_backward.h>
1294
+ #include <ATen/ops/unfold_copy.h>
1295
+ #include <ATen/ops/uniform.h>
1296
+ #include <ATen/ops/unique_consecutive.h>
1297
+ #include <ATen/ops/unique_dim.h>
1298
+ #include <ATen/ops/unique_dim_consecutive.h>
1299
+ #include <ATen/ops/unsafe_chunk.h>
1300
+ #include <ATen/ops/unsafe_split.h>
1301
+ #include <ATen/ops/unsafe_split_with_sizes.h>
1302
+ #include <ATen/ops/unsqueeze.h>
1303
+ #include <ATen/ops/unsqueeze_copy.h>
1304
+ #include <ATen/ops/upsample_bicubic2d.h>
1305
+ #include <ATen/ops/upsample_bicubic2d_backward.h>
1306
+ #include <ATen/ops/upsample_bilinear2d.h>
1307
+ #include <ATen/ops/upsample_bilinear2d_backward.h>
1308
+ #include <ATen/ops/upsample_linear1d.h>
1309
+ #include <ATen/ops/upsample_linear1d_backward.h>
1310
+ #include <ATen/ops/upsample_nearest1d.h>
1311
+ #include <ATen/ops/upsample_nearest1d_backward.h>
1312
+ #include <ATen/ops/upsample_nearest2d.h>
1313
+ #include <ATen/ops/upsample_nearest2d_backward.h>
1314
+ #include <ATen/ops/upsample_nearest3d.h>
1315
+ #include <ATen/ops/upsample_nearest3d_backward.h>
1316
+ #include <ATen/ops/upsample_trilinear3d.h>
1317
+ #include <ATen/ops/upsample_trilinear3d_backward.h>
1318
+ #include <ATen/ops/value_selecting_reduction_backward.h>
1319
+ #include <ATen/ops/values.h>
1320
+ #include <ATen/ops/values_copy.h>
1321
+ #include <ATen/ops/vander.h>
1322
+ #include <ATen/ops/var.h>
1323
+ #include <ATen/ops/var_mean.h>
1324
+ #include <ATen/ops/vdot.h>
1325
+ #include <ATen/ops/view.h>
1326
+ #include <ATen/ops/view_as.h>
1327
+ #include <ATen/ops/view_as_complex.h>
1328
+ #include <ATen/ops/view_as_complex_copy.h>
1329
+ #include <ATen/ops/view_as_real.h>
1330
+ #include <ATen/ops/view_as_real_copy.h>
1331
+ #include <ATen/ops/view_copy.h>
1332
+ #include <ATen/ops/vsplit.h>
1333
+ #include <ATen/ops/vstack.h>
1334
+ #include <ATen/ops/where.h>
1335
+ #include <ATen/ops/xlogy.h>
1336
+ #include <ATen/ops/xor.h>
1337
+ #include <ATen/ops/zero.h>
1338
+ #include <ATen/ops/zeros.h>
1339
+ #include <ATen/ops/zeros_like.h>
1340
+
1341
+ namespace at {
1342
+
1343
+
1344
+
1345
+ // Special C++ only overloads for std()-like functions (See gh-40287)
1346
+ // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
1347
+ // So, for example std(0) would select the std(unbiased=False) overload
1348
+ TORCH_API inline Tensor var(const Tensor& self, int dim) {
1349
+ return at::var(self, IntArrayRef{dim});
1350
+ }
1351
+ TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
1352
+ return at::var_mean(self, IntArrayRef{dim});
1353
+ }
1354
+ TORCH_API inline Tensor std(const Tensor& self, int dim) {
1355
+ return at::std(self, IntArrayRef{dim});
1356
+ }
1357
+ TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
1358
+ return at::std_mean(self, IntArrayRef{dim});
1359
+ }
1360
+
1361
+ inline int64_t numel(const Tensor& tensor) {
1362
+ return tensor.numel();
1363
+ }
1364
+
1365
+ inline int64_t size(const Tensor& tensor, int64_t dim) {
1366
+ return tensor.size(dim);
1367
+ }
1368
+
1369
+ inline int64_t stride(const Tensor& tensor, int64_t dim) {
1370
+ return tensor.stride(dim);
1371
+ }
1372
+
1373
+ inline bool is_complex(const Tensor& tensor) {
1374
+ return tensor.is_complex();
1375
+ }
1376
+
1377
+ inline bool is_floating_point(const Tensor& tensor) {
1378
+ return tensor.is_floating_point();
1379
+ }
1380
+
1381
+ inline bool is_signed(const Tensor& tensor) {
1382
+ return tensor.is_signed();
1383
+ }
1384
+
1385
+ inline bool is_inference(const Tensor& tensor) {
1386
+ return tensor.is_inference();
1387
+ }
1388
+
1389
+ inline bool _is_zerotensor(const Tensor& tensor) {
1390
+ return tensor._is_zerotensor();
1391
+ }
1392
+
1393
+ inline bool is_conj(const Tensor& tensor) {
1394
+ return tensor.is_conj();
1395
+ }
1396
+
1397
+ inline Tensor conj(const Tensor& tensor) {
1398
+ return tensor.conj();
1399
+ }
1400
+
1401
+ inline bool is_neg(const Tensor& tensor) {
1402
+ return tensor.is_neg();
1403
+ }
1404
+
1405
+ }
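For context, a minimal usage sketch of the convenience overloads defined above (illustrative only, not part of the added file; the tensor shapes are arbitrary):

#include <ATen/ATen.h>

void std_overload_demo() {
  at::Tensor t = at::randn({3, 4});
  // The exact-match int overload above fires here, i.e. at::std(t, IntArrayRef{0});
  // without it, the literal 0 would convert to bool and pick std(unbiased=false).
  at::Tensor per_column = at::std(t, 0);
  // The free-function helpers mirror the Tensor methods.
  int64_t n = at::numel(t);      // 12
  int64_t rows = at::size(t, 0); // 3
  (void)per_column; (void)n; (void)rows;
}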
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Generator.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <ATen/core/Generator.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedTensorImpl.h ADDED
@@ -0,0 +1,161 @@
1
+ #pragma once
2
+
3
+ #include <bitset>
4
+ #include <utility>
5
+
6
+ #include <ATen/ArrayRef.h>
7
+ #include <ATen/SmallVector.h>
8
+ #include <ATen/Tensor.h>
9
+
10
+ namespace at {
11
+
12
+ // We assume this in a few other places in the codebase,
13
+ // but there isn't a centralized definition.
14
+ constexpr int64_t kVmapMaxTensorDims = 64;
15
+
16
+ // The valid vmap levels range from [0, 64). This effectively means that we
17
+ // support a maximum of 64 nested vmaps.
18
+ constexpr int64_t kVmapNumLevels = 64;
19
+
20
+ // Store this number of elements of BatchDims on the stack. Most people will
21
+ // probably use <= 5 nested vmaps, but adjust this number as necessary.
22
+ constexpr int64_t kBatchDimsStackSize = 5;
23
+
24
+ // a BatchDim represents a "private" dimension on a Tensor created inside of
25
+ // vmap. It is a (level, dim) tuple, with the `dim` indicating which dimension
26
+ // is being vmap'ed over and the `level` being an identifier for which vmap
27
+ // said dimension was created inside. The `dim` corresponds to a "physical
28
+ // dim" - it is a dimension index on the underlying physical tensor that is
29
+ // being vmapped over.
30
+ struct BatchDim {
31
+ BatchDim(int64_t level, int64_t dim) : dim_(dim), level_(level) {}
32
+ int64_t dim() const {
33
+ return dim_;
34
+ }
35
+ int64_t level() const {
36
+ return level_;
37
+ }
38
+
39
+ private:
40
+ int64_t dim_;
41
+ int64_t level_;
42
+ };
43
+
44
+ using BatchDims = SmallVector<BatchDim, kBatchDimsStackSize>;
45
+ using BatchDimsRef = ArrayRef<BatchDim>;
46
+
47
+ // A BatchedTensorImpl holds an underlying Tensor and a list of BatchDim
48
+ // NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
49
+ // BatchedTensorImpl.
50
+ //
51
+ // The batch dimensions are treated as being "private"; they are not
52
+ // user-visible. For example, in the following Tensor,
53
+ // bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, dim=1)])
54
+ // dimensions 0 and 1 are batch dimensions.
55
+ //
56
+ // bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public)
57
+ // dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7)
58
+ // tensor.
59
+ struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
60
+ explicit BatchedTensorImpl(Tensor value, BatchDims bdims);
61
+
62
+ // Returns a reference to BatchDims that represent which dimensions of this
63
+ // tensor are private.
64
+ BatchDimsRef bdims() const {
65
+ return bdims_;
66
+ }
67
+
68
+ // BatchedTensorImpl wraps a Tensor
69
+ const Tensor& value() const {
70
+ return value_;
71
+ };
72
+
73
+ // Given a public dimension index, return the dimension index in the
74
+ // underlying value() tensor. For example, if we have
75
+ // bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2,
76
+ // dim=2)])
77
+ // bt.actualDim(0) -> 1
78
+ // bt.actualDim(1) -> 3
79
+ // bt.actualDim(2) -> Error
80
+ int64_t actualDim(int64_t dim, bool wrap_dim = true) const;
81
+
82
+ // We have to override this because we opted into CustomStrides
83
+ IntArrayRef strides_custom() const override;
84
+ // Override a bunch of methods inherited from TensorImpl to return error
85
+ // messages.
86
+ bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
87
+ void set_size(int64_t dim, int64_t new_size) override;
88
+ void set_stride(int64_t dim, int64_t new_stride) override;
89
+ void set_storage_offset(int64_t storage_offset) override;
90
+ #ifdef DEBUG
91
+ bool has_storage() const override;
92
+ #endif
93
+
94
+ private:
95
+ // see NOTE: [BatchedTensorImpl levels invariant]
96
+ void checkInvariants() const;
97
+ const char* tensorimpl_type_name() const override;
98
+
99
+ Tensor value_;
100
+
101
+ // Note: [BatchedTensorImpl levels invariant]
102
+ // There is an invariant that the BatchDims must be stored in increasing
103
+ // `level` order. That is, for i < j, bdims_[i].level must be less than
104
+ // bdims_[j].level.
105
+ BatchDims bdims_;
106
+ };
107
+
108
+ // NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
109
+ // BatchedTensorImpl.
110
+ inline bool isBatchedTensor(const Tensor& tensor) {
111
+ return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::Batched);
112
+ }
113
+
114
+ // It is unsafe to call this on a Tensor that is not backed by a
115
+ // BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
116
+ inline BatchedTensorImpl* unsafeGetBatchedImpl(const Tensor& tensor) {
117
+ return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
118
+ }
119
+
120
+ inline BatchedTensorImpl* maybeGetBatchedImpl(const Tensor& tensor) {
121
+ if (!isBatchedTensor(tensor)) {
122
+ return nullptr;
123
+ }
124
+ return unsafeGetBatchedImpl(tensor);
125
+ }
126
+
127
+ // Returns a bitset. If bit i is set, then that means dim i is a batchdim.
128
+ inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset(
129
+ BatchDimsRef bdims) {
130
+ std::bitset<kVmapMaxTensorDims> is_bdim;
131
+ for (const auto& bdim : bdims) {
132
+ is_bdim.set(bdim.dim());
133
+ }
134
+ return is_bdim;
135
+ }
136
+
137
+ // Creates a bitset for all of the levels present in `bdims`
138
+ inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(BatchDimsRef bdims) {
139
+ std::bitset<kVmapNumLevels> result;
140
+ for (const auto& bdim : bdims) {
141
+ result.set(bdim.level());
142
+ }
143
+ return result;
144
+ }
145
+
146
+ inline std::ostream& operator<<(std::ostream& out, const BatchDim& bdim) {
147
+ out << "(lvl=" << bdim.level() << ", dim=" << bdim.dim() << ")";
148
+ return out;
149
+ }
150
+
151
+ // Use this to construct a BatchedTensor from a regular Tensor
152
+ TORCH_API Tensor makeBatched(const Tensor& tensor, BatchDims bdims);
153
+
154
+ // Adds a batch dim to `tensor`, returning a BatchedTensor
155
+ TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t level, int64_t dim);
156
+
157
+ // Checks if an inplace operation on self and other is "vmap compatible".
158
+ // See NOTE: [vmap-incompatible in-place operations] for the definition of this.
159
+ TORCH_API bool inplaceIsVmapCompatible(const Tensor& self, const Tensor& other);
160
+
161
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h ADDED
@@ -0,0 +1,183 @@
1
+ #pragma once
2
+
3
+ #include <ATen/LegacyBatchedTensorImpl.h>
4
+ #include <ATen/core/IListRef.h>
5
+
6
+ namespace at {
7
+
8
+ // This file contains abstractions used for transforming *logical* vmap
9
+ // arguments into *physical* arguments. (Keep reading for definitions of these
10
+ // terms).
11
+
12
+ // NOTE: [Logical vs physical args]
13
+ // Consider the following vmap.
14
+ // vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
15
+ // This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
16
+ // with batch dims 0 and 2:
17
+ // BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
18
+ //
19
+ // We say the *logical* view of the tensor has size [3] -- tensors inside
20
+ // `func` appear to have size [3].
21
+ // However, the *physical* underlying tensor (the one passed to vmap) has size
22
+ // [2, 3, 4].
23
+ //
24
+ // This notion of logical vs physical also extends to non-tensor arguments.
25
+ // Consider the previous tensor; let's assume the user called
26
+ // `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
27
+ // dimension they are reducing over is dim 0 but the physical dim is dim 1
28
+ // (the first non-batch dimension)
29
+
30
+ // Forward declared; see NOTE: [What is a VmapPhysicalView?]
31
+ struct VmapPhysicalView;
32
+
33
+ // Most PyTorch operators take 4 or fewer inputs.
34
+ constexpr int64_t kVmapTransformStaticInputSize = 4;
35
+ using VmapPhysicalViewVec =
36
+ SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
37
+
38
+ // PyTorch generally advertises good performance for <= 5 dims.
39
+ // (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
40
+ // dimensions to get 8. Adjust this number as necessary
41
+ constexpr int64_t kVmapStaticDimVecSize = 8;
42
+ using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
43
+ using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;
44
+
45
+ // NOTE: [What is a VmapTransform?]
46
+ // A *VmapTransform* converts logical views of tensors to physical views.
47
+ //
48
+ // Batching rules use VmapTransforms to convert logical arguments to
49
+ // physical arguments, then call one or more at:: operators that handle the
50
+ // physical arguments, and then convert the physical result back to a logical
51
+ // argument.
52
+
53
+ // VmapTransform for operators that take tensors with multiple batch dims.
54
+ // Given one or more logical views on Tensors, `logicalToPhysical`
55
+ // permutes all of the batch dims to the front of the tensor, aligns
56
+ // and expands the batch dims to match each other (according to their `level`),
57
+ // and returns a VmapPhysicalView on the tensor(s).
58
+ struct TORCH_API MultiBatchVmapTransform {
59
+ static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
60
+ static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
61
+ };
62
+
63
+ // VmapTransform for operators that broadcast all inputs.
64
+ // Given some logical views on Tensors, `logicalToPhysical`:
65
+ // - permutes all of the batch dims to the front of the tensors
66
+ // - aligns all the batch dims to the collective levels of all of the tensors.
67
+ // If a tensor does not have a batch dim for a vmap level, then it receives
68
+ // a size-one dimension for said level.
69
+ // - aligns the non-batch dims to have the same dimensionality, adding extra
70
+ // size-1 dimensions in between the batch dimensions and the non-batch
71
+ // dimensions so that the batch dimensions are lined up from the right.
72
+ //
73
+ // For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
74
+ // dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap
75
+ // tensors of size (B, 1, 2) and (B, 3, 2).
76
+ //
77
+ // Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
78
+ // VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
79
+ // actually *need* to return a tensor of size (1, 2) for the second tensor
80
+ // because the broadcasting operation takes care of that for us, but we do
81
+ // it anyways to keep things simple.
82
+ struct TORCH_API BroadcastingVmapTransform {
83
+ static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
84
+ };
85
+
86
+ // Forward declared, if you're reading this file head to toe, don't worry about
87
+ // it yet.
88
+ struct VmapPhysicalToLogicalMap;
89
+
90
+ // NOTE: [What is a VmapPhysicalView?]
91
+ // VmapPhysicalView represents a physical view on a Tensor.
92
+ //
93
+ // One can use it to further convert logical dimension indices, logical shapes,
94
+ // and more to their physical variants, or convert a new (physical) tensor into
95
+ // a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
96
+ //
97
+ // VmapPhysicalView stores a physical tensor with all of its batch dimensions at
98
+ // the front and some levels that correspond to said batch dimensions.
99
+ //
100
+ // The levels bitset specifies which vmap levels correspond to the batch
101
+ // dimensions at the front of the tensor. In particular, the number of set bits
102
+ // corresponds to the number of batch dimensions on `tensor` and the rightmost
103
+ // bit of `levels` specifies the maximum number of nested vmaps we are in at
104
+ // this point in time.
105
+ // For example, given:
106
+ // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
107
+ //
108
+ // Rightmost bit of `levels` is 3 indicating the number of nested vmaps less
109
+ // than or equal to 3.
110
+ // bitset: 010100
111
+ // ^
112
+ // |
113
+ // levels: 012345
114
+ struct TORCH_API VmapPhysicalView {
115
+ VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
116
+ : levels_(levels), tensor_(tensor) {
117
+ TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor));
118
+ }
119
+
120
+ Tensor& tensor() {
121
+ return tensor_;
122
+ }
123
+ const Tensor& tensor() const {
124
+ return tensor_;
125
+ }
126
+
127
+ // Maps logical dim indices to physical dim indices. Also does dim wrapping.
128
+ //
129
+ // For example, given:
130
+ // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
131
+ //
132
+ // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
133
+ // This is because the size of levels tell us that the first two dimensions
134
+ // of `tensor_` are batch dimensions, so a logical dim of `n` is actually
135
+ // a physical dim of `n + 2`.
136
+ VmapDimVector getPhysicalDims(OptionalIntArrayRef logical_dims) const;
137
+ int64_t getPhysicalDim(int64_t logical_dim) const;
138
+
139
+ // Returns a VmapPhysicalToLogicalMap object. This can be used for
140
+ // mapping a physical tensor to a new logical tensor (BatchedTensor)
141
+ VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
142
+
143
+ // Maps a logical shape to a physical shape by pre-pending the batch
144
+ // sizes to the logical shape.
145
+ VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
146
+
147
+ int64_t numBatchDims() const;
148
+
149
+ private:
150
+ int64_t numLogicalDims() const;
151
+
152
+ std::bitset<kVmapNumLevels> levels_;
153
+ Tensor tensor_;
154
+ };
155
+
156
+ // Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
157
+ // to a logical one (BatchedTensor). It holds some levels that are used to do
158
+ // the mapping and assumes that the batch dimensions in the physical tensor all
159
+ // occur at the front of the tensor.
160
+ struct TORCH_API VmapPhysicalToLogicalMap {
161
+ VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels)
162
+ : levels_(levels) {}
163
+
164
+ // Maps a physical tensor to a new logical tensor (BatchedTensor).
165
+ // Assumes that all of the "batch dimensions" are at the front
166
+ // of the physical tensor. For example, given:
167
+ // - x = rank-4 Tensor with size 2, 3, 5, 7
168
+ // - levels = (2, 4)
169
+ // Returns:
170
+ // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
171
+ Tensor apply(const Tensor& physical_tensor) const;
172
+
173
+ // Given a vector of physical tensors,
174
+ // 1. maps each tensor to a new logical tensor. Assumes that all of the
175
+ // "batch dimensions" are at the front of the physical tensors.
176
+ // 2. stores the new logical tensors back into the passed-in vector. This is
177
+ // to avoid additional dynamic allocations.
178
+ void applyInplace(std::vector<Tensor>& physical_tensors) const;
179
+
180
+ std::bitset<kVmapNumLevels> levels_;
181
+ };
182
+
183
+ } // namespace at
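An illustrative batching-rule skeleton (not part of the added header) following the comments above; it assumes `logical_tensor` is a BatchedTensor produced by the legacy vmap machinery:

#include <ATen/ATen.h>
#include <ATen/LegacyVmapTransforms.h>

at::Tensor sum_over_logical_dim0(const at::Tensor& logical_tensor) {
  // Permute all batch dims to the front and obtain a physical view.
  auto physical_view =
      at::MultiBatchVmapTransform::logicalToPhysical(logical_tensor);
  // Logical dim 0 maps to physical dim (number of batch dims + 0).
  int64_t physical_dim = physical_view.getPhysicalDim(0);
  at::Tensor physical_result = physical_view.tensor().sum(physical_dim);
  // Re-wrap the physical result as a logical BatchedTensor.
  return physical_view.getPhysicalToLogicalMap().apply(physical_result);
}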
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h ADDED
@@ -0,0 +1,139 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/util/string_view.h>
5
+
6
+ namespace at {
7
+
8
+ enum MappedAllocatorModes {
9
+ ALLOCATOR_MAPPED_SHARED = 1,
10
+ ALLOCATOR_MAPPED_SHAREDMEM = 2,
11
+ ALLOCATOR_MAPPED_EXCLUSIVE = 4,
12
+ ALLOCATOR_MAPPED_NOCREATE = 8,
13
+ ALLOCATOR_MAPPED_KEEPFD = 16,
14
+ ALLOCATOR_MAPPED_FROMFD = 32,
15
+ ALLOCATOR_MAPPED_UNLINK = 64
16
+ };
17
+
18
+ // Sentinel value/type to help distinguish the file descriptor constructor from
19
+ // the non-file descriptor constructor
20
+ enum WithFd { WITH_FD };
21
+
22
+ TORCH_API std::string NewProcessWideShmHandle();
23
+
24
+ class TORCH_API MapAllocator {
25
+ public:
26
+ MapAllocator(c10::string_view filename, int flags, size_t size);
27
+ MapAllocator(
28
+ WithFd,
29
+ c10::string_view filename,
30
+ int fd,
31
+ int flags,
32
+ size_t size);
33
+ MapAllocator(const MapAllocator&) = delete;
34
+ MapAllocator& operator=(const MapAllocator&) = delete;
35
+ MapAllocator(MapAllocator&&) = delete;
36
+ MapAllocator& operator=(MapAllocator&&) = delete;
37
+
38
+ const char* filename() const {
39
+ return filename_.c_str();
40
+ }
41
+ int fd() const {
42
+ #ifdef _WIN32
43
+ TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows");
44
+ #else
45
+ return fd_;
46
+ #endif
47
+ }
48
+ ptrdiff_t size() const {
49
+ return size_;
50
+ }
51
+ // Return a pointer to the actual data for this allocator
52
+ // (in the case of the refcounted allocator, this is offset
53
+ // from the base pointer.)
54
+ virtual void* data() const {
55
+ return base_ptr_;
56
+ }
57
+
58
+ static MapAllocator* fromDataPtr(const at::DataPtr&);
59
+ static at::DataPtr makeDataPtr(
60
+ c10::string_view filename,
61
+ int flags,
62
+ size_t size,
63
+ size_t* actual_size_out);
64
+ static at::DataPtr makeDataPtr(
65
+ WithFd,
66
+ const char* filename,
67
+ int fd,
68
+ int flags,
69
+ size_t size,
70
+ size_t* actual_size_out);
71
+
72
+ // Closes the data. Helps us avoid destructor shenanigans
73
+ virtual void close();
74
+
75
+ // This is very dangerous. You have to redefine this destructor for each
76
+ // subclass
77
+ virtual ~MapAllocator();
78
+
79
+ protected:
80
+ bool closed_ = false;
81
+ std::string filename_;
82
+ int flags_ = 0;
83
+ ptrdiff_t size_; /* mapped size */
84
+ #ifdef _WIN32
85
+ void* handle_;
86
+ void* event_;
87
+ std::string eventname_;
88
+ #else
89
+ int fd_ = -1;
90
+ #endif
91
+ void* base_ptr_ = nullptr;
92
+ };
93
+
94
+ // Base-from-member idiom
95
+ struct TORCH_API RefcountedMapAllocatorArgCheck {
96
+ RefcountedMapAllocatorArgCheck(int flags);
97
+ };
98
+
99
+ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
100
+ public MapAllocator {
101
+ public:
102
+ RefcountedMapAllocator(const char* filename, int flags, size_t size);
103
+ RefcountedMapAllocator(
104
+ WithFd,
105
+ const char* filename,
106
+ int fd,
107
+ int flags,
108
+ size_t size);
109
+
110
+ static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
111
+ static at::DataPtr makeDataPtr(
112
+ const char* filename,
113
+ int flags,
114
+ size_t size,
115
+ size_t* actual_size_out);
116
+ static at::DataPtr makeDataPtr(
117
+ WithFd,
118
+ const char* filename,
119
+ int fd,
120
+ int flags,
121
+ size_t size,
122
+ size_t* actual_size_out);
123
+
124
+ void* data() const override;
125
+
126
+ void incref();
127
+ int decref();
128
+ void close() override;
129
+
130
+ ~RefcountedMapAllocator() override {
131
+ close();
132
+ }
133
+
134
+ protected:
135
+ void checkFlags();
136
+ void initializeAlloc();
137
+ };
138
+
139
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h ADDED
@@ -0,0 +1,441 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from MethodOperators.h
4
+
5
+ #ifdef TORCH_ASSERT_NO_OPERATORS
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if your change would be better placed in \
9
+ another file, or if a more specific header might achieve the same goal. \
10
+ See NOTE: [Tensor vs. TensorBase]
11
+ #endif
12
+
13
+ // Forward declarations of any types needed in the operator signatures.
14
+ // We can't directly include these classes because it will cause circular include dependencies.
15
+ // This file is included by TensorBody.h, which defines the Tensor class.
16
+ #include <ATen/core/ATen_fwd.h>
17
+
18
+ #include <ATen/ops/_addmm_activation_ops.h>
19
+ #include <ATen/ops/_autocast_to_full_precision_ops.h>
20
+ #include <ATen/ops/_autocast_to_reduced_precision_ops.h>
21
+ #include <ATen/ops/_backward_ops.h>
22
+ #include <ATen/ops/_coalesced_ops.h>
23
+ #include <ATen/ops/_conj_ops.h>
24
+ #include <ATen/ops/_conj_physical_ops.h>
25
+ #include <ATen/ops/_dimI_ops.h>
26
+ #include <ATen/ops/_dimV_ops.h>
27
+ #include <ATen/ops/_fw_primal_ops.h>
28
+ #include <ATen/ops/_indices_ops.h>
29
+ #include <ATen/ops/_is_all_true_ops.h>
30
+ #include <ATen/ops/_is_any_true_ops.h>
31
+ #include <ATen/ops/_is_zerotensor_ops.h>
32
+ #include <ATen/ops/_neg_view_ops.h>
33
+ #include <ATen/ops/_nested_tensor_size_ops.h>
34
+ #include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
35
+ #include <ATen/ops/_nested_tensor_strides_ops.h>
36
+ #include <ATen/ops/_nnz_ops.h>
37
+ #include <ATen/ops/_reshape_alias_ops.h>
38
+ #include <ATen/ops/_sparse_mask_projection_ops.h>
39
+ #include <ATen/ops/_to_dense_ops.h>
40
+ #include <ATen/ops/_to_sparse_bsc_ops.h>
41
+ #include <ATen/ops/_to_sparse_bsr_ops.h>
42
+ #include <ATen/ops/_to_sparse_csc_ops.h>
43
+ #include <ATen/ops/_to_sparse_csr_ops.h>
44
+ #include <ATen/ops/_to_sparse_ops.h>
45
+ #include <ATen/ops/_values_ops.h>
46
+ #include <ATen/ops/_version_ops.h>
47
+ #include <ATen/ops/abs_ops.h>
48
+ #include <ATen/ops/absolute_ops.h>
49
+ #include <ATen/ops/acos_ops.h>
50
+ #include <ATen/ops/acosh_ops.h>
51
+ #include <ATen/ops/add_ops.h>
52
+ #include <ATen/ops/addbmm_ops.h>
53
+ #include <ATen/ops/addcdiv_ops.h>
54
+ #include <ATen/ops/addcmul_ops.h>
55
+ #include <ATen/ops/addmm_ops.h>
56
+ #include <ATen/ops/addmv_ops.h>
57
+ #include <ATen/ops/addr_ops.h>
58
+ #include <ATen/ops/adjoint_ops.h>
59
+ #include <ATen/ops/alias_ops.h>
60
+ #include <ATen/ops/align_as_ops.h>
61
+ #include <ATen/ops/align_to_ops.h>
62
+ #include <ATen/ops/all_ops.h>
63
+ #include <ATen/ops/allclose_ops.h>
64
+ #include <ATen/ops/amax_ops.h>
65
+ #include <ATen/ops/amin_ops.h>
66
+ #include <ATen/ops/aminmax_ops.h>
67
+ #include <ATen/ops/and_ops.h>
68
+ #include <ATen/ops/angle_ops.h>
69
+ #include <ATen/ops/any_ops.h>
70
+ #include <ATen/ops/arccos_ops.h>
71
+ #include <ATen/ops/arccosh_ops.h>
72
+ #include <ATen/ops/arcsin_ops.h>
73
+ #include <ATen/ops/arcsinh_ops.h>
74
+ #include <ATen/ops/arctan2_ops.h>
75
+ #include <ATen/ops/arctan_ops.h>
76
+ #include <ATen/ops/arctanh_ops.h>
77
+ #include <ATen/ops/argmax_ops.h>
78
+ #include <ATen/ops/argmin_ops.h>
79
+ #include <ATen/ops/argsort_ops.h>
80
+ #include <ATen/ops/argwhere_ops.h>
81
+ #include <ATen/ops/as_strided_ops.h>
82
+ #include <ATen/ops/as_strided_scatter_ops.h>
83
+ #include <ATen/ops/asin_ops.h>
84
+ #include <ATen/ops/asinh_ops.h>
85
+ #include <ATen/ops/atan2_ops.h>
86
+ #include <ATen/ops/atan_ops.h>
87
+ #include <ATen/ops/atanh_ops.h>
88
+ #include <ATen/ops/baddbmm_ops.h>
89
+ #include <ATen/ops/bernoulli_ops.h>
90
+ #include <ATen/ops/bincount_ops.h>
91
+ #include <ATen/ops/bitwise_and_ops.h>
92
+ #include <ATen/ops/bitwise_left_shift_ops.h>
93
+ #include <ATen/ops/bitwise_not_ops.h>
94
+ #include <ATen/ops/bitwise_or_ops.h>
95
+ #include <ATen/ops/bitwise_right_shift_ops.h>
96
+ #include <ATen/ops/bitwise_xor_ops.h>
97
+ #include <ATen/ops/bmm_ops.h>
98
+ #include <ATen/ops/broadcast_to_ops.h>
99
+ #include <ATen/ops/cauchy_ops.h>
100
+ #include <ATen/ops/ccol_indices_ops.h>
101
+ #include <ATen/ops/ceil_ops.h>
102
+ #include <ATen/ops/chalf_ops.h>
103
+ #include <ATen/ops/cholesky_inverse_ops.h>
104
+ #include <ATen/ops/cholesky_ops.h>
105
+ #include <ATen/ops/cholesky_solve_ops.h>
106
+ #include <ATen/ops/chunk_ops.h>
107
+ #include <ATen/ops/clamp_max_ops.h>
108
+ #include <ATen/ops/clamp_min_ops.h>
109
+ #include <ATen/ops/clamp_ops.h>
110
+ #include <ATen/ops/clip_ops.h>
111
+ #include <ATen/ops/clone_ops.h>
112
+ #include <ATen/ops/coalesce_ops.h>
113
+ #include <ATen/ops/col_indices_ops.h>
114
+ #include <ATen/ops/conj_ops.h>
115
+ #include <ATen/ops/conj_physical_ops.h>
116
+ #include <ATen/ops/contiguous_ops.h>
117
+ #include <ATen/ops/copy_ops.h>
118
+ #include <ATen/ops/copysign_ops.h>
119
+ #include <ATen/ops/corrcoef_ops.h>
120
+ #include <ATen/ops/cos_ops.h>
121
+ #include <ATen/ops/cosh_ops.h>
122
+ #include <ATen/ops/count_nonzero_ops.h>
123
+ #include <ATen/ops/cov_ops.h>
124
+ #include <ATen/ops/cross_ops.h>
125
+ #include <ATen/ops/crow_indices_ops.h>
126
+ #include <ATen/ops/cummax_ops.h>
127
+ #include <ATen/ops/cummin_ops.h>
128
+ #include <ATen/ops/cumprod_ops.h>
129
+ #include <ATen/ops/cumsum_ops.h>
130
+ #include <ATen/ops/data_ops.h>
131
+ #include <ATen/ops/deg2rad_ops.h>
132
+ #include <ATen/ops/dense_dim_ops.h>
133
+ #include <ATen/ops/dequantize_ops.h>
134
+ #include <ATen/ops/det_ops.h>
135
+ #include <ATen/ops/detach_ops.h>
136
+ #include <ATen/ops/diag_embed_ops.h>
137
+ #include <ATen/ops/diag_ops.h>
138
+ #include <ATen/ops/diagflat_ops.h>
139
+ #include <ATen/ops/diagonal_ops.h>
140
+ #include <ATen/ops/diagonal_scatter_ops.h>
141
+ #include <ATen/ops/diff_ops.h>
142
+ #include <ATen/ops/digamma_ops.h>
143
+ #include <ATen/ops/dist_ops.h>
144
+ #include <ATen/ops/div_ops.h>
145
+ #include <ATen/ops/divide_ops.h>
146
+ #include <ATen/ops/dot_ops.h>
147
+ #include <ATen/ops/dsplit_ops.h>
148
+ #include <ATen/ops/eq_ops.h>
149
+ #include <ATen/ops/equal_ops.h>
150
+ #include <ATen/ops/erf_ops.h>
151
+ #include <ATen/ops/erfc_ops.h>
152
+ #include <ATen/ops/erfinv_ops.h>
153
+ #include <ATen/ops/exp2_ops.h>
154
+ #include <ATen/ops/exp_ops.h>
155
+ #include <ATen/ops/expand_as_ops.h>
156
+ #include <ATen/ops/expand_ops.h>
157
+ #include <ATen/ops/expm1_ops.h>
158
+ #include <ATen/ops/exponential_ops.h>
159
+ #include <ATen/ops/fill_diagonal_ops.h>
160
+ #include <ATen/ops/fill_ops.h>
161
+ #include <ATen/ops/fix_ops.h>
162
+ #include <ATen/ops/flatten_ops.h>
163
+ #include <ATen/ops/flip_ops.h>
164
+ #include <ATen/ops/fliplr_ops.h>
165
+ #include <ATen/ops/flipud_ops.h>
166
+ #include <ATen/ops/float_power_ops.h>
167
+ #include <ATen/ops/floor_divide_ops.h>
168
+ #include <ATen/ops/floor_ops.h>
169
+ #include <ATen/ops/fmax_ops.h>
170
+ #include <ATen/ops/fmin_ops.h>
171
+ #include <ATen/ops/fmod_ops.h>
172
+ #include <ATen/ops/frac_ops.h>
173
+ #include <ATen/ops/frexp_ops.h>
174
+ #include <ATen/ops/gather_ops.h>
175
+ #include <ATen/ops/gcd_ops.h>
176
+ #include <ATen/ops/ge_ops.h>
177
+ #include <ATen/ops/geometric_ops.h>
178
+ #include <ATen/ops/geqrf_ops.h>
179
+ #include <ATen/ops/ger_ops.h>
180
+ #include <ATen/ops/greater_equal_ops.h>
181
+ #include <ATen/ops/greater_ops.h>
182
+ #include <ATen/ops/gt_ops.h>
183
+ #include <ATen/ops/hardshrink_backward_ops.h>
184
+ #include <ATen/ops/hardshrink_ops.h>
185
+ #include <ATen/ops/heaviside_ops.h>
186
+ #include <ATen/ops/histc_ops.h>
187
+ #include <ATen/ops/histogram_ops.h>
188
+ #include <ATen/ops/hsplit_ops.h>
189
+ #include <ATen/ops/hypot_ops.h>
190
+ #include <ATen/ops/i0_ops.h>
191
+ #include <ATen/ops/igamma_ops.h>
192
+ #include <ATen/ops/igammac_ops.h>
193
+ #include <ATen/ops/index_add_ops.h>
194
+ #include <ATen/ops/index_copy_ops.h>
195
+ #include <ATen/ops/index_fill_ops.h>
196
+ #include <ATen/ops/index_ops.h>
197
+ #include <ATen/ops/index_put_ops.h>
198
+ #include <ATen/ops/index_reduce_ops.h>
199
+ #include <ATen/ops/index_select_ops.h>
200
+ #include <ATen/ops/indices_ops.h>
201
+ #include <ATen/ops/inner_ops.h>
202
+ #include <ATen/ops/int_repr_ops.h>
203
+ #include <ATen/ops/inverse_ops.h>
204
+ #include <ATen/ops/is_coalesced_ops.h>
205
+ #include <ATen/ops/is_complex_ops.h>
206
+ #include <ATen/ops/is_conj_ops.h>
207
+ #include <ATen/ops/is_distributed_ops.h>
208
+ #include <ATen/ops/is_floating_point_ops.h>
209
+ #include <ATen/ops/is_inference_ops.h>
210
+ #include <ATen/ops/is_leaf_ops.h>
211
+ #include <ATen/ops/is_neg_ops.h>
212
+ #include <ATen/ops/is_nonzero_ops.h>
213
+ #include <ATen/ops/is_pinned_ops.h>
214
+ #include <ATen/ops/is_same_size_ops.h>
215
+ #include <ATen/ops/is_set_to_ops.h>
216
+ #include <ATen/ops/is_signed_ops.h>
217
+ #include <ATen/ops/isclose_ops.h>
218
+ #include <ATen/ops/isfinite_ops.h>
219
+ #include <ATen/ops/isinf_ops.h>
220
+ #include <ATen/ops/isnan_ops.h>
221
+ #include <ATen/ops/isneginf_ops.h>
222
+ #include <ATen/ops/isposinf_ops.h>
223
+ #include <ATen/ops/isreal_ops.h>
224
+ #include <ATen/ops/istft_ops.h>
225
+ #include <ATen/ops/item_ops.h>
226
+ #include <ATen/ops/kron_ops.h>
227
+ #include <ATen/ops/kthvalue_ops.h>
228
+ #include <ATen/ops/lcm_ops.h>
229
+ #include <ATen/ops/ldexp_ops.h>
230
+ #include <ATen/ops/le_ops.h>
231
+ #include <ATen/ops/lerp_ops.h>
232
+ #include <ATen/ops/less_equal_ops.h>
233
+ #include <ATen/ops/less_ops.h>
234
+ #include <ATen/ops/lgamma_ops.h>
235
+ #include <ATen/ops/log10_ops.h>
236
+ #include <ATen/ops/log1p_ops.h>
237
+ #include <ATen/ops/log2_ops.h>
238
+ #include <ATen/ops/log_normal_ops.h>
239
+ #include <ATen/ops/log_ops.h>
240
+ #include <ATen/ops/log_softmax_ops.h>
241
+ #include <ATen/ops/logaddexp2_ops.h>
242
+ #include <ATen/ops/logaddexp_ops.h>
243
+ #include <ATen/ops/logcumsumexp_ops.h>
244
+ #include <ATen/ops/logdet_ops.h>
245
+ #include <ATen/ops/logical_and_ops.h>
246
+ #include <ATen/ops/logical_not_ops.h>
247
+ #include <ATen/ops/logical_or_ops.h>
248
+ #include <ATen/ops/logical_xor_ops.h>
249
+ #include <ATen/ops/logit_ops.h>
250
+ #include <ATen/ops/logsumexp_ops.h>
251
+ #include <ATen/ops/lshift_ops.h>
252
+ #include <ATen/ops/lt_ops.h>
253
+ #include <ATen/ops/lu_solve_ops.h>
254
+ #include <ATen/ops/mH_ops.h>
255
+ #include <ATen/ops/mT_ops.h>
256
+ #include <ATen/ops/masked_fill_ops.h>
257
+ #include <ATen/ops/masked_scatter_ops.h>
258
+ #include <ATen/ops/masked_select_ops.h>
259
+ #include <ATen/ops/matmul_ops.h>
260
+ #include <ATen/ops/matrix_H_ops.h>
261
+ #include <ATen/ops/matrix_exp_ops.h>
262
+ #include <ATen/ops/matrix_power_ops.h>
263
+ #include <ATen/ops/max_ops.h>
264
+ #include <ATen/ops/maximum_ops.h>
265
+ #include <ATen/ops/mean_ops.h>
266
+ #include <ATen/ops/median_ops.h>
267
+ #include <ATen/ops/min_ops.h>
268
+ #include <ATen/ops/minimum_ops.h>
269
+ #include <ATen/ops/mm_ops.h>
270
+ #include <ATen/ops/mode_ops.h>
271
+ #include <ATen/ops/moveaxis_ops.h>
272
+ #include <ATen/ops/movedim_ops.h>
273
+ #include <ATen/ops/msort_ops.h>
274
+ #include <ATen/ops/mul_ops.h>
275
+ #include <ATen/ops/multinomial_ops.h>
276
+ #include <ATen/ops/multiply_ops.h>
277
+ #include <ATen/ops/mv_ops.h>
278
+ #include <ATen/ops/mvlgamma_ops.h>
279
+ #include <ATen/ops/nan_to_num_ops.h>
280
+ #include <ATen/ops/nanmean_ops.h>
281
+ #include <ATen/ops/nanmedian_ops.h>
282
+ #include <ATen/ops/nanquantile_ops.h>
283
+ #include <ATen/ops/nansum_ops.h>
284
+ #include <ATen/ops/narrow_copy_ops.h>
285
+ #include <ATen/ops/narrow_ops.h>
286
+ #include <ATen/ops/ne_ops.h>
287
+ #include <ATen/ops/neg_ops.h>
288
+ #include <ATen/ops/negative_ops.h>
289
+ #include <ATen/ops/new_empty_ops.h>
290
+ #include <ATen/ops/new_empty_strided_ops.h>
291
+ #include <ATen/ops/new_full_ops.h>
292
+ #include <ATen/ops/new_ones_ops.h>
293
+ #include <ATen/ops/new_zeros_ops.h>
294
+ #include <ATen/ops/nextafter_ops.h>
295
+ #include <ATen/ops/nonzero_numpy_ops.h>
296
+ #include <ATen/ops/nonzero_ops.h>
297
+ #include <ATen/ops/nonzero_static_ops.h>
298
+ #include <ATen/ops/norm_ops.h>
299
+ #include <ATen/ops/normal_ops.h>
300
+ #include <ATen/ops/not_equal_ops.h>
301
+ #include <ATen/ops/numpy_T_ops.h>
302
+ #include <ATen/ops/or_ops.h>
303
+ #include <ATen/ops/orgqr_ops.h>
304
+ #include <ATen/ops/ormqr_ops.h>
305
+ #include <ATen/ops/outer_ops.h>
306
+ #include <ATen/ops/output_nr_ops.h>
307
+ #include <ATen/ops/permute_ops.h>
308
+ #include <ATen/ops/pin_memory_ops.h>
309
+ #include <ATen/ops/pinverse_ops.h>
310
+ #include <ATen/ops/polygamma_ops.h>
311
+ #include <ATen/ops/positive_ops.h>
312
+ #include <ATen/ops/pow_ops.h>
313
+ #include <ATen/ops/prelu_ops.h>
314
+ #include <ATen/ops/prod_ops.h>
315
+ #include <ATen/ops/put_ops.h>
316
+ #include <ATen/ops/q_per_channel_axis_ops.h>
317
+ #include <ATen/ops/q_per_channel_scales_ops.h>
318
+ #include <ATen/ops/q_per_channel_zero_points_ops.h>
319
+ #include <ATen/ops/q_scale_ops.h>
320
+ #include <ATen/ops/q_zero_point_ops.h>
321
+ #include <ATen/ops/qr_ops.h>
322
+ #include <ATen/ops/qscheme_ops.h>
323
+ #include <ATen/ops/quantile_ops.h>
324
+ #include <ATen/ops/rad2deg_ops.h>
325
+ #include <ATen/ops/random_ops.h>
326
+ #include <ATen/ops/ravel_ops.h>
327
+ #include <ATen/ops/reciprocal_ops.h>
328
+ #include <ATen/ops/record_stream_ops.h>
329
+ #include <ATen/ops/refine_names_ops.h>
330
+ #include <ATen/ops/relu_ops.h>
331
+ #include <ATen/ops/remainder_ops.h>
332
+ #include <ATen/ops/rename_ops.h>
333
+ #include <ATen/ops/renorm_ops.h>
334
+ #include <ATen/ops/repeat_interleave_ops.h>
335
+ #include <ATen/ops/repeat_ops.h>
336
+ #include <ATen/ops/requires_grad_ops.h>
337
+ #include <ATen/ops/reshape_as_ops.h>
338
+ #include <ATen/ops/reshape_ops.h>
339
+ #include <ATen/ops/resize_as_ops.h>
340
+ #include <ATen/ops/resize_as_sparse_ops.h>
341
+ #include <ATen/ops/resize_ops.h>
342
+ #include <ATen/ops/resolve_conj_ops.h>
343
+ #include <ATen/ops/resolve_neg_ops.h>
344
+ #include <ATen/ops/retain_grad_ops.h>
345
+ #include <ATen/ops/retains_grad_ops.h>
346
+ #include <ATen/ops/roll_ops.h>
347
+ #include <ATen/ops/rot90_ops.h>
348
+ #include <ATen/ops/round_ops.h>
349
+ #include <ATen/ops/row_indices_ops.h>
350
+ #include <ATen/ops/rshift_ops.h>
351
+ #include <ATen/ops/rsqrt_ops.h>
352
+ #include <ATen/ops/scatter_add_ops.h>
353
+ #include <ATen/ops/scatter_ops.h>
354
+ #include <ATen/ops/scatter_reduce_ops.h>
355
+ #include <ATen/ops/select_ops.h>
356
+ #include <ATen/ops/select_scatter_ops.h>
357
+ #include <ATen/ops/set_data_ops.h>
358
+ #include <ATen/ops/set_ops.h>
359
+ #include <ATen/ops/sgn_ops.h>
360
+ #include <ATen/ops/sigmoid_ops.h>
361
+ #include <ATen/ops/sign_ops.h>
362
+ #include <ATen/ops/signbit_ops.h>
363
+ #include <ATen/ops/sin_ops.h>
364
+ #include <ATen/ops/sinc_ops.h>
365
+ #include <ATen/ops/sinh_ops.h>
366
+ #include <ATen/ops/size_ops.h>
367
+ #include <ATen/ops/slice_ops.h>
368
+ #include <ATen/ops/slice_scatter_ops.h>
369
+ #include <ATen/ops/slogdet_ops.h>
370
+ #include <ATen/ops/smm_ops.h>
371
+ #include <ATen/ops/softmax_ops.h>
372
+ #include <ATen/ops/sort_ops.h>
373
+ #include <ATen/ops/sparse_dim_ops.h>
374
+ #include <ATen/ops/sparse_mask_ops.h>
375
+ #include <ATen/ops/sparse_resize_and_clear_ops.h>
376
+ #include <ATen/ops/sparse_resize_ops.h>
377
+ #include <ATen/ops/split_ops.h>
378
+ #include <ATen/ops/split_with_sizes_ops.h>
379
+ #include <ATen/ops/sqrt_ops.h>
380
+ #include <ATen/ops/square_ops.h>
381
+ #include <ATen/ops/squeeze_ops.h>
382
+ #include <ATen/ops/sspaddmm_ops.h>
383
+ #include <ATen/ops/std_ops.h>
384
+ #include <ATen/ops/stft_ops.h>
385
+ #include <ATen/ops/stride_ops.h>
386
+ #include <ATen/ops/sub_ops.h>
387
+ #include <ATen/ops/subtract_ops.h>
388
+ #include <ATen/ops/sum_ops.h>
389
+ #include <ATen/ops/sum_to_size_ops.h>
390
+ #include <ATen/ops/svd_ops.h>
391
+ #include <ATen/ops/swapaxes_ops.h>
392
+ #include <ATen/ops/swapdims_ops.h>
393
+ #include <ATen/ops/t_ops.h>
394
+ #include <ATen/ops/take_along_dim_ops.h>
395
+ #include <ATen/ops/take_ops.h>
396
+ #include <ATen/ops/tan_ops.h>
397
+ #include <ATen/ops/tanh_ops.h>
398
+ #include <ATen/ops/tensor_split_ops.h>
399
+ #include <ATen/ops/tile_ops.h>
400
+ #include <ATen/ops/to_dense_ops.h>
401
+ #include <ATen/ops/to_mkldnn_ops.h>
402
+ #include <ATen/ops/to_ops.h>
403
+ #include <ATen/ops/to_padded_tensor_ops.h>
404
+ #include <ATen/ops/to_sparse_bsc_ops.h>
405
+ #include <ATen/ops/to_sparse_bsr_ops.h>
406
+ #include <ATen/ops/to_sparse_csc_ops.h>
407
+ #include <ATen/ops/to_sparse_csr_ops.h>
408
+ #include <ATen/ops/to_sparse_ops.h>
409
+ #include <ATen/ops/topk_ops.h>
410
+ #include <ATen/ops/trace_ops.h>
411
+ #include <ATen/ops/transpose_ops.h>
412
+ #include <ATen/ops/triangular_solve_ops.h>
413
+ #include <ATen/ops/tril_ops.h>
414
+ #include <ATen/ops/triu_ops.h>
415
+ #include <ATen/ops/true_divide_ops.h>
416
+ #include <ATen/ops/trunc_ops.h>
417
+ #include <ATen/ops/type_as_ops.h>
418
+ #include <ATen/ops/unbind_ops.h>
419
+ #include <ATen/ops/unflatten_ops.h>
420
+ #include <ATen/ops/unfold_ops.h>
421
+ #include <ATen/ops/uniform_ops.h>
422
+ #include <ATen/ops/unsafe_chunk_ops.h>
423
+ #include <ATen/ops/unsafe_split_ops.h>
424
+ #include <ATen/ops/unsafe_split_with_sizes_ops.h>
425
+ #include <ATen/ops/unsqueeze_ops.h>
426
+ #include <ATen/ops/values_ops.h>
427
+ #include <ATen/ops/var_ops.h>
428
+ #include <ATen/ops/vdot_ops.h>
429
+ #include <ATen/ops/view_as_ops.h>
430
+ #include <ATen/ops/view_ops.h>
431
+ #include <ATen/ops/vsplit_ops.h>
432
+ #include <ATen/ops/where_ops.h>
433
+ #include <ATen/ops/xlogy_ops.h>
434
+ #include <ATen/ops/xor_ops.h>
435
+ #include <ATen/ops/zero_ops.h>
436
+
437
+ namespace at {
438
+ namespace _ops {
439
+
440
+ } // namespace _ops
441
+ } // namespace at
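The #error guards in these generated headers exist to keep translation units from quietly depending on every operator. A hedged sketch of the narrower per-operator pattern they steer you toward (the choice of the `add` operator and the wrapper function are illustrative assumptions):

// Opt this translation unit out of the monolithic operator headers.
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ops/add_ops.h> // declares only the generated struct for add

at::Tensor add_tensors(const at::Tensor& self, const at::Tensor& other) {
  // Each per-operator header exposes a struct with a static call() entry point.
  return at::_ops::add_Tensor::call(self, other, /*alpha=*/1);
}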
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h ADDED
@@ -0,0 +1 @@
1
+ #include <ATen/core/NamedTensor.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #ifdef __HIPCC__
4
+ #include <hip/hip_runtime.h>
5
+ #endif
6
+
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/BFloat16.h>
9
+ #include <c10/util/Float8_e4m3fn.h>
10
+ #include <c10/util/Float8_e5m2.h>
11
+ #include <c10/util/Half.h>
12
+ #include <c10/util/complex.h>
13
+
14
+ #include <cmath>
15
+ #include <type_traits>
16
+
17
+ namespace at {
18
+
19
+ // std::isnan isn't performant to use on integral types; it will
20
+ // (uselessly) convert to floating point and then do the test.
21
+ // This function is.
22
+
23
+ template <
24
+ typename T,
25
+ typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
26
+ inline C10_HOST_DEVICE bool _isnan(T /*val*/) {
27
+ return false;
28
+ }
29
+
30
+ template <
31
+ typename T,
32
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
33
+ inline C10_HOST_DEVICE bool _isnan(T val) {
34
+ #if defined(__CUDACC__) || defined(__HIPCC__)
35
+ return ::isnan(val);
36
+ #else
37
+ return std::isnan(val);
38
+ #endif
39
+ }
40
+
41
+ template <
42
+ typename T,
43
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
44
+ inline C10_HOST_DEVICE bool _isnan(T val) {
45
+ return std::isnan(val.real()) || std::isnan(val.imag());
46
+ }
47
+
48
+ template <
49
+ typename T,
50
+ typename std::enable_if<std::is_same<T, at::Half>::value, int>::type = 0>
51
+ inline C10_HOST_DEVICE bool _isnan(T val) {
52
+ return at::_isnan(static_cast<float>(val));
53
+ }
54
+
55
+ template <
56
+ typename T,
57
+ typename std::enable_if<std::is_same<T, at::BFloat16>::value, int>::type =
58
+ 0>
59
+ inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
60
+ return at::_isnan(static_cast<float>(val));
61
+ }
62
+
63
+ inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
64
+ return at::_isnan(static_cast<float>(val));
65
+ }
66
+
67
+ template <
68
+ typename T,
69
+ typename std::enable_if<std::is_same<T, at::Float8_e5m2>::value, int>::
70
+ type = 0>
71
+ inline C10_HOST_DEVICE bool _isnan(T val) {
72
+ return val.isnan();
73
+ }
74
+
75
+ template <
76
+ typename T,
77
+ typename std::enable_if<std::is_same<T, at::Float8_e4m3fn>::value, int>::
78
+ type = 0>
79
+ inline C10_HOST_DEVICE bool _isnan(T val) {
80
+ return val.isnan();
81
+ }
82
+
83
+ // std::isinf isn't performant to use on integral types; it will
84
+ // (uselessly) convert to floating point and then do the test.
85
+ // This function is.
86
+
87
+ template <
88
+ typename T,
89
+ typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
90
+ inline C10_HOST_DEVICE bool _isinf(T /*val*/) {
91
+ return false;
92
+ }
93
+
94
+ template <
95
+ typename T,
96
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
97
+ inline C10_HOST_DEVICE bool _isinf(T val) {
98
+ #if defined(__CUDACC__) || defined(__HIPCC__)
99
+ return ::isinf(val);
100
+ #else
101
+ return std::isinf(val);
102
+ #endif
103
+ }
104
+
105
+ inline C10_HOST_DEVICE bool _isinf(at::Half val) {
106
+ return at::_isinf(static_cast<float>(val));
107
+ }
108
+
109
+ inline C10_HOST_DEVICE bool _isinf(at::BFloat16 val) {
110
+ return at::_isinf(static_cast<float>(val));
111
+ }
112
+
113
+ inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2 val) {
114
+ return val.isinf();
115
+ }
116
+
117
+ inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fn val) {
118
+ return false;
119
+ }
120
+
121
+ template <typename T>
122
+ C10_HOST_DEVICE inline T exp(T x) {
123
+ static_assert(
124
+ !std::is_same<T, double>::value,
125
+ "this template must be used with float or less precise type");
126
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
127
+ // use __expf fast approximation for peak bandwidth
128
+ return __expf(x);
129
+ #else
130
+ return ::exp(x);
131
+ #endif
132
+ }
133
+
134
+ template <>
135
+ C10_HOST_DEVICE inline double exp<double>(double x) {
136
+ return ::exp(x);
137
+ }
138
+
139
+ template <typename T>
140
+ C10_HOST_DEVICE inline T log(T x) {
141
+ static_assert(
142
+ !std::is_same<T, double>::value,
143
+ "this template must be used with float or less precise type");
144
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
145
+ // use __logf fast approximation for peak bandwidth
146
+ return __logf(x);
147
+ #else
148
+ return ::log(x);
149
+ #endif
150
+ }
151
+
152
+ template <>
153
+ C10_HOST_DEVICE inline double log<double>(double x) {
154
+ return ::log(x);
155
+ }
156
+
157
+ template <typename T>
158
+ C10_HOST_DEVICE inline T log1p(T x) {
159
+ static_assert(
160
+ !std::is_same<T, double>::value,
161
+ "this template must be used with float or less precise type");
162
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
163
+ // use __logf fast approximation for peak bandwidth
164
+ // NOTE: There is no __log1pf so unfortunately we lose precision.
165
+ return __logf(1.0f + x);
166
+ #else
167
+ return ::log1p(x);
168
+ #endif
169
+ }
170
+
171
+ template <>
172
+ C10_HOST_DEVICE inline double log1p<double>(double x) {
173
+ return ::log1p(x);
174
+ }
175
+
176
+ template <typename T>
177
+ C10_HOST_DEVICE inline T tan(T x) {
178
+ static_assert(
179
+ !std::is_same<T, double>::value,
180
+ "this template must be used with float or less precise type");
181
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
182
+ // use __tanf fast approximation for peak bandwidth
183
+ return __tanf(x);
184
+ #else
185
+ return ::tan(x);
186
+ #endif
187
+ }
188
+
189
+ template <>
190
+ C10_HOST_DEVICE inline double tan<double>(double x) {
191
+ return ::tan(x);
192
+ }
193
+
194
+ } // namespace at
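A small sketch (not part of the added header) of the type-dispatched helpers above:

#include <cmath>
#include <ATen/NumericUtils.h>

void numeric_utils_demo() {
  bool a = at::_isnan(42);                 // integral types: always false, no float conversion
  bool b = at::_isnan(std::nan(""));       // floating point: true
  bool c = at::_isinf(at::BFloat16(1.0f)); // reduced precision goes through float
  (void)a; (void)b; (void)c;
}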
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Operators.h ADDED
@@ -0,0 +1,1336 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operators.h
+
+ #ifdef TORCH_ASSERT_NO_OPERATORS
+ #error This change adds a dependency on native_functions.yaml, \
+ meaning the file will need to be re-compiled every time an operator \
+ is changed or added. Consider if your change would be better placed in \
+ another file, or if a more specific header might achieve the same goal. \
+ See NOTE: [Tensor vs. TensorBase]
+ #endif
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from <ATen/ops/{my_operator}_ops.h> \
+ and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
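As the guard just above spells out, translation units built with AT_PER_OPERATOR_HEADERS and TORCH_ASSERT_ONLY_METHOD_OPERATORS must not include this umbrella header; they should include only the specific <ATen/ops/*_ops.h> headers they need. A hedged sketch of what that looks like for a single operator (assuming a libtorch build; the overload-struct name at::_ops::add_Tensor and its call() signature follow the usual torchgen naming and are an assumption rather than something quoted from this diff):

    // Pull in just the one operator this file uses instead of <ATen/Operators.h>.
    #include <ATen/ops/add_ops.h>
    #include <ATen/core/Tensor.h>

    // The *_ops.h headers expose each operator as a struct with static call()
    // and redispatch() entry points rather than the at::add() convenience wrapper.
    at::Tensor add_via_op(const at::Tensor& a, const at::Tensor& b) {
      return at::_ops::add_Tensor::call(a, b, /*alpha=*/1);
    }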
+
+ #include <c10/core/SymInt.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/core/QScheme.h>
+ #include <c10/util/OptionalArrayRef.h>
+ #include <tuple>
+ #include <vector>
+
29
+ #include <ATen/ops/_adaptive_avg_pool2d_ops.h>
30
+ #include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
31
+ #include <ATen/ops/_adaptive_avg_pool3d_ops.h>
32
+ #include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
33
+ #include <ATen/ops/_add_batch_dim_ops.h>
34
+ #include <ATen/ops/_add_relu_ops.h>
35
+ #include <ATen/ops/_addmm_activation_ops.h>
36
+ #include <ATen/ops/_aminmax_ops.h>
37
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
38
+ #include <ATen/ops/_amp_update_scale_ops.h>
39
+ #include <ATen/ops/_assert_async_ops.h>
40
+ #include <ATen/ops/_assert_tensor_metadata_ops.h>
41
+ #include <ATen/ops/_autocast_to_full_precision_ops.h>
42
+ #include <ATen/ops/_autocast_to_reduced_precision_ops.h>
43
+ #include <ATen/ops/_backward_ops.h>
44
+ #include <ATen/ops/_batch_norm_impl_index_ops.h>
45
+ #include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
46
+ #include <ATen/ops/_cast_Byte_ops.h>
47
+ #include <ATen/ops/_cast_Char_ops.h>
48
+ #include <ATen/ops/_cast_Double_ops.h>
49
+ #include <ATen/ops/_cast_Float_ops.h>
50
+ #include <ATen/ops/_cast_Half_ops.h>
51
+ #include <ATen/ops/_cast_Int_ops.h>
52
+ #include <ATen/ops/_cast_Long_ops.h>
53
+ #include <ATen/ops/_cast_Short_ops.h>
54
+ #include <ATen/ops/_cdist_backward_ops.h>
55
+ #include <ATen/ops/_cdist_forward_ops.h>
56
+ #include <ATen/ops/_cholesky_solve_helper_ops.h>
57
+ #include <ATen/ops/_choose_qparams_per_tensor_ops.h>
58
+ #include <ATen/ops/_coalesce_ops.h>
59
+ #include <ATen/ops/_coalesced_ops.h>
60
+ #include <ATen/ops/_compute_linear_combination_ops.h>
61
+ #include <ATen/ops/_conj_ops.h>
62
+ #include <ATen/ops/_conj_copy_ops.h>
63
+ #include <ATen/ops/_conj_physical_ops.h>
64
+ #include <ATen/ops/_conv_depthwise2d_ops.h>
65
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
66
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
67
+ #include <ATen/ops/_convert_weight_to_int4pack_ops.h>
68
+ #include <ATen/ops/_convolution_ops.h>
69
+ #include <ATen/ops/_convolution_double_backward_ops.h>
70
+ #include <ATen/ops/_convolution_mode_ops.h>
71
+ #include <ATen/ops/_copy_from_ops.h>
72
+ #include <ATen/ops/_copy_from_and_resize_ops.h>
73
+ #include <ATen/ops/_cslt_compress_ops.h>
74
+ #include <ATen/ops/_cslt_sparse_mm_ops.h>
75
+ #include <ATen/ops/_ctc_loss_ops.h>
76
+ #include <ATen/ops/_ctc_loss_backward_ops.h>
77
+ #include <ATen/ops/_cudnn_ctc_loss_ops.h>
78
+ #include <ATen/ops/_cudnn_init_dropout_state_ops.h>
79
+ #include <ATen/ops/_cudnn_rnn_ops.h>
80
+ #include <ATen/ops/_cudnn_rnn_backward_ops.h>
81
+ #include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
82
+ #include <ATen/ops/_cufft_clear_plan_cache_ops.h>
83
+ #include <ATen/ops/_cufft_get_plan_cache_max_size_ops.h>
84
+ #include <ATen/ops/_cufft_get_plan_cache_size_ops.h>
85
+ #include <ATen/ops/_cufft_set_plan_cache_max_size_ops.h>
86
+ #include <ATen/ops/_cummax_helper_ops.h>
87
+ #include <ATen/ops/_cummin_helper_ops.h>
88
+ #include <ATen/ops/_debug_has_internal_overlap_ops.h>
89
+ #include <ATen/ops/_dimI_ops.h>
90
+ #include <ATen/ops/_dimV_ops.h>
91
+ #include <ATen/ops/_dim_arange_ops.h>
92
+ #include <ATen/ops/_dirichlet_grad_ops.h>
93
+ #include <ATen/ops/_efficient_attention_backward_ops.h>
94
+ #include <ATen/ops/_efficient_attention_forward_ops.h>
95
+ #include <ATen/ops/_efficientzerotensor_ops.h>
96
+ #include <ATen/ops/_embedding_bag_ops.h>
97
+ #include <ATen/ops/_embedding_bag_backward_ops.h>
98
+ #include <ATen/ops/_embedding_bag_dense_backward_ops.h>
99
+ #include <ATen/ops/_embedding_bag_forward_only_ops.h>
100
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h>
101
+ #include <ATen/ops/_embedding_bag_sparse_backward_ops.h>
102
+ #include <ATen/ops/_empty_affine_quantized_ops.h>
103
+ #include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
104
+ #include <ATen/ops/_euclidean_dist_ops.h>
105
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h>
106
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_ops.h>
107
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_ops.h>
108
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h>
109
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h>
110
+ #include <ATen/ops/_fft_c2c_ops.h>
111
+ #include <ATen/ops/_fft_c2r_ops.h>
112
+ #include <ATen/ops/_fft_r2c_ops.h>
113
+ #include <ATen/ops/_fill_mem_eff_dropout_mask_ops.h>
114
+ #include <ATen/ops/_flash_attention_backward_ops.h>
115
+ #include <ATen/ops/_flash_attention_forward_ops.h>
116
+ #include <ATen/ops/_foobar_ops.h>
117
+ #include <ATen/ops/_foreach_abs_ops.h>
118
+ #include <ATen/ops/_foreach_acos_ops.h>
119
+ #include <ATen/ops/_foreach_add_ops.h>
120
+ #include <ATen/ops/_foreach_addcdiv_ops.h>
121
+ #include <ATen/ops/_foreach_addcmul_ops.h>
122
+ #include <ATen/ops/_foreach_asin_ops.h>
123
+ #include <ATen/ops/_foreach_atan_ops.h>
124
+ #include <ATen/ops/_foreach_ceil_ops.h>
125
+ #include <ATen/ops/_foreach_clamp_max_ops.h>
126
+ #include <ATen/ops/_foreach_clamp_min_ops.h>
127
+ #include <ATen/ops/_foreach_copy_ops.h>
128
+ #include <ATen/ops/_foreach_cos_ops.h>
129
+ #include <ATen/ops/_foreach_cosh_ops.h>
130
+ #include <ATen/ops/_foreach_div_ops.h>
131
+ #include <ATen/ops/_foreach_erf_ops.h>
132
+ #include <ATen/ops/_foreach_erfc_ops.h>
133
+ #include <ATen/ops/_foreach_exp_ops.h>
134
+ #include <ATen/ops/_foreach_expm1_ops.h>
135
+ #include <ATen/ops/_foreach_floor_ops.h>
136
+ #include <ATen/ops/_foreach_frac_ops.h>
137
+ #include <ATen/ops/_foreach_lerp_ops.h>
138
+ #include <ATen/ops/_foreach_lgamma_ops.h>
139
+ #include <ATen/ops/_foreach_log_ops.h>
140
+ #include <ATen/ops/_foreach_log10_ops.h>
141
+ #include <ATen/ops/_foreach_log1p_ops.h>
142
+ #include <ATen/ops/_foreach_log2_ops.h>
143
+ #include <ATen/ops/_foreach_maximum_ops.h>
144
+ #include <ATen/ops/_foreach_minimum_ops.h>
145
+ #include <ATen/ops/_foreach_mul_ops.h>
146
+ #include <ATen/ops/_foreach_neg_ops.h>
147
+ #include <ATen/ops/_foreach_norm_ops.h>
148
+ #include <ATen/ops/_foreach_pow_ops.h>
149
+ #include <ATen/ops/_foreach_reciprocal_ops.h>
150
+ #include <ATen/ops/_foreach_round_ops.h>
151
+ #include <ATen/ops/_foreach_sigmoid_ops.h>
152
+ #include <ATen/ops/_foreach_sign_ops.h>
153
+ #include <ATen/ops/_foreach_sin_ops.h>
154
+ #include <ATen/ops/_foreach_sinh_ops.h>
155
+ #include <ATen/ops/_foreach_sqrt_ops.h>
156
+ #include <ATen/ops/_foreach_sub_ops.h>
157
+ #include <ATen/ops/_foreach_tan_ops.h>
158
+ #include <ATen/ops/_foreach_tanh_ops.h>
159
+ #include <ATen/ops/_foreach_trunc_ops.h>
160
+ #include <ATen/ops/_foreach_zero_ops.h>
161
+ #include <ATen/ops/_functional_assert_async_ops.h>
162
+ #include <ATen/ops/_functional_sym_constrain_range_ops.h>
163
+ #include <ATen/ops/_functional_sym_constrain_range_for_size_ops.h>
164
+ #include <ATen/ops/_fused_adam_ops.h>
165
+ #include <ATen/ops/_fused_adamw_ops.h>
166
+ #include <ATen/ops/_fused_dropout_ops.h>
167
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
168
+ #include <ATen/ops/_fused_sdp_choice_ops.h>
169
+ #include <ATen/ops/_fw_primal_ops.h>
170
+ #include <ATen/ops/_fw_primal_copy_ops.h>
171
+ #include <ATen/ops/_gather_sparse_backward_ops.h>
172
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h>
173
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h>
174
+ #include <ATen/ops/_has_compatible_shallow_copy_type_ops.h>
175
+ #include <ATen/ops/_has_same_storage_numel_ops.h>
176
+ #include <ATen/ops/_histogramdd_bin_edges_ops.h>
177
+ #include <ATen/ops/_histogramdd_from_bin_cts_ops.h>
178
+ #include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
179
+ #include <ATen/ops/_index_put_impl_ops.h>
180
+ #include <ATen/ops/_indices_ops.h>
181
+ #include <ATen/ops/_indices_copy_ops.h>
182
+ #include <ATen/ops/_int_mm_ops.h>
183
+ #include <ATen/ops/_is_all_true_ops.h>
184
+ #include <ATen/ops/_is_any_true_ops.h>
185
+ #include <ATen/ops/_is_zerotensor_ops.h>
186
+ #include <ATen/ops/_linalg_check_errors_ops.h>
187
+ #include <ATen/ops/_linalg_det_ops.h>
188
+ #include <ATen/ops/_linalg_eigh_ops.h>
189
+ #include <ATen/ops/_linalg_slogdet_ops.h>
190
+ #include <ATen/ops/_linalg_solve_ex_ops.h>
191
+ #include <ATen/ops/_linalg_svd_ops.h>
192
+ #include <ATen/ops/_local_scalar_dense_ops.h>
193
+ #include <ATen/ops/_log_softmax_ops.h>
194
+ #include <ATen/ops/_log_softmax_backward_data_ops.h>
195
+ #include <ATen/ops/_logcumsumexp_ops.h>
196
+ #include <ATen/ops/_lstm_mps_ops.h>
197
+ #include <ATen/ops/_lu_with_info_ops.h>
198
+ #include <ATen/ops/_make_dep_token_ops.h>
199
+ #include <ATen/ops/_make_dual_ops.h>
200
+ #include <ATen/ops/_make_dual_copy_ops.h>
201
+ #include <ATen/ops/_make_per_channel_quantized_tensor_ops.h>
202
+ #include <ATen/ops/_make_per_tensor_quantized_tensor_ops.h>
203
+ #include <ATen/ops/_masked_scale_ops.h>
204
+ #include <ATen/ops/_masked_softmax_ops.h>
205
+ #include <ATen/ops/_masked_softmax_backward_ops.h>
206
+ #include <ATen/ops/_mixed_dtypes_linear_ops.h>
207
+ #include <ATen/ops/_mkldnn_reshape_ops.h>
208
+ #include <ATen/ops/_mkldnn_transpose_ops.h>
209
+ #include <ATen/ops/_mps_convolution_ops.h>
210
+ #include <ATen/ops/_mps_convolution_transpose_ops.h>
211
+ #include <ATen/ops/_native_batch_norm_legit_ops.h>
212
+ #include <ATen/ops/_native_batch_norm_legit_no_training_ops.h>
213
+ #include <ATen/ops/_native_multi_head_attention_ops.h>
214
+ #include <ATen/ops/_neg_view_ops.h>
215
+ #include <ATen/ops/_neg_view_copy_ops.h>
216
+ #include <ATen/ops/_nested_from_padded_ops.h>
217
+ #include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
218
+ #include <ATen/ops/_nested_select_backward_ops.h>
219
+ #include <ATen/ops/_nested_sum_backward_ops.h>
220
+ #include <ATen/ops/_nested_tensor_from_mask_ops.h>
221
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h>
222
+ #include <ATen/ops/_nested_tensor_from_tensor_list_ops.h>
223
+ #include <ATen/ops/_nested_tensor_size_ops.h>
224
+ #include <ATen/ops/_nested_tensor_softmax_with_shape_ops.h>
225
+ #include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
226
+ #include <ATen/ops/_nested_tensor_strides_ops.h>
227
+ #include <ATen/ops/_nested_view_from_buffer_ops.h>
228
+ #include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
229
+ #include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
230
+ #include <ATen/ops/_nnpack_available_ops.h>
231
+ #include <ATen/ops/_nnpack_spatial_convolution_ops.h>
232
+ #include <ATen/ops/_nnz_ops.h>
233
+ #include <ATen/ops/_pack_padded_sequence_ops.h>
234
+ #include <ATen/ops/_pack_padded_sequence_backward_ops.h>
235
+ #include <ATen/ops/_pad_circular_ops.h>
236
+ #include <ATen/ops/_pad_enum_ops.h>
237
+ #include <ATen/ops/_pad_packed_sequence_ops.h>
238
+ #include <ATen/ops/_pdist_backward_ops.h>
239
+ #include <ATen/ops/_pdist_forward_ops.h>
240
+ #include <ATen/ops/_pin_memory_ops.h>
241
+ #include <ATen/ops/_prelu_kernel_ops.h>
242
+ #include <ATen/ops/_prelu_kernel_backward_ops.h>
243
+ #include <ATen/ops/_propagate_xla_data_ops.h>
244
+ #include <ATen/ops/_remove_batch_dim_ops.h>
245
+ #include <ATen/ops/_reshape_alias_ops.h>
246
+ #include <ATen/ops/_reshape_alias_copy_ops.h>
247
+ #include <ATen/ops/_reshape_copy_ops.h>
248
+ #include <ATen/ops/_reshape_from_tensor_ops.h>
249
+ #include <ATen/ops/_resize_output_ops.h>
250
+ #include <ATen/ops/_rowwise_prune_ops.h>
251
+ #include <ATen/ops/_sample_dirichlet_ops.h>
252
+ #include <ATen/ops/_saturate_weight_to_fp16_ops.h>
253
+ #include <ATen/ops/_scaled_dot_product_attention_math_ops.h>
254
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_ops.h>
255
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h>
256
+ #include <ATen/ops/_scaled_dot_product_flash_attention_ops.h>
257
+ #include <ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h>
258
+ #include <ATen/ops/_scaled_mm_ops.h>
259
+ #include <ATen/ops/_segment_reduce_backward_ops.h>
260
+ #include <ATen/ops/_shape_as_tensor_ops.h>
261
+ #include <ATen/ops/_slow_conv2d_backward_ops.h>
262
+ #include <ATen/ops/_slow_conv2d_forward_ops.h>
263
+ #include <ATen/ops/_sobol_engine_draw_ops.h>
264
+ #include <ATen/ops/_sobol_engine_ff_ops.h>
265
+ #include <ATen/ops/_sobol_engine_initialize_state_ops.h>
266
+ #include <ATen/ops/_sobol_engine_scramble_ops.h>
267
+ #include <ATen/ops/_softmax_ops.h>
268
+ #include <ATen/ops/_softmax_backward_data_ops.h>
269
+ #include <ATen/ops/_sparse_addmm_ops.h>
270
+ #include <ATen/ops/_sparse_broadcast_to_ops.h>
271
+ #include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
272
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe_ops.h>
273
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe_ops.h>
274
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h>
275
+ #include <ATen/ops/_sparse_coo_tensor_unsafe_ops.h>
276
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h>
277
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
278
+ #include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h>
279
+ #include <ATen/ops/_sparse_csr_prod_ops.h>
280
+ #include <ATen/ops/_sparse_csr_sum_ops.h>
281
+ #include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h>
282
+ #include <ATen/ops/_sparse_log_softmax_ops.h>
283
+ #include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
284
+ #include <ATen/ops/_sparse_mask_projection_ops.h>
285
+ #include <ATen/ops/_sparse_mm_ops.h>
286
+ #include <ATen/ops/_sparse_mm_reduce_impl_ops.h>
287
+ #include <ATen/ops/_sparse_mm_reduce_impl_backward_ops.h>
288
+ #include <ATen/ops/_sparse_semi_structured_linear_ops.h>
289
+ #include <ATen/ops/_sparse_softmax_ops.h>
290
+ #include <ATen/ops/_sparse_softmax_backward_data_ops.h>
291
+ #include <ATen/ops/_sparse_sparse_matmul_ops.h>
292
+ #include <ATen/ops/_sparse_sum_ops.h>
293
+ #include <ATen/ops/_sparse_sum_backward_ops.h>
294
+ #include <ATen/ops/_spdiags_ops.h>
295
+ #include <ATen/ops/_stack_ops.h>
296
+ #include <ATen/ops/_standard_gamma_ops.h>
297
+ #include <ATen/ops/_standard_gamma_grad_ops.h>
298
+ #include <ATen/ops/_test_ambiguous_defaults_ops.h>
299
+ #include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
300
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
301
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h>
302
+ #include <ATen/ops/_test_check_tensor_ops.h>
303
+ #include <ATen/ops/_test_functorch_fallback_ops.h>
304
+ #include <ATen/ops/_test_optional_filled_intlist_ops.h>
305
+ #include <ATen/ops/_test_optional_floatlist_ops.h>
306
+ #include <ATen/ops/_test_optional_intlist_ops.h>
307
+ #include <ATen/ops/_test_serialization_subcmul_ops.h>
308
+ #include <ATen/ops/_test_string_default_ops.h>
309
+ #include <ATen/ops/_test_warn_in_autograd_ops.h>
310
+ #include <ATen/ops/_thnn_differentiable_gru_cell_backward_ops.h>
311
+ #include <ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h>
312
+ #include <ATen/ops/_thnn_fused_gru_cell_ops.h>
313
+ #include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
314
+ #include <ATen/ops/_thnn_fused_lstm_cell_ops.h>
315
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_ops.h>
316
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h>
317
+ #include <ATen/ops/_to_copy_ops.h>
318
+ #include <ATen/ops/_to_cpu_ops.h>
319
+ #include <ATen/ops/_to_dense_ops.h>
320
+ #include <ATen/ops/_to_sparse_ops.h>
321
+ #include <ATen/ops/_to_sparse_bsc_ops.h>
322
+ #include <ATen/ops/_to_sparse_bsr_ops.h>
323
+ #include <ATen/ops/_to_sparse_csc_ops.h>
324
+ #include <ATen/ops/_to_sparse_csr_ops.h>
325
+ #include <ATen/ops/_to_sparse_semi_structured_ops.h>
326
+ #include <ATen/ops/_transform_bias_rescale_qkv_ops.h>
327
+ #include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
328
+ #include <ATen/ops/_trilinear_ops.h>
329
+ #include <ATen/ops/_triton_multi_head_attention_ops.h>
330
+ #include <ATen/ops/_triton_scaled_dot_attention_ops.h>
331
+ #include <ATen/ops/_unique_ops.h>
332
+ #include <ATen/ops/_unique2_ops.h>
333
+ #include <ATen/ops/_unpack_dual_ops.h>
334
+ #include <ATen/ops/_unsafe_index_ops.h>
335
+ #include <ATen/ops/_unsafe_index_put_ops.h>
336
+ #include <ATen/ops/_unsafe_view_ops.h>
337
+ #include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
338
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
339
+ #include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
340
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
341
+ #include <ATen/ops/_upsample_nearest_exact1d_ops.h>
342
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
343
+ #include <ATen/ops/_upsample_nearest_exact2d_ops.h>
344
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
345
+ #include <ATen/ops/_upsample_nearest_exact3d_ops.h>
346
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
347
+ #include <ATen/ops/_use_cudnn_ctc_loss_ops.h>
348
+ #include <ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h>
349
+ #include <ATen/ops/_validate_compressed_sparse_indices_ops.h>
350
+ #include <ATen/ops/_validate_sparse_bsc_tensor_args_ops.h>
351
+ #include <ATen/ops/_validate_sparse_bsr_tensor_args_ops.h>
352
+ #include <ATen/ops/_validate_sparse_compressed_tensor_args_ops.h>
353
+ #include <ATen/ops/_validate_sparse_coo_tensor_args_ops.h>
354
+ #include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h>
355
+ #include <ATen/ops/_validate_sparse_csr_tensor_args_ops.h>
356
+ #include <ATen/ops/_values_ops.h>
357
+ #include <ATen/ops/_values_copy_ops.h>
358
+ #include <ATen/ops/_version_ops.h>
359
+ #include <ATen/ops/_weight_int4pack_mm_ops.h>
360
+ #include <ATen/ops/_weight_norm_ops.h>
361
+ #include <ATen/ops/_weight_norm_differentiable_backward_ops.h>
362
+ #include <ATen/ops/_weight_norm_interface_ops.h>
363
+ #include <ATen/ops/_weight_norm_interface_backward_ops.h>
364
+ #include <ATen/ops/abs_ops.h>
365
+ #include <ATen/ops/absolute_ops.h>
366
+ #include <ATen/ops/acos_ops.h>
367
+ #include <ATen/ops/acosh_ops.h>
368
+ #include <ATen/ops/adaptive_avg_pool1d_ops.h>
369
+ #include <ATen/ops/adaptive_avg_pool2d_ops.h>
370
+ #include <ATen/ops/adaptive_avg_pool3d_ops.h>
371
+ #include <ATen/ops/adaptive_avg_pool3d_backward_ops.h>
372
+ #include <ATen/ops/adaptive_max_pool1d_ops.h>
373
+ #include <ATen/ops/adaptive_max_pool2d_ops.h>
374
+ #include <ATen/ops/adaptive_max_pool2d_backward_ops.h>
375
+ #include <ATen/ops/adaptive_max_pool3d_ops.h>
376
+ #include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
377
+ #include <ATen/ops/add_ops.h>
378
+ #include <ATen/ops/addbmm_ops.h>
379
+ #include <ATen/ops/addcdiv_ops.h>
380
+ #include <ATen/ops/addcmul_ops.h>
381
+ #include <ATen/ops/addmm_ops.h>
382
+ #include <ATen/ops/addmv_ops.h>
383
+ #include <ATen/ops/addr_ops.h>
384
+ #include <ATen/ops/adjoint_ops.h>
385
+ #include <ATen/ops/affine_grid_generator_ops.h>
386
+ #include <ATen/ops/affine_grid_generator_backward_ops.h>
387
+ #include <ATen/ops/alias_ops.h>
388
+ #include <ATen/ops/alias_copy_ops.h>
389
+ #include <ATen/ops/align_as_ops.h>
390
+ #include <ATen/ops/align_tensors_ops.h>
391
+ #include <ATen/ops/align_to_ops.h>
392
+ #include <ATen/ops/all_ops.h>
393
+ #include <ATen/ops/allclose_ops.h>
394
+ #include <ATen/ops/alpha_dropout_ops.h>
395
+ #include <ATen/ops/amax_ops.h>
396
+ #include <ATen/ops/amin_ops.h>
397
+ #include <ATen/ops/aminmax_ops.h>
398
+ #include <ATen/ops/and_ops.h>
399
+ #include <ATen/ops/angle_ops.h>
400
+ #include <ATen/ops/any_ops.h>
401
+ #include <ATen/ops/arange_ops.h>
402
+ #include <ATen/ops/arccos_ops.h>
403
+ #include <ATen/ops/arccosh_ops.h>
404
+ #include <ATen/ops/arcsin_ops.h>
405
+ #include <ATen/ops/arcsinh_ops.h>
406
+ #include <ATen/ops/arctan_ops.h>
407
+ #include <ATen/ops/arctan2_ops.h>
408
+ #include <ATen/ops/arctanh_ops.h>
409
+ #include <ATen/ops/argmax_ops.h>
410
+ #include <ATen/ops/argmin_ops.h>
411
+ #include <ATen/ops/argsort_ops.h>
412
+ #include <ATen/ops/argwhere_ops.h>
413
+ #include <ATen/ops/as_strided_ops.h>
414
+ #include <ATen/ops/as_strided_copy_ops.h>
415
+ #include <ATen/ops/as_strided_scatter_ops.h>
416
+ #include <ATen/ops/asin_ops.h>
417
+ #include <ATen/ops/asinh_ops.h>
418
+ #include <ATen/ops/atan_ops.h>
419
+ #include <ATen/ops/atan2_ops.h>
420
+ #include <ATen/ops/atanh_ops.h>
421
+ #include <ATen/ops/atleast_1d_ops.h>
422
+ #include <ATen/ops/atleast_2d_ops.h>
423
+ #include <ATen/ops/atleast_3d_ops.h>
424
+ #include <ATen/ops/avg_pool1d_ops.h>
425
+ #include <ATen/ops/avg_pool2d_ops.h>
426
+ #include <ATen/ops/avg_pool2d_backward_ops.h>
427
+ #include <ATen/ops/avg_pool3d_ops.h>
428
+ #include <ATen/ops/avg_pool3d_backward_ops.h>
429
+ #include <ATen/ops/baddbmm_ops.h>
430
+ #include <ATen/ops/bartlett_window_ops.h>
431
+ #include <ATen/ops/batch_norm_ops.h>
432
+ #include <ATen/ops/batch_norm_backward_elemt_ops.h>
433
+ #include <ATen/ops/batch_norm_backward_reduce_ops.h>
434
+ #include <ATen/ops/batch_norm_elemt_ops.h>
435
+ #include <ATen/ops/batch_norm_gather_stats_ops.h>
436
+ #include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
437
+ #include <ATen/ops/batch_norm_stats_ops.h>
438
+ #include <ATen/ops/batch_norm_update_stats_ops.h>
439
+ #include <ATen/ops/bernoulli_ops.h>
440
+ #include <ATen/ops/bilinear_ops.h>
441
+ #include <ATen/ops/binary_cross_entropy_ops.h>
442
+ #include <ATen/ops/binary_cross_entropy_backward_ops.h>
443
+ #include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
444
+ #include <ATen/ops/bincount_ops.h>
445
+ #include <ATen/ops/binomial_ops.h>
446
+ #include <ATen/ops/bitwise_and_ops.h>
447
+ #include <ATen/ops/bitwise_left_shift_ops.h>
448
+ #include <ATen/ops/bitwise_not_ops.h>
449
+ #include <ATen/ops/bitwise_or_ops.h>
450
+ #include <ATen/ops/bitwise_right_shift_ops.h>
451
+ #include <ATen/ops/bitwise_xor_ops.h>
452
+ #include <ATen/ops/blackman_window_ops.h>
453
+ #include <ATen/ops/block_diag_ops.h>
454
+ #include <ATen/ops/bmm_ops.h>
455
+ #include <ATen/ops/broadcast_tensors_ops.h>
456
+ #include <ATen/ops/broadcast_to_ops.h>
457
+ #include <ATen/ops/bucketize_ops.h>
458
+ #include <ATen/ops/can_cast_ops.h>
459
+ #include <ATen/ops/cartesian_prod_ops.h>
460
+ #include <ATen/ops/cat_ops.h>
461
+ #include <ATen/ops/cauchy_ops.h>
462
+ #include <ATen/ops/ccol_indices_ops.h>
463
+ #include <ATen/ops/ccol_indices_copy_ops.h>
464
+ #include <ATen/ops/cdist_ops.h>
465
+ #include <ATen/ops/ceil_ops.h>
466
+ #include <ATen/ops/celu_ops.h>
467
+ #include <ATen/ops/chain_matmul_ops.h>
468
+ #include <ATen/ops/chalf_ops.h>
469
+ #include <ATen/ops/channel_shuffle_ops.h>
470
+ #include <ATen/ops/cholesky_ops.h>
471
+ #include <ATen/ops/cholesky_inverse_ops.h>
472
+ #include <ATen/ops/cholesky_solve_ops.h>
473
+ #include <ATen/ops/choose_qparams_optimized_ops.h>
474
+ #include <ATen/ops/chunk_ops.h>
475
+ #include <ATen/ops/clamp_ops.h>
476
+ #include <ATen/ops/clamp_max_ops.h>
477
+ #include <ATen/ops/clamp_min_ops.h>
478
+ #include <ATen/ops/clip_ops.h>
479
+ #include <ATen/ops/clone_ops.h>
480
+ #include <ATen/ops/coalesce_ops.h>
481
+ #include <ATen/ops/col2im_ops.h>
482
+ #include <ATen/ops/col_indices_ops.h>
483
+ #include <ATen/ops/col_indices_copy_ops.h>
484
+ #include <ATen/ops/column_stack_ops.h>
485
+ #include <ATen/ops/combinations_ops.h>
486
+ #include <ATen/ops/complex_ops.h>
487
+ #include <ATen/ops/concat_ops.h>
488
+ #include <ATen/ops/concatenate_ops.h>
489
+ #include <ATen/ops/conj_ops.h>
490
+ #include <ATen/ops/conj_physical_ops.h>
491
+ #include <ATen/ops/constant_pad_nd_ops.h>
492
+ #include <ATen/ops/contiguous_ops.h>
493
+ #include <ATen/ops/conv1d_ops.h>
494
+ #include <ATen/ops/conv2d_ops.h>
495
+ #include <ATen/ops/conv3d_ops.h>
496
+ #include <ATen/ops/conv_depthwise3d_ops.h>
497
+ #include <ATen/ops/conv_tbc_ops.h>
498
+ #include <ATen/ops/conv_tbc_backward_ops.h>
499
+ #include <ATen/ops/conv_transpose1d_ops.h>
500
+ #include <ATen/ops/conv_transpose2d_ops.h>
501
+ #include <ATen/ops/conv_transpose3d_ops.h>
502
+ #include <ATen/ops/convolution_ops.h>
503
+ #include <ATen/ops/convolution_backward_ops.h>
504
+ #include <ATen/ops/convolution_backward_overrideable_ops.h>
505
+ #include <ATen/ops/convolution_overrideable_ops.h>
506
+ #include <ATen/ops/copy_ops.h>
507
+ #include <ATen/ops/copy_sparse_to_sparse_ops.h>
508
+ #include <ATen/ops/copysign_ops.h>
509
+ #include <ATen/ops/corrcoef_ops.h>
510
+ #include <ATen/ops/cos_ops.h>
511
+ #include <ATen/ops/cosh_ops.h>
512
+ #include <ATen/ops/cosine_embedding_loss_ops.h>
513
+ #include <ATen/ops/cosine_similarity_ops.h>
514
+ #include <ATen/ops/count_nonzero_ops.h>
515
+ #include <ATen/ops/cov_ops.h>
516
+ #include <ATen/ops/cross_ops.h>
517
+ #include <ATen/ops/cross_entropy_loss_ops.h>
518
+ #include <ATen/ops/crow_indices_ops.h>
519
+ #include <ATen/ops/crow_indices_copy_ops.h>
520
+ #include <ATen/ops/ctc_loss_ops.h>
521
+ #include <ATen/ops/cudnn_affine_grid_generator_ops.h>
522
+ #include <ATen/ops/cudnn_affine_grid_generator_backward_ops.h>
523
+ #include <ATen/ops/cudnn_batch_norm_ops.h>
524
+ #include <ATen/ops/cudnn_batch_norm_backward_ops.h>
525
+ #include <ATen/ops/cudnn_convolution_ops.h>
526
+ #include <ATen/ops/cudnn_convolution_add_relu_ops.h>
527
+ #include <ATen/ops/cudnn_convolution_relu_ops.h>
528
+ #include <ATen/ops/cudnn_convolution_transpose_ops.h>
529
+ #include <ATen/ops/cudnn_grid_sampler_ops.h>
530
+ #include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
531
+ #include <ATen/ops/cudnn_is_acceptable_ops.h>
532
+ #include <ATen/ops/cummax_ops.h>
533
+ #include <ATen/ops/cummaxmin_backward_ops.h>
534
+ #include <ATen/ops/cummin_ops.h>
535
+ #include <ATen/ops/cumprod_ops.h>
536
+ #include <ATen/ops/cumprod_backward_ops.h>
537
+ #include <ATen/ops/cumsum_ops.h>
538
+ #include <ATen/ops/cumulative_trapezoid_ops.h>
539
+ #include <ATen/ops/data_ops.h>
540
+ #include <ATen/ops/deg2rad_ops.h>
541
+ #include <ATen/ops/dense_dim_ops.h>
542
+ #include <ATen/ops/dequantize_ops.h>
543
+ #include <ATen/ops/det_ops.h>
544
+ #include <ATen/ops/detach_ops.h>
545
+ #include <ATen/ops/detach_copy_ops.h>
546
+ #include <ATen/ops/diag_ops.h>
547
+ #include <ATen/ops/diag_embed_ops.h>
548
+ #include <ATen/ops/diagflat_ops.h>
549
+ #include <ATen/ops/diagonal_ops.h>
550
+ #include <ATen/ops/diagonal_backward_ops.h>
551
+ #include <ATen/ops/diagonal_copy_ops.h>
552
+ #include <ATen/ops/diagonal_scatter_ops.h>
553
+ #include <ATen/ops/diff_ops.h>
554
+ #include <ATen/ops/digamma_ops.h>
555
+ #include <ATen/ops/dist_ops.h>
556
+ #include <ATen/ops/div_ops.h>
557
+ #include <ATen/ops/divide_ops.h>
558
+ #include <ATen/ops/dot_ops.h>
559
+ #include <ATen/ops/dropout_ops.h>
560
+ #include <ATen/ops/dsplit_ops.h>
561
+ #include <ATen/ops/dstack_ops.h>
562
+ #include <ATen/ops/einsum_ops.h>
563
+ #include <ATen/ops/elu_ops.h>
564
+ #include <ATen/ops/elu_backward_ops.h>
565
+ #include <ATen/ops/embedding_ops.h>
566
+ #include <ATen/ops/embedding_backward_ops.h>
567
+ #include <ATen/ops/embedding_bag_ops.h>
568
+ #include <ATen/ops/embedding_dense_backward_ops.h>
569
+ #include <ATen/ops/embedding_renorm_ops.h>
570
+ #include <ATen/ops/embedding_sparse_backward_ops.h>
571
+ #include <ATen/ops/empty_ops.h>
572
+ #include <ATen/ops/empty_like_ops.h>
573
+ #include <ATen/ops/empty_permuted_ops.h>
574
+ #include <ATen/ops/empty_quantized_ops.h>
575
+ #include <ATen/ops/empty_strided_ops.h>
576
+ #include <ATen/ops/eq_ops.h>
577
+ #include <ATen/ops/equal_ops.h>
578
+ #include <ATen/ops/erf_ops.h>
579
+ #include <ATen/ops/erfc_ops.h>
580
+ #include <ATen/ops/erfinv_ops.h>
581
+ #include <ATen/ops/exp_ops.h>
582
+ #include <ATen/ops/exp2_ops.h>
583
+ #include <ATen/ops/expand_ops.h>
584
+ #include <ATen/ops/expand_as_ops.h>
585
+ #include <ATen/ops/expand_copy_ops.h>
586
+ #include <ATen/ops/expm1_ops.h>
587
+ #include <ATen/ops/exponential_ops.h>
588
+ #include <ATen/ops/eye_ops.h>
589
+ #include <ATen/ops/fake_quantize_per_channel_affine_ops.h>
590
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h>
591
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h>
592
+ #include <ATen/ops/fake_quantize_per_tensor_affine_ops.h>
593
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
594
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_ops.h>
595
+ #include <ATen/ops/fbgemm_linear_fp16_weight_ops.h>
596
+ #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h>
597
+ #include <ATen/ops/fbgemm_linear_int8_weight_ops.h>
598
+ #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h>
599
+ #include <ATen/ops/fbgemm_linear_quantize_weight_ops.h>
600
+ #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_ops.h>
601
+ #include <ATen/ops/fbgemm_pack_quantized_matrix_ops.h>
602
+ #include <ATen/ops/feature_alpha_dropout_ops.h>
603
+ #include <ATen/ops/feature_dropout_ops.h>
604
+ #include <ATen/ops/fft_fft_ops.h>
605
+ #include <ATen/ops/fft_fft2_ops.h>
606
+ #include <ATen/ops/fft_fftfreq_ops.h>
607
+ #include <ATen/ops/fft_fftn_ops.h>
608
+ #include <ATen/ops/fft_fftshift_ops.h>
609
+ #include <ATen/ops/fft_hfft_ops.h>
610
+ #include <ATen/ops/fft_hfft2_ops.h>
611
+ #include <ATen/ops/fft_hfftn_ops.h>
612
+ #include <ATen/ops/fft_ifft_ops.h>
613
+ #include <ATen/ops/fft_ifft2_ops.h>
614
+ #include <ATen/ops/fft_ifftn_ops.h>
615
+ #include <ATen/ops/fft_ifftshift_ops.h>
616
+ #include <ATen/ops/fft_ihfft_ops.h>
617
+ #include <ATen/ops/fft_ihfft2_ops.h>
618
+ #include <ATen/ops/fft_ihfftn_ops.h>
619
+ #include <ATen/ops/fft_irfft_ops.h>
620
+ #include <ATen/ops/fft_irfft2_ops.h>
621
+ #include <ATen/ops/fft_irfftn_ops.h>
622
+ #include <ATen/ops/fft_rfft_ops.h>
623
+ #include <ATen/ops/fft_rfft2_ops.h>
624
+ #include <ATen/ops/fft_rfftfreq_ops.h>
625
+ #include <ATen/ops/fft_rfftn_ops.h>
626
+ #include <ATen/ops/fill_ops.h>
627
+ #include <ATen/ops/fill_diagonal_ops.h>
628
+ #include <ATen/ops/fix_ops.h>
629
+ #include <ATen/ops/flatten_ops.h>
630
+ #include <ATen/ops/flatten_dense_tensors_ops.h>
631
+ #include <ATen/ops/flip_ops.h>
632
+ #include <ATen/ops/fliplr_ops.h>
633
+ #include <ATen/ops/flipud_ops.h>
634
+ #include <ATen/ops/float_power_ops.h>
635
+ #include <ATen/ops/floor_ops.h>
636
+ #include <ATen/ops/floor_divide_ops.h>
637
+ #include <ATen/ops/fmax_ops.h>
638
+ #include <ATen/ops/fmin_ops.h>
639
+ #include <ATen/ops/fmod_ops.h>
640
+ #include <ATen/ops/frac_ops.h>
641
+ #include <ATen/ops/fractional_max_pool2d_ops.h>
642
+ #include <ATen/ops/fractional_max_pool2d_backward_ops.h>
643
+ #include <ATen/ops/fractional_max_pool3d_ops.h>
644
+ #include <ATen/ops/fractional_max_pool3d_backward_ops.h>
645
+ #include <ATen/ops/frexp_ops.h>
646
+ #include <ATen/ops/frobenius_norm_ops.h>
647
+ #include <ATen/ops/from_file_ops.h>
648
+ #include <ATen/ops/full_ops.h>
649
+ #include <ATen/ops/full_like_ops.h>
650
+ #include <ATen/ops/fused_moving_avg_obs_fake_quant_ops.h>
651
+ #include <ATen/ops/gather_ops.h>
652
+ #include <ATen/ops/gather_backward_ops.h>
653
+ #include <ATen/ops/gcd_ops.h>
654
+ #include <ATen/ops/ge_ops.h>
655
+ #include <ATen/ops/gelu_ops.h>
656
+ #include <ATen/ops/gelu_backward_ops.h>
657
+ #include <ATen/ops/geometric_ops.h>
658
+ #include <ATen/ops/geqrf_ops.h>
659
+ #include <ATen/ops/ger_ops.h>
660
+ #include <ATen/ops/glu_ops.h>
661
+ #include <ATen/ops/glu_backward_ops.h>
662
+ #include <ATen/ops/glu_backward_jvp_ops.h>
663
+ #include <ATen/ops/glu_jvp_ops.h>
664
+ #include <ATen/ops/gradient_ops.h>
665
+ #include <ATen/ops/greater_ops.h>
666
+ #include <ATen/ops/greater_equal_ops.h>
667
+ #include <ATen/ops/grid_sampler_ops.h>
668
+ #include <ATen/ops/grid_sampler_2d_ops.h>
669
+ #include <ATen/ops/grid_sampler_2d_backward_ops.h>
670
+ #include <ATen/ops/grid_sampler_3d_ops.h>
671
+ #include <ATen/ops/grid_sampler_3d_backward_ops.h>
672
+ #include <ATen/ops/group_norm_ops.h>
673
+ #include <ATen/ops/gru_ops.h>
674
+ #include <ATen/ops/gru_cell_ops.h>
675
+ #include <ATen/ops/gt_ops.h>
676
+ #include <ATen/ops/hamming_window_ops.h>
677
+ #include <ATen/ops/hann_window_ops.h>
678
+ #include <ATen/ops/hardshrink_ops.h>
679
+ #include <ATen/ops/hardshrink_backward_ops.h>
680
+ #include <ATen/ops/hardsigmoid_ops.h>
681
+ #include <ATen/ops/hardsigmoid_backward_ops.h>
682
+ #include <ATen/ops/hardswish_ops.h>
683
+ #include <ATen/ops/hardswish_backward_ops.h>
684
+ #include <ATen/ops/hardtanh_ops.h>
685
+ #include <ATen/ops/hardtanh_backward_ops.h>
686
+ #include <ATen/ops/heaviside_ops.h>
687
+ #include <ATen/ops/hinge_embedding_loss_ops.h>
688
+ #include <ATen/ops/histc_ops.h>
689
+ #include <ATen/ops/histogram_ops.h>
690
+ #include <ATen/ops/histogramdd_ops.h>
691
+ #include <ATen/ops/hsplit_ops.h>
692
+ #include <ATen/ops/hspmm_ops.h>
693
+ #include <ATen/ops/hstack_ops.h>
694
+ #include <ATen/ops/huber_loss_ops.h>
695
+ #include <ATen/ops/huber_loss_backward_ops.h>
696
+ #include <ATen/ops/hypot_ops.h>
697
+ #include <ATen/ops/i0_ops.h>
698
+ #include <ATen/ops/igamma_ops.h>
699
+ #include <ATen/ops/igammac_ops.h>
700
+ #include <ATen/ops/im2col_ops.h>
701
+ #include <ATen/ops/imag_ops.h>
702
+ #include <ATen/ops/index_ops.h>
703
+ #include <ATen/ops/index_add_ops.h>
704
+ #include <ATen/ops/index_copy_ops.h>
705
+ #include <ATen/ops/index_fill_ops.h>
706
+ #include <ATen/ops/index_put_ops.h>
707
+ #include <ATen/ops/index_reduce_ops.h>
708
+ #include <ATen/ops/index_select_ops.h>
709
+ #include <ATen/ops/index_select_backward_ops.h>
710
+ #include <ATen/ops/indices_ops.h>
711
+ #include <ATen/ops/indices_copy_ops.h>
712
+ #include <ATen/ops/infinitely_differentiable_gelu_backward_ops.h>
713
+ #include <ATen/ops/inner_ops.h>
714
+ #include <ATen/ops/instance_norm_ops.h>
715
+ #include <ATen/ops/int_repr_ops.h>
716
+ #include <ATen/ops/inverse_ops.h>
717
+ #include <ATen/ops/is_coalesced_ops.h>
718
+ #include <ATen/ops/is_complex_ops.h>
719
+ #include <ATen/ops/is_conj_ops.h>
720
+ #include <ATen/ops/is_distributed_ops.h>
721
+ #include <ATen/ops/is_floating_point_ops.h>
722
+ #include <ATen/ops/is_inference_ops.h>
723
+ #include <ATen/ops/is_leaf_ops.h>
724
+ #include <ATen/ops/is_neg_ops.h>
725
+ #include <ATen/ops/is_nonzero_ops.h>
726
+ #include <ATen/ops/is_pinned_ops.h>
727
+ #include <ATen/ops/is_same_size_ops.h>
728
+ #include <ATen/ops/is_set_to_ops.h>
729
+ #include <ATen/ops/is_signed_ops.h>
730
+ #include <ATen/ops/is_vulkan_available_ops.h>
731
+ #include <ATen/ops/isclose_ops.h>
732
+ #include <ATen/ops/isfinite_ops.h>
733
+ #include <ATen/ops/isin_ops.h>
734
+ #include <ATen/ops/isinf_ops.h>
735
+ #include <ATen/ops/isnan_ops.h>
736
+ #include <ATen/ops/isneginf_ops.h>
737
+ #include <ATen/ops/isposinf_ops.h>
738
+ #include <ATen/ops/isreal_ops.h>
739
+ #include <ATen/ops/istft_ops.h>
740
+ #include <ATen/ops/item_ops.h>
741
+ #include <ATen/ops/kaiser_window_ops.h>
742
+ #include <ATen/ops/kl_div_ops.h>
743
+ #include <ATen/ops/kron_ops.h>
744
+ #include <ATen/ops/kthvalue_ops.h>
745
+ #include <ATen/ops/l1_loss_ops.h>
746
+ #include <ATen/ops/layer_norm_ops.h>
747
+ #include <ATen/ops/lcm_ops.h>
748
+ #include <ATen/ops/ldexp_ops.h>
749
+ #include <ATen/ops/le_ops.h>
750
+ #include <ATen/ops/leaky_relu_ops.h>
751
+ #include <ATen/ops/leaky_relu_backward_ops.h>
752
+ #include <ATen/ops/lerp_ops.h>
753
+ #include <ATen/ops/less_ops.h>
754
+ #include <ATen/ops/less_equal_ops.h>
755
+ #include <ATen/ops/lgamma_ops.h>
756
+ #include <ATen/ops/lift_ops.h>
757
+ #include <ATen/ops/lift_fresh_ops.h>
758
+ #include <ATen/ops/lift_fresh_copy_ops.h>
759
+ #include <ATen/ops/linalg_cholesky_ops.h>
760
+ #include <ATen/ops/linalg_cholesky_ex_ops.h>
761
+ #include <ATen/ops/linalg_cond_ops.h>
762
+ #include <ATen/ops/linalg_cross_ops.h>
763
+ #include <ATen/ops/linalg_det_ops.h>
764
+ #include <ATen/ops/linalg_diagonal_ops.h>
765
+ #include <ATen/ops/linalg_eig_ops.h>
766
+ #include <ATen/ops/linalg_eigh_ops.h>
767
+ #include <ATen/ops/linalg_eigvals_ops.h>
768
+ #include <ATen/ops/linalg_eigvalsh_ops.h>
769
+ #include <ATen/ops/linalg_householder_product_ops.h>
770
+ #include <ATen/ops/linalg_inv_ops.h>
771
+ #include <ATen/ops/linalg_inv_ex_ops.h>
772
+ #include <ATen/ops/linalg_ldl_factor_ops.h>
773
+ #include <ATen/ops/linalg_ldl_factor_ex_ops.h>
774
+ #include <ATen/ops/linalg_ldl_solve_ops.h>
775
+ #include <ATen/ops/linalg_lstsq_ops.h>
776
+ #include <ATen/ops/linalg_lu_ops.h>
777
+ #include <ATen/ops/linalg_lu_factor_ops.h>
778
+ #include <ATen/ops/linalg_lu_factor_ex_ops.h>
779
+ #include <ATen/ops/linalg_lu_solve_ops.h>
780
+ #include <ATen/ops/linalg_matmul_ops.h>
781
+ #include <ATen/ops/linalg_matrix_exp_ops.h>
782
+ #include <ATen/ops/linalg_matrix_norm_ops.h>
783
+ #include <ATen/ops/linalg_matrix_power_ops.h>
784
+ #include <ATen/ops/linalg_matrix_rank_ops.h>
785
+ #include <ATen/ops/linalg_multi_dot_ops.h>
786
+ #include <ATen/ops/linalg_norm_ops.h>
787
+ #include <ATen/ops/linalg_pinv_ops.h>
788
+ #include <ATen/ops/linalg_qr_ops.h>
789
+ #include <ATen/ops/linalg_slogdet_ops.h>
790
+ #include <ATen/ops/linalg_solve_ops.h>
791
+ #include <ATen/ops/linalg_solve_ex_ops.h>
792
+ #include <ATen/ops/linalg_solve_triangular_ops.h>
793
+ #include <ATen/ops/linalg_svd_ops.h>
794
+ #include <ATen/ops/linalg_svdvals_ops.h>
795
+ #include <ATen/ops/linalg_tensorinv_ops.h>
796
+ #include <ATen/ops/linalg_tensorsolve_ops.h>
797
+ #include <ATen/ops/linalg_vander_ops.h>
798
+ #include <ATen/ops/linalg_vecdot_ops.h>
799
+ #include <ATen/ops/linalg_vector_norm_ops.h>
800
+ #include <ATen/ops/linear_ops.h>
801
+ #include <ATen/ops/linear_backward_ops.h>
802
+ #include <ATen/ops/linspace_ops.h>
803
+ #include <ATen/ops/log_ops.h>
804
+ #include <ATen/ops/log10_ops.h>
805
+ #include <ATen/ops/log1p_ops.h>
806
+ #include <ATen/ops/log2_ops.h>
807
+ #include <ATen/ops/log_normal_ops.h>
808
+ #include <ATen/ops/log_sigmoid_ops.h>
809
+ #include <ATen/ops/log_sigmoid_backward_ops.h>
810
+ #include <ATen/ops/log_sigmoid_forward_ops.h>
811
+ #include <ATen/ops/log_softmax_ops.h>
812
+ #include <ATen/ops/logaddexp_ops.h>
813
+ #include <ATen/ops/logaddexp2_ops.h>
814
+ #include <ATen/ops/logcumsumexp_ops.h>
815
+ #include <ATen/ops/logdet_ops.h>
816
+ #include <ATen/ops/logical_and_ops.h>
817
+ #include <ATen/ops/logical_not_ops.h>
818
+ #include <ATen/ops/logical_or_ops.h>
819
+ #include <ATen/ops/logical_xor_ops.h>
820
+ #include <ATen/ops/logit_ops.h>
821
+ #include <ATen/ops/logit_backward_ops.h>
822
+ #include <ATen/ops/logspace_ops.h>
823
+ #include <ATen/ops/logsumexp_ops.h>
824
+ #include <ATen/ops/lshift_ops.h>
825
+ #include <ATen/ops/lstm_ops.h>
826
+ #include <ATen/ops/lstm_cell_ops.h>
827
+ #include <ATen/ops/lstm_mps_backward_ops.h>
828
+ #include <ATen/ops/lt_ops.h>
829
+ #include <ATen/ops/lu_solve_ops.h>
830
+ #include <ATen/ops/lu_unpack_ops.h>
831
+ #include <ATen/ops/mH_ops.h>
832
+ #include <ATen/ops/mT_ops.h>
833
+ #include <ATen/ops/margin_ranking_loss_ops.h>
834
+ #include <ATen/ops/masked_fill_ops.h>
835
+ #include <ATen/ops/masked_scatter_ops.h>
836
+ #include <ATen/ops/masked_scatter_backward_ops.h>
837
+ #include <ATen/ops/masked_select_ops.h>
838
+ #include <ATen/ops/masked_select_backward_ops.h>
839
+ #include <ATen/ops/matmul_ops.h>
840
+ #include <ATen/ops/matmul_backward_ops.h>
841
+ #include <ATen/ops/matrix_H_ops.h>
842
+ #include <ATen/ops/matrix_exp_ops.h>
843
+ #include <ATen/ops/matrix_exp_backward_ops.h>
844
+ #include <ATen/ops/matrix_power_ops.h>
845
+ #include <ATen/ops/max_ops.h>
846
+ #include <ATen/ops/max_pool1d_ops.h>
847
+ #include <ATen/ops/max_pool1d_with_indices_ops.h>
848
+ #include <ATen/ops/max_pool2d_ops.h>
849
+ #include <ATen/ops/max_pool2d_backward_ops.h>
850
+ #include <ATen/ops/max_pool2d_with_indices_ops.h>
851
+ #include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
852
+ #include <ATen/ops/max_pool3d_ops.h>
853
+ #include <ATen/ops/max_pool3d_with_indices_ops.h>
854
+ #include <ATen/ops/max_pool3d_with_indices_backward_ops.h>
855
+ #include <ATen/ops/max_unpool2d_ops.h>
856
+ #include <ATen/ops/max_unpool3d_ops.h>
857
+ #include <ATen/ops/maximum_ops.h>
858
+ #include <ATen/ops/mean_ops.h>
859
+ #include <ATen/ops/median_ops.h>
860
+ #include <ATen/ops/meshgrid_ops.h>
861
+ #include <ATen/ops/min_ops.h>
862
+ #include <ATen/ops/minimum_ops.h>
863
+ #include <ATen/ops/miopen_batch_norm_ops.h>
864
+ #include <ATen/ops/miopen_batch_norm_backward_ops.h>
865
+ #include <ATen/ops/miopen_convolution_ops.h>
866
+ #include <ATen/ops/miopen_convolution_add_relu_ops.h>
867
+ #include <ATen/ops/miopen_convolution_relu_ops.h>
868
+ #include <ATen/ops/miopen_convolution_transpose_ops.h>
869
+ #include <ATen/ops/miopen_depthwise_convolution_ops.h>
870
+ #include <ATen/ops/miopen_rnn_ops.h>
871
+ #include <ATen/ops/miopen_rnn_backward_ops.h>
872
+ #include <ATen/ops/mish_ops.h>
873
+ #include <ATen/ops/mish_backward_ops.h>
874
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h>
875
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
876
+ #include <ATen/ops/mkldnn_convolution_ops.h>
877
+ #include <ATen/ops/mkldnn_linear_ops.h>
878
+ #include <ATen/ops/mkldnn_linear_backward_ops.h>
879
+ #include <ATen/ops/mkldnn_linear_backward_input_ops.h>
880
+ #include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
881
+ #include <ATen/ops/mkldnn_max_pool2d_ops.h>
882
+ #include <ATen/ops/mkldnn_max_pool2d_backward_ops.h>
883
+ #include <ATen/ops/mkldnn_max_pool3d_ops.h>
884
+ #include <ATen/ops/mkldnn_max_pool3d_backward_ops.h>
885
+ #include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h>
886
+ #include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h>
887
+ #include <ATen/ops/mkldnn_rnn_layer_ops.h>
888
+ #include <ATen/ops/mkldnn_rnn_layer_backward_ops.h>
889
+ #include <ATen/ops/mm_ops.h>
890
+ #include <ATen/ops/mode_ops.h>
891
+ #include <ATen/ops/moveaxis_ops.h>
892
+ #include <ATen/ops/movedim_ops.h>
893
+ #include <ATen/ops/mps_convolution_backward_ops.h>
894
+ #include <ATen/ops/mps_convolution_transpose_backward_ops.h>
895
+ #include <ATen/ops/mse_loss_ops.h>
896
+ #include <ATen/ops/mse_loss_backward_ops.h>
897
+ #include <ATen/ops/msort_ops.h>
898
+ #include <ATen/ops/mul_ops.h>
899
+ #include <ATen/ops/multi_margin_loss_ops.h>
900
+ #include <ATen/ops/multi_margin_loss_backward_ops.h>
901
+ #include <ATen/ops/multilabel_margin_loss_ops.h>
902
+ #include <ATen/ops/multilabel_margin_loss_backward_ops.h>
903
+ #include <ATen/ops/multilabel_margin_loss_forward_ops.h>
904
+ #include <ATen/ops/multinomial_ops.h>
905
+ #include <ATen/ops/multiply_ops.h>
906
+ #include <ATen/ops/mv_ops.h>
907
+ #include <ATen/ops/mvlgamma_ops.h>
908
+ #include <ATen/ops/nan_to_num_ops.h>
909
+ #include <ATen/ops/nanmean_ops.h>
910
+ #include <ATen/ops/nanmedian_ops.h>
911
+ #include <ATen/ops/nanquantile_ops.h>
912
+ #include <ATen/ops/nansum_ops.h>
913
+ #include <ATen/ops/narrow_ops.h>
914
+ #include <ATen/ops/narrow_copy_ops.h>
915
+ #include <ATen/ops/native_batch_norm_ops.h>
916
+ #include <ATen/ops/native_batch_norm_backward_ops.h>
917
+ #include <ATen/ops/native_channel_shuffle_ops.h>
918
+ #include <ATen/ops/native_dropout_ops.h>
919
+ #include <ATen/ops/native_dropout_backward_ops.h>
920
+ #include <ATen/ops/native_group_norm_ops.h>
921
+ #include <ATen/ops/native_group_norm_backward_ops.h>
922
+ #include <ATen/ops/native_layer_norm_ops.h>
923
+ #include <ATen/ops/native_layer_norm_backward_ops.h>
924
+ #include <ATen/ops/native_norm_ops.h>
925
+ #include <ATen/ops/ne_ops.h>
926
+ #include <ATen/ops/neg_ops.h>
927
+ #include <ATen/ops/negative_ops.h>
928
+ #include <ATen/ops/nested_to_padded_tensor_ops.h>
929
+ #include <ATen/ops/new_empty_ops.h>
930
+ #include <ATen/ops/new_empty_strided_ops.h>
931
+ #include <ATen/ops/new_full_ops.h>
932
+ #include <ATen/ops/new_ones_ops.h>
933
+ #include <ATen/ops/new_zeros_ops.h>
934
+ #include <ATen/ops/nextafter_ops.h>
935
+ #include <ATen/ops/nll_loss_ops.h>
936
+ #include <ATen/ops/nll_loss2d_ops.h>
937
+ #include <ATen/ops/nll_loss2d_backward_ops.h>
938
+ #include <ATen/ops/nll_loss2d_forward_ops.h>
939
+ #include <ATen/ops/nll_loss_backward_ops.h>
940
+ #include <ATen/ops/nll_loss_forward_ops.h>
941
+ #include <ATen/ops/nll_loss_nd_ops.h>
942
+ #include <ATen/ops/nonzero_ops.h>
943
+ #include <ATen/ops/nonzero_numpy_ops.h>
944
+ #include <ATen/ops/nonzero_static_ops.h>
945
+ #include <ATen/ops/norm_ops.h>
946
+ #include <ATen/ops/norm_except_dim_ops.h>
947
+ #include <ATen/ops/normal_ops.h>
948
+ #include <ATen/ops/not_equal_ops.h>
949
+ #include <ATen/ops/nuclear_norm_ops.h>
950
+ #include <ATen/ops/numpy_T_ops.h>
951
+ #include <ATen/ops/one_hot_ops.h>
952
+ #include <ATen/ops/ones_ops.h>
953
+ #include <ATen/ops/ones_like_ops.h>
954
+ #include <ATen/ops/or_ops.h>
955
+ #include <ATen/ops/orgqr_ops.h>
956
+ #include <ATen/ops/ormqr_ops.h>
957
+ #include <ATen/ops/outer_ops.h>
958
+ #include <ATen/ops/output_nr_ops.h>
959
+ #include <ATen/ops/pad_ops.h>
960
+ #include <ATen/ops/pad_sequence_ops.h>
961
+ #include <ATen/ops/pairwise_distance_ops.h>
962
+ #include <ATen/ops/pdist_ops.h>
963
+ #include <ATen/ops/permute_ops.h>
964
+ #include <ATen/ops/permute_copy_ops.h>
965
+ #include <ATen/ops/pin_memory_ops.h>
966
+ #include <ATen/ops/pinverse_ops.h>
967
+ #include <ATen/ops/pixel_shuffle_ops.h>
968
+ #include <ATen/ops/pixel_unshuffle_ops.h>
969
+ #include <ATen/ops/poisson_ops.h>
970
+ #include <ATen/ops/poisson_nll_loss_ops.h>
971
+ #include <ATen/ops/polar_ops.h>
972
+ #include <ATen/ops/polygamma_ops.h>
973
+ #include <ATen/ops/positive_ops.h>
974
+ #include <ATen/ops/pow_ops.h>
975
+ #include <ATen/ops/prelu_ops.h>
976
+ #include <ATen/ops/prod_ops.h>
977
+ #include <ATen/ops/promote_types_ops.h>
978
+ #include <ATen/ops/put_ops.h>
979
+ #include <ATen/ops/q_per_channel_axis_ops.h>
980
+ #include <ATen/ops/q_per_channel_scales_ops.h>
981
+ #include <ATen/ops/q_per_channel_zero_points_ops.h>
982
+ #include <ATen/ops/q_scale_ops.h>
983
+ #include <ATen/ops/q_zero_point_ops.h>
984
+ #include <ATen/ops/qr_ops.h>
985
+ #include <ATen/ops/qscheme_ops.h>
986
+ #include <ATen/ops/quantile_ops.h>
987
+ #include <ATen/ops/quantize_per_channel_ops.h>
988
+ #include <ATen/ops/quantize_per_tensor_ops.h>
989
+ #include <ATen/ops/quantize_per_tensor_dynamic_ops.h>
990
+ #include <ATen/ops/quantized_batch_norm_ops.h>
991
+ #include <ATen/ops/quantized_gru_cell_ops.h>
992
+ #include <ATen/ops/quantized_lstm_cell_ops.h>
993
+ #include <ATen/ops/quantized_max_pool1d_ops.h>
994
+ #include <ATen/ops/quantized_max_pool2d_ops.h>
995
+ #include <ATen/ops/quantized_max_pool3d_ops.h>
996
+ #include <ATen/ops/quantized_rnn_relu_cell_ops.h>
997
+ #include <ATen/ops/quantized_rnn_tanh_cell_ops.h>
998
+ #include <ATen/ops/rad2deg_ops.h>
999
+ #include <ATen/ops/rand_ops.h>
1000
+ #include <ATen/ops/rand_like_ops.h>
1001
+ #include <ATen/ops/randint_ops.h>
1002
+ #include <ATen/ops/randint_like_ops.h>
1003
+ #include <ATen/ops/randn_ops.h>
1004
+ #include <ATen/ops/randn_like_ops.h>
1005
+ #include <ATen/ops/random_ops.h>
1006
+ #include <ATen/ops/randperm_ops.h>
1007
+ #include <ATen/ops/range_ops.h>
1008
+ #include <ATen/ops/ravel_ops.h>
1009
+ #include <ATen/ops/real_ops.h>
1010
+ #include <ATen/ops/reciprocal_ops.h>
1011
+ #include <ATen/ops/record_stream_ops.h>
1012
+ #include <ATen/ops/refine_names_ops.h>
1013
+ #include <ATen/ops/reflection_pad1d_ops.h>
1014
+ #include <ATen/ops/reflection_pad1d_backward_ops.h>
1015
+ #include <ATen/ops/reflection_pad2d_ops.h>
1016
+ #include <ATen/ops/reflection_pad2d_backward_ops.h>
1017
+ #include <ATen/ops/reflection_pad3d_ops.h>
1018
+ #include <ATen/ops/reflection_pad3d_backward_ops.h>
1019
+ #include <ATen/ops/relu_ops.h>
1020
+ #include <ATen/ops/relu6_ops.h>
1021
+ #include <ATen/ops/remainder_ops.h>
1022
+ #include <ATen/ops/rename_ops.h>
1023
+ #include <ATen/ops/renorm_ops.h>
1024
+ #include <ATen/ops/repeat_ops.h>
1025
+ #include <ATen/ops/repeat_interleave_ops.h>
1026
+ #include <ATen/ops/replication_pad1d_ops.h>
1027
+ #include <ATen/ops/replication_pad1d_backward_ops.h>
1028
+ #include <ATen/ops/replication_pad2d_ops.h>
1029
+ #include <ATen/ops/replication_pad2d_backward_ops.h>
1030
+ #include <ATen/ops/replication_pad3d_ops.h>
1031
+ #include <ATen/ops/replication_pad3d_backward_ops.h>
1032
+ #include <ATen/ops/requires_grad_ops.h>
1033
+ #include <ATen/ops/reshape_ops.h>
1034
+ #include <ATen/ops/reshape_as_ops.h>
1035
+ #include <ATen/ops/resize_ops.h>
1036
+ #include <ATen/ops/resize_as_ops.h>
1037
+ #include <ATen/ops/resize_as_sparse_ops.h>
1038
+ #include <ATen/ops/resolve_conj_ops.h>
1039
+ #include <ATen/ops/resolve_neg_ops.h>
1040
+ #include <ATen/ops/result_type_ops.h>
1041
+ #include <ATen/ops/retain_grad_ops.h>
1042
+ #include <ATen/ops/retains_grad_ops.h>
1043
+ #include <ATen/ops/rnn_relu_ops.h>
1044
+ #include <ATen/ops/rnn_relu_cell_ops.h>
1045
+ #include <ATen/ops/rnn_tanh_ops.h>
1046
+ #include <ATen/ops/rnn_tanh_cell_ops.h>
1047
+ #include <ATen/ops/roll_ops.h>
1048
+ #include <ATen/ops/rot90_ops.h>
1049
+ #include <ATen/ops/round_ops.h>
1050
+ #include <ATen/ops/row_indices_ops.h>
1051
+ #include <ATen/ops/row_indices_copy_ops.h>
1052
+ #include <ATen/ops/row_stack_ops.h>
1053
+ #include <ATen/ops/rrelu_ops.h>
1054
+ #include <ATen/ops/rrelu_with_noise_ops.h>
1055
+ #include <ATen/ops/rrelu_with_noise_backward_ops.h>
1056
+ #include <ATen/ops/rshift_ops.h>
1057
+ #include <ATen/ops/rsqrt_ops.h>
1058
+ #include <ATen/ops/rsub_ops.h>
1059
+ #include <ATen/ops/scalar_tensor_ops.h>
1060
+ #include <ATen/ops/scaled_dot_product_attention_ops.h>
1061
+ #include <ATen/ops/scatter_ops.h>
1062
+ #include <ATen/ops/scatter_add_ops.h>
1063
+ #include <ATen/ops/scatter_reduce_ops.h>
1064
+ #include <ATen/ops/searchsorted_ops.h>
1065
+ #include <ATen/ops/segment_reduce_ops.h>
1066
+ #include <ATen/ops/select_ops.h>
1067
+ #include <ATen/ops/select_backward_ops.h>
1068
+ #include <ATen/ops/select_copy_ops.h>
1069
+ #include <ATen/ops/select_scatter_ops.h>
1070
+ #include <ATen/ops/selu_ops.h>
1071
+ #include <ATen/ops/set_ops.h>
1072
+ #include <ATen/ops/set_data_ops.h>
1073
+ #include <ATen/ops/sgn_ops.h>
1074
+ #include <ATen/ops/sigmoid_ops.h>
1075
+ #include <ATen/ops/sigmoid_backward_ops.h>
1076
+ #include <ATen/ops/sign_ops.h>
1077
+ #include <ATen/ops/signbit_ops.h>
1078
+ #include <ATen/ops/silu_ops.h>
1079
+ #include <ATen/ops/silu_backward_ops.h>
1080
+ #include <ATen/ops/sin_ops.h>
1081
+ #include <ATen/ops/sinc_ops.h>
1082
+ #include <ATen/ops/sinh_ops.h>
1083
+ #include <ATen/ops/size_ops.h>
1084
+ #include <ATen/ops/slice_ops.h>
1085
+ #include <ATen/ops/slice_backward_ops.h>
1086
+ #include <ATen/ops/slice_copy_ops.h>
1087
+ #include <ATen/ops/slice_scatter_ops.h>
1088
+ #include <ATen/ops/slogdet_ops.h>
1089
+ #include <ATen/ops/slow_conv3d_ops.h>
1090
+ #include <ATen/ops/slow_conv3d_forward_ops.h>
1091
+ #include <ATen/ops/slow_conv_dilated2d_ops.h>
1092
+ #include <ATen/ops/slow_conv_dilated3d_ops.h>
1093
+ #include <ATen/ops/slow_conv_transpose2d_ops.h>
1094
+ #include <ATen/ops/slow_conv_transpose3d_ops.h>
1095
+ #include <ATen/ops/smm_ops.h>
1096
+ #include <ATen/ops/smooth_l1_loss_ops.h>
1097
+ #include <ATen/ops/smooth_l1_loss_backward_ops.h>
1098
+ #include <ATen/ops/soft_margin_loss_ops.h>
1099
+ #include <ATen/ops/soft_margin_loss_backward_ops.h>
1100
+ #include <ATen/ops/softmax_ops.h>
1101
+ #include <ATen/ops/softplus_ops.h>
1102
+ #include <ATen/ops/softplus_backward_ops.h>
1103
+ #include <ATen/ops/softshrink_ops.h>
1104
+ #include <ATen/ops/softshrink_backward_ops.h>
1105
+ #include <ATen/ops/sort_ops.h>
1106
+ #include <ATen/ops/sparse_bsc_tensor_ops.h>
1107
+ #include <ATen/ops/sparse_bsr_tensor_ops.h>
1108
+ #include <ATen/ops/sparse_compressed_tensor_ops.h>
1109
+ #include <ATen/ops/sparse_coo_tensor_ops.h>
1110
+ #include <ATen/ops/sparse_csc_tensor_ops.h>
1111
+ #include <ATen/ops/sparse_csr_tensor_ops.h>
1112
+ #include <ATen/ops/sparse_dim_ops.h>
1113
+ #include <ATen/ops/sparse_mask_ops.h>
1114
+ #include <ATen/ops/sparse_resize_ops.h>
1115
+ #include <ATen/ops/sparse_resize_and_clear_ops.h>
1116
+ #include <ATen/ops/sparse_sampled_addmm_ops.h>
1117
+ #include <ATen/ops/special_airy_ai_ops.h>
1118
+ #include <ATen/ops/special_bessel_j0_ops.h>
1119
+ #include <ATen/ops/special_bessel_j1_ops.h>
1120
+ #include <ATen/ops/special_bessel_y0_ops.h>
1121
+ #include <ATen/ops/special_bessel_y1_ops.h>
1122
+ #include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
1123
+ #include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
1124
+ #include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
1125
+ #include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
1126
+ #include <ATen/ops/special_digamma_ops.h>
1127
+ #include <ATen/ops/special_entr_ops.h>
1128
+ #include <ATen/ops/special_erf_ops.h>
1129
+ #include <ATen/ops/special_erfc_ops.h>
1130
+ #include <ATen/ops/special_erfcx_ops.h>
1131
+ #include <ATen/ops/special_erfinv_ops.h>
1132
+ #include <ATen/ops/special_exp2_ops.h>
1133
+ #include <ATen/ops/special_expit_ops.h>
1134
+ #include <ATen/ops/special_expm1_ops.h>
1135
+ #include <ATen/ops/special_gammainc_ops.h>
1136
+ #include <ATen/ops/special_gammaincc_ops.h>
1137
+ #include <ATen/ops/special_gammaln_ops.h>
1138
+ #include <ATen/ops/special_hermite_polynomial_h_ops.h>
1139
+ #include <ATen/ops/special_hermite_polynomial_he_ops.h>
1140
+ #include <ATen/ops/special_i0_ops.h>
1141
+ #include <ATen/ops/special_i0e_ops.h>
1142
+ #include <ATen/ops/special_i1_ops.h>
1143
+ #include <ATen/ops/special_i1e_ops.h>
1144
+ #include <ATen/ops/special_laguerre_polynomial_l_ops.h>
1145
+ #include <ATen/ops/special_legendre_polynomial_p_ops.h>
1146
+ #include <ATen/ops/special_log1p_ops.h>
1147
+ #include <ATen/ops/special_log_ndtr_ops.h>
1148
+ #include <ATen/ops/special_log_softmax_ops.h>
1149
+ #include <ATen/ops/special_logit_ops.h>
1150
+ #include <ATen/ops/special_logsumexp_ops.h>
1151
+ #include <ATen/ops/special_modified_bessel_i0_ops.h>
1152
+ #include <ATen/ops/special_modified_bessel_i1_ops.h>
1153
+ #include <ATen/ops/special_modified_bessel_k0_ops.h>
1154
+ #include <ATen/ops/special_modified_bessel_k1_ops.h>
1155
+ #include <ATen/ops/special_multigammaln_ops.h>
1156
+ #include <ATen/ops/special_ndtr_ops.h>
1157
+ #include <ATen/ops/special_ndtri_ops.h>
1158
+ #include <ATen/ops/special_polygamma_ops.h>
1159
+ #include <ATen/ops/special_psi_ops.h>
1160
+ #include <ATen/ops/special_round_ops.h>
1161
+ #include <ATen/ops/special_scaled_modified_bessel_k0_ops.h>
1162
+ #include <ATen/ops/special_scaled_modified_bessel_k1_ops.h>
1163
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
1164
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
1165
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
1166
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
1167
+ #include <ATen/ops/special_sinc_ops.h>
1168
+ #include <ATen/ops/special_softmax_ops.h>
1169
+ #include <ATen/ops/special_spherical_bessel_j0_ops.h>
1170
+ #include <ATen/ops/special_xlog1py_ops.h>
1171
+ #include <ATen/ops/special_xlogy_ops.h>
1172
+ #include <ATen/ops/special_zeta_ops.h>
1173
+ #include <ATen/ops/split_ops.h>
1174
+ #include <ATen/ops/split_copy_ops.h>
1175
+ #include <ATen/ops/split_with_sizes_ops.h>
1176
+ #include <ATen/ops/split_with_sizes_copy_ops.h>
1177
+ #include <ATen/ops/sqrt_ops.h>
1178
+ #include <ATen/ops/square_ops.h>
1179
+ #include <ATen/ops/squeeze_ops.h>
1180
+ #include <ATen/ops/squeeze_copy_ops.h>
1181
+ #include <ATen/ops/sspaddmm_ops.h>
1182
+ #include <ATen/ops/stack_ops.h>
1183
+ #include <ATen/ops/std_ops.h>
1184
+ #include <ATen/ops/std_mean_ops.h>
1185
+ #include <ATen/ops/stft_ops.h>
1186
+ #include <ATen/ops/stride_ops.h>
1187
+ #include <ATen/ops/sub_ops.h>
1188
+ #include <ATen/ops/subtract_ops.h>
1189
+ #include <ATen/ops/sum_ops.h>
1190
+ #include <ATen/ops/sum_to_size_ops.h>
1191
+ #include <ATen/ops/svd_ops.h>
1192
+ #include <ATen/ops/swapaxes_ops.h>
1193
+ #include <ATen/ops/swapdims_ops.h>
1194
+ #include <ATen/ops/sym_constrain_range_ops.h>
1195
+ #include <ATen/ops/sym_constrain_range_for_size_ops.h>
1196
+ #include <ATen/ops/sym_numel_ops.h>
1197
+ #include <ATen/ops/sym_size_ops.h>
1198
+ #include <ATen/ops/sym_storage_offset_ops.h>
1199
+ #include <ATen/ops/sym_stride_ops.h>
1200
+ #include <ATen/ops/t_ops.h>
1201
+ #include <ATen/ops/t_copy_ops.h>
1202
+ #include <ATen/ops/take_ops.h>
1203
+ #include <ATen/ops/take_along_dim_ops.h>
1204
+ #include <ATen/ops/tan_ops.h>
1205
+ #include <ATen/ops/tanh_ops.h>
1206
+ #include <ATen/ops/tanh_backward_ops.h>
1207
+ #include <ATen/ops/tensor_split_ops.h>
1208
+ #include <ATen/ops/tensordot_ops.h>
1209
+ #include <ATen/ops/thnn_conv2d_ops.h>
1210
+ #include <ATen/ops/threshold_ops.h>
1211
+ #include <ATen/ops/threshold_backward_ops.h>
1212
+ #include <ATen/ops/tile_ops.h>
1213
+ #include <ATen/ops/to_ops.h>
1214
+ #include <ATen/ops/to_dense_ops.h>
1215
+ #include <ATen/ops/to_dense_backward_ops.h>
1216
+ #include <ATen/ops/to_mkldnn_ops.h>
1217
+ #include <ATen/ops/to_mkldnn_backward_ops.h>
1218
+ #include <ATen/ops/to_padded_tensor_ops.h>
1219
+ #include <ATen/ops/to_sparse_ops.h>
1220
+ #include <ATen/ops/to_sparse_bsc_ops.h>
1221
+ #include <ATen/ops/to_sparse_bsr_ops.h>
1222
+ #include <ATen/ops/to_sparse_csc_ops.h>
1223
+ #include <ATen/ops/to_sparse_csr_ops.h>
1224
+ #include <ATen/ops/topk_ops.h>
1225
+ #include <ATen/ops/trace_ops.h>
1226
+ #include <ATen/ops/trace_backward_ops.h>
1227
+ #include <ATen/ops/transpose_ops.h>
1228
+ #include <ATen/ops/transpose_copy_ops.h>
1229
+ #include <ATen/ops/trapezoid_ops.h>
1230
+ #include <ATen/ops/trapz_ops.h>
1231
+ #include <ATen/ops/triangular_solve_ops.h>
1232
+ #include <ATen/ops/tril_ops.h>
1233
+ #include <ATen/ops/tril_indices_ops.h>
1234
+ #include <ATen/ops/triplet_margin_loss_ops.h>
1235
+ #include <ATen/ops/triu_ops.h>
1236
+ #include <ATen/ops/triu_indices_ops.h>
1237
+ #include <ATen/ops/true_divide_ops.h>
1238
+ #include <ATen/ops/trunc_ops.h>
1239
+ #include <ATen/ops/type_as_ops.h>
1240
+ #include <ATen/ops/unbind_ops.h>
1241
+ #include <ATen/ops/unbind_copy_ops.h>
1242
+ #include <ATen/ops/unflatten_ops.h>
1243
+ #include <ATen/ops/unflatten_dense_tensors_ops.h>
1244
+ #include <ATen/ops/unfold_ops.h>
1245
+ #include <ATen/ops/unfold_backward_ops.h>
1246
+ #include <ATen/ops/unfold_copy_ops.h>
1247
+ #include <ATen/ops/uniform_ops.h>
1248
+ #include <ATen/ops/unique_consecutive_ops.h>
1249
+ #include <ATen/ops/unique_dim_ops.h>
1250
+ #include <ATen/ops/unique_dim_consecutive_ops.h>
1251
+ #include <ATen/ops/unsafe_chunk_ops.h>
1252
+ #include <ATen/ops/unsafe_split_ops.h>
1253
+ #include <ATen/ops/unsafe_split_with_sizes_ops.h>
1254
+ #include <ATen/ops/unsqueeze_ops.h>
1255
+ #include <ATen/ops/unsqueeze_copy_ops.h>
1256
+ #include <ATen/ops/upsample_bicubic2d_ops.h>
1257
+ #include <ATen/ops/upsample_bicubic2d_backward_ops.h>
1258
+ #include <ATen/ops/upsample_bilinear2d_ops.h>
1259
+ #include <ATen/ops/upsample_bilinear2d_backward_ops.h>
1260
+ #include <ATen/ops/upsample_linear1d_ops.h>
1261
+ #include <ATen/ops/upsample_linear1d_backward_ops.h>
1262
+ #include <ATen/ops/upsample_nearest1d_ops.h>
1263
+ #include <ATen/ops/upsample_nearest1d_backward_ops.h>
1264
+ #include <ATen/ops/upsample_nearest2d_ops.h>
1265
+ #include <ATen/ops/upsample_nearest2d_backward_ops.h>
1266
+ #include <ATen/ops/upsample_nearest3d_ops.h>
1267
+ #include <ATen/ops/upsample_nearest3d_backward_ops.h>
1268
+ #include <ATen/ops/upsample_trilinear3d_ops.h>
1269
+ #include <ATen/ops/upsample_trilinear3d_backward_ops.h>
1270
+ #include <ATen/ops/value_selecting_reduction_backward_ops.h>
1271
+ #include <ATen/ops/values_ops.h>
1272
+ #include <ATen/ops/values_copy_ops.h>
1273
+ #include <ATen/ops/vander_ops.h>
1274
+ #include <ATen/ops/var_ops.h>
1275
+ #include <ATen/ops/var_mean_ops.h>
1276
+ #include <ATen/ops/vdot_ops.h>
1277
+ #include <ATen/ops/view_ops.h>
1278
+ #include <ATen/ops/view_as_ops.h>
1279
+ #include <ATen/ops/view_as_complex_ops.h>
1280
+ #include <ATen/ops/view_as_complex_copy_ops.h>
1281
+ #include <ATen/ops/view_as_real_ops.h>
1282
+ #include <ATen/ops/view_as_real_copy_ops.h>
1283
+ #include <ATen/ops/view_copy_ops.h>
1284
+ #include <ATen/ops/vsplit_ops.h>
1285
+ #include <ATen/ops/vstack_ops.h>
1286
+ #include <ATen/ops/where_ops.h>
1287
+ #include <ATen/ops/xlogy_ops.h>
1288
+ #include <ATen/ops/xor_ops.h>
1289
+ #include <ATen/ops/zero_ops.h>
1290
+ #include <ATen/ops/zeros_ops.h>
1291
+ #include <ATen/ops/zeros_like_ops.h>
1292
+
1293
+ // Extension writers: do you write wrapper functions? Are you frustrated with
1294
+ // resolving overloads of operators? Are you frustrated with dealing with
1295
+ // pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
1296
+ // further, this is the utility for you.
1297
+ //
1298
+ // Given an operator schema: aten::op.overload(...
1299
+ //
1300
+ // Use ATEN_FN2(op, overload) to get a *function* version of the operator
1301
+ // that is guaranteed to not be overloaded. This means that you can safely
1302
+ // decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
1303
+ //
1304
+ // Given an operator schema without an overload name: aten::op(...
1305
+ //
1306
+ // Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
1307
+ //
1308
+ // There is some interesting behavior for out= operations.
1309
+ // ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
1310
+ // that is, the order of arguments is exactly what it looks like in the schema.
1311
+
1312
+ #define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
1313
+ #define ATEN_FN(op_name) at::_ops::op_name::call
1314
+
1315
+ // Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
1316
+ // metadata about a given aten operator.
1317
+ // Notable data on the class includes:
1318
+ // - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
1319
+ // - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
1320
+ // - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
1321
+ // - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
1322
+
1323
+ #define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
1324
+ #define ATEN_OP(op_name) at::_ops::op_name
1325
+
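+ // A brief usage sketch (illustrative comments only; `x` and `result` are
+ // hypothetical tensors, not names defined by this header):
+ //
+ //   at::Tensor y = ATEN_FN(sin)(x);                    // unambiguous function call
+ //   ATEN_FN2(sin, out)(x, result);                     // faithful out= order: (self, out)
+ //   using sin_out_fn = decltype(&ATEN_FN2(sin, out));  // safe: not overloaded
+ //
+ //   auto op_name       = ATEN_OP2(add, Tensor)::name;           // "add"
+ //   auto overload_name = ATEN_OP2(add, Tensor)::overload_name;  // "Tensor"
+ //   using add_schema   = ATEN_OP2(add, Tensor)::schema;         // C++ signature type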
1326
+ // WARNING: Please do not call any of the ops in the _ops namespace directly.
1327
+ // Use the ATEN_FN macros. We do not guarantee stability of the naming
1328
+ // scheme for the functions in at::_ops
1329
+
1330
+ // See Note [The ATen Operators API] for details of the at::_ops namespace
1331
+
1332
+ namespace at {
1333
+ namespace _ops {
1334
+
1335
+ } // namespace _ops
1336
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h ADDED
@@ -0,0 +1,83 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+ #include <c10/util/SmallVector.h>
5
+
6
+ namespace at {
7
+
8
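+ // Invokes f(sub_begin, sub_end) on subranges of [begin, end). The range is
+ // split only when intra-op parallelism is compiled in (INTRA_OP_PARALLEL),
+ // the number of iterations exceeds grain_size, more than one thread is
+ // available, and we are not already inside a parallel region; otherwise f is
+ // invoked once on the whole range in the calling thread.
+ //
+ // A usage sketch (illustrative only; `out`, `in` and `n` are hypothetical):
+ //
+ //   at::parallel_for(0, n, at::internal::GRAIN_SIZE, [&](int64_t b, int64_t e) {
+ //     for (int64_t i = b; i < e; ++i) out[i] = in[i] * 2.0f;
+ //   });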
+ template <class F>
9
+ inline void parallel_for(
10
+ const int64_t begin,
11
+ const int64_t end,
12
+ const int64_t grain_size,
13
+ const F& f) {
14
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(grain_size >= 0);
15
+ if (begin >= end) {
16
+ return;
17
+ }
18
+
19
+ #ifdef INTRA_OP_PARALLEL
20
+ at::internal::lazy_init_num_threads();
21
+ const auto numiter = end - begin;
22
+ const bool use_parallel =
23
+ (numiter > grain_size && numiter > 1 && !at::in_parallel_region() &&
24
+ at::get_num_threads() > 1);
25
+ if (!use_parallel) {
26
+ internal::ThreadIdGuard tid_guard(0);
27
+ f(begin, end);
28
+ return;
29
+ }
30
+
31
+ internal::invoke_parallel(begin, end, grain_size, f);
32
+ #else
33
+ internal::ThreadIdGuard tid_guard(0);
34
+ f(begin, end);
35
+ #endif
36
+ }
37
+
38
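+ // Reduces over [begin, end) in parallel: each worker computes a partial
+ // result f(sub_begin, sub_end, ident) and the partials are combined with
+ // sf(acc, partial). `ident` should be an identity element for sf (e.g. 0 for
+ // a sum), since the accumulation starts from ident and idle worker slots keep
+ // their initial value of ident.
+ //
+ // A usage sketch (illustrative only; `data` is a hypothetical
+ // std::vector<float>):
+ //
+ //   float total = at::parallel_reduce(
+ //       0, static_cast<int64_t>(data.size()), at::internal::GRAIN_SIZE, 0.0f,
+ //       [&](int64_t b, int64_t e, float acc) {
+ //         for (int64_t i = b; i < e; ++i) acc += data[i];
+ //         return acc;
+ //       },
+ //       [](float a, float b) { return a + b; });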
+ template <class scalar_t, class F, class SF>
39
+ inline scalar_t parallel_reduce(
40
+ const int64_t begin,
41
+ const int64_t end,
42
+ const int64_t grain_size,
43
+ const scalar_t ident,
44
+ const F& f,
45
+ const SF& sf) {
46
+ TORCH_CHECK(grain_size >= 0);
47
+ if (begin >= end) {
48
+ return ident;
49
+ }
50
+
51
+ #ifdef INTRA_OP_PARALLEL
52
+ at::internal::lazy_init_num_threads();
53
+ const auto max_threads = at::get_num_threads();
54
+ const bool use_parallel =
55
+ ((end - begin) > grain_size && !at::in_parallel_region() &&
56
+ max_threads > 1);
57
+ if (!use_parallel) {
58
+ internal::ThreadIdGuard tid_guard(0);
59
+ return f(begin, end, ident);
60
+ }
61
+
62
+ c10::SmallVector<scalar_t, 64> results(max_threads, ident);
63
+ internal::invoke_parallel(
64
+ begin,
65
+ end,
66
+ grain_size,
67
+ [&](const int64_t my_begin, const int64_t my_end) {
68
+ const auto tid = at::get_thread_num();
69
+ results[tid] = f(my_begin, my_end, ident);
70
+ });
71
+
72
+ scalar_t result = ident;
73
+ for (auto partial_result : results) {
74
+ result = sf(result, partial_result);
75
+ }
76
+ return result;
77
+ #else
78
+ internal::ThreadIdGuard tid_guard(0);
79
+ return f(begin, end, ident);
80
+ #endif
81
+ }
82
+
83
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <cstddef>
5
+ #include <exception>
6
+
7
+ #include <c10/util/Exception.h>
8
+
9
+ #define INTRA_OP_PARALLEL
10
+
11
+ namespace at::internal {
12
+
13
+ TORCH_API void invoke_parallel(
14
+ const int64_t begin,
15
+ const int64_t end,
16
+ const int64_t grain_size,
17
+ const std::function<void(int64_t, int64_t)>& f);
18
+
19
+ } // namespace at::internal
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <atomic>
5
+ #include <cstddef>
6
+ #include <exception>
7
+
8
+ #ifdef _OPENMP
9
+ #define INTRA_OP_PARALLEL
10
+
11
+ #include <omp.h>
12
+ #endif
13
+
14
+ namespace at {
15
+
16
+ #ifdef _OPENMP
17
+ namespace internal {
18
+ template <typename F>
19
+ inline void invoke_parallel(
20
+ int64_t begin,
21
+ int64_t end,
22
+ int64_t grain_size,
23
+ const F& f) {
24
+ std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
25
+ std::exception_ptr eptr;
26
+
27
+ #pragma omp parallel
28
+ {
29
+ // choose number of tasks based on grain size and number of threads
30
+ // can't use num_threads clause due to bugs in GOMP's thread pool (See
31
+ // #32008)
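+ // For example (illustrative numbers): with begin=0, end=10000,
+ // grain_size=3000 and 8 OpenMP threads, num_threads becomes
+ // min(8, divup(10000, 3000)) = 4, chunk_size = divup(10000, 4) = 2500, and
+ // threads 0..3 process [0,2500), [2500,5000), [5000,7500), [7500,10000).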
32
+ int64_t num_threads = omp_get_num_threads();
33
+ if (grain_size > 0) {
34
+ num_threads = std::min(num_threads, divup((end - begin), grain_size));
35
+ }
36
+
37
+ int64_t tid = omp_get_thread_num();
38
+ int64_t chunk_size = divup((end - begin), num_threads);
39
+ int64_t begin_tid = begin + tid * chunk_size;
40
+ if (begin_tid < end) {
41
+ try {
42
+ internal::ThreadIdGuard tid_guard(tid);
43
+ f(begin_tid, std::min(end, chunk_size + begin_tid));
44
+ } catch (...) {
45
+ if (!err_flag.test_and_set()) {
46
+ eptr = std::current_exception();
47
+ }
48
+ }
49
+ }
50
+ }
51
+ if (eptr) {
52
+ std::rethrow_exception(eptr);
53
+ }
54
+ }
55
+ } // namespace internal
56
+ #endif // _OPENMP
57
+
58
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SafePyObject.h>
4
+ #include <c10/macros/Macros.h>
5
+
6
+ namespace at::impl {
7
+
8
+ enum TorchFunctionDisabledState { ENABLED, SUBCLASSES_DISABLED, ALL_DISABLED };
9
+
10
+ struct TORCH_API PythonTorchFunctionTLS {
11
+ static void set_disabled_state(TorchFunctionDisabledState disabled_state_);
12
+ static TorchFunctionDisabledState get_disabled_state();
13
+
14
+ static void push_onto_stack(std::shared_ptr<SafePyObject> mode);
15
+ static const std::shared_ptr<SafePyObject> pop_stack();
16
+ static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
17
+ static int64_t stack_len();
18
+
19
+ static const PythonTorchFunctionTLS& get_state();
20
+ static void set_state(const PythonTorchFunctionTLS& state);
21
+
22
+ private:
23
+ // The mode TLS is split into
24
+ // - disabled_state, which says which part of torch function are disabled
25
+ // - stack_, which is a vector of modes representing the stack of user
26
+ // defined modes
27
+ TorchFunctionDisabledState disabled_state_ =
28
+ TorchFunctionDisabledState::ENABLED;
29
+ std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
30
+ };
31
+
32
+ TORCH_API bool torch_function_mode_enabled();
33
+
34
+ } // namespace at::impl
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SavedTensorHooks.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/python_stub.h>
6
+ #include <stack>
7
+ #include <string>
8
+
9
+ #include <utility>
10
+
11
+ namespace at {
12
+
13
+ namespace impl {
14
+
15
+ struct TORCH_API SavedTensorDefaultHooksTLS {
16
+ // PyObject is defined in c10/util/python_stub.h
17
+ std::stack<std::pair<PyObject*, PyObject*>> stack;
18
+
19
+ // See NOTE: [Disabling SavedTensorDefaultHooks] for context
20
+ // NOTE: [disabled_error_message invariant]
21
+ // disabled_error_message is nullopt IFF Saved Tensor hooks is enabled
22
+ // We did this for efficiency (so we didn't have to keep a separate bool
23
+ // around)
24
+ c10::optional<std::string> disabled_error_message;
25
+ };
26
+
27
+ } // namespace impl
28
+
29
+ struct TORCH_API SavedTensorDefaultHooks {
30
+ static void push_hooks(PyObject* pack_hook, PyObject* unpack_hook);
31
+ static void pop_hooks();
32
+ static std::pair<PyObject*, PyObject*> get_hooks();
33
+ static void lazy_initialize();
34
+ static std::stack<std::pair<PyObject*, PyObject*>> get_stack();
35
+ static void set_stack(std::stack<std::pair<PyObject*, PyObject*>>);
36
+
37
+ static const impl::SavedTensorDefaultHooksTLS& get_tls_state();
38
+ static void set_tls_state(const impl::SavedTensorDefaultHooksTLS& tls);
39
+
40
+ // NOTE: [Disabling SavedTensorDefaultHooks]
41
+ // A developer of a PyTorch feature may choose to disable SavedTensorDefault
42
+ // hooks, especially if their feature does not work with it. If they are
43
+ // disabled, then the following will raise an error:
44
+ // - Attempting to push_hooks
45
+ // - calling disable(message) with a non-zero stack (from get_stack) size
46
+ static void disable(const std::string& error_message);
47
+ static void enable();
48
+ static bool is_enabled();
49
+ static const c10::optional<std::string>& get_disabled_error_message();
50
+ };
51
+
52
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Scalar.h ADDED
@@ -0,0 +1,3 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Scalar.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h ADDED
@@ -0,0 +1,4 @@
1
+ #pragma once
2
+ #include <ATen/core/ATenGeneral.h> // for BC reasons
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/core/ScalarType.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Storage.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <c10/core/Storage.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Tensor.h ADDED
@@ -0,0 +1,3 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <ATen/core/TensorAccessor.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h ADDED
@@ -0,0 +1,987 @@
1
+ #pragma once
2
+
3
+ #include <ATen/TensorMeta.h>
4
+ #include <ATen/core/Dimname.h>
5
+ #include <ATen/core/Range.h>
6
+ #include <ATen/core/TensorBase.h>
7
+ #include <c10/core/DynamicCast.h>
8
+ #include <c10/util/FunctionRef.h>
9
+ #include <c10/util/MaybeOwned.h>
10
+ #include <c10/util/SmallVector.h>
11
+ #include <c10/util/TypeCast.h>
12
+ #include <c10/util/irange.h>
13
+
14
+ #include <array>
15
+ #include <bitset>
16
+
17
+ namespace at {
18
+ class Tensor;
19
+ class OptionalTensorRef;
20
+ using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
21
+ } // namespace at
22
+
23
+ // TensorIterator is a helper class for element-wise operations, such as
24
+ // arithmetic, comparisons, and trigonometric functions. It handles
25
+ // broadcasting and type conversions of operands.
26
+ //
27
+ // This is inspired by NumPy's Array Iterator API (NpyIter).
28
+ //
29
+ // The files Loops.h and Loops.cuh provide functions to build kernels that
30
+ // use TensorIterator.
31
+ //
32
+ // Example:
33
+ //
34
+ // auto iter = TensorIteratorConfig()
35
+ // .add_output(output)
36
+ // .add_input(input)
37
+ // .build()
38
+ //
39
+ // [MyKernel.cpp / MyKernel.cu]
40
+ // cpu_kernel(iter, [](float a, float b) {
41
+ // return a + b;
42
+ // });
43
+ //
44
+ // gpu_kernel(iter, []GPU_LAMBDA(float a, float b) -> float {
45
+ // return a + b;
46
+ // });
47
+ //
48
+ // Note [Order of Construction]
49
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50
+ // When setting up the tensor iterator configuration, the output Tensors
51
+ // have to be added first via
52
+ // TensorIteratorConfig::add_owned_output(at::Tensor). After adding all outputs,
53
+ // the inputs can be added via
54
+ // TensorIteratorConfig::add_owned_input(at::Tensor).
55
+ // Adding another output after inputs have been added will raise an exception.
56
+ //
57
+ // Note [Common Dtype Computation]
58
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
59
+ // Some operations have a natural notion of a "common dtype" or
60
+ // "computation dtype" where all inputs are cast to one dtype, the
61
+ // operation is performed, and then the results are cast to all outputs.
62
+ //
63
+ // TensorIterator infers a common dtype if all inputs have the same dtype,
64
+ // and it computes one using type promotion rules on its inputs if
65
+ // promote_inputs_to_common_dtype_ is true. Attempting to query
66
+ // a common dtype otherwise will throw an exception.
67
+ //
68
+ // Note that the outputs are not considered when computing a common dtype.
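+ //
+ // For example (an illustrative case): with promote_inputs_to_common_dtype_
+ // set, adding an int64 tensor to a float32 tensor computes in the promoted
+ // common dtype (float32), and the result is then cast back to each output's
+ // dtype.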
69
+
70
+ namespace at {
71
+
72
+ namespace internal {
73
+ // This parameter is heuristically chosen to determine the minimum number of
74
+ // work that warrants parallelism. For example, when summing an array, it is
75
+ // deemed inefficient to parallelise over arrays shorter than 32768. Further,
76
+ // no parallel algorithm (such as parallel_reduce) should split work into
77
+ // smaller than GRAIN_SIZE chunks.
78
+ constexpr int64_t GRAIN_SIZE = 32768;
79
+
80
+ // Storage for a non-owning Tensor, without needing to include Tensor.h
81
+ class TORCH_API OpaqueOptionalTensorRef {
82
+ alignas(alignof(TensorBase)) std::array<char, sizeof(TensorBase)> data_;
83
+
84
+ public:
85
+ OpaqueOptionalTensorRef();
86
+ OpaqueOptionalTensorRef(const OpaqueOptionalTensorRef&) = default;
87
+ OpaqueOptionalTensorRef& operator=(const OpaqueOptionalTensorRef&) = default;
88
+ OpaqueOptionalTensorRef(OpaqueOptionalTensorRef&&) noexcept = default;
89
+ OpaqueOptionalTensorRef& operator=(OpaqueOptionalTensorRef&&) noexcept =
90
+ default;
91
+ ~OpaqueOptionalTensorRef();
92
+
93
+ OptionalTensorRef* get() {
94
+ return reinterpret_cast<OptionalTensorRef*>(data_.data());
95
+ }
96
+ const OptionalTensorRef* get() const {
97
+ return reinterpret_cast<const OptionalTensorRef*>(data_.data());
98
+ }
99
+
100
+ OptionalTensorRef& operator*() {
101
+ return *get();
102
+ }
103
+ const OptionalTensorRef& operator*() const {
104
+ return *get();
105
+ }
106
+ OptionalTensorRef* operator->() {
107
+ return get();
108
+ }
109
+ const OptionalTensorRef* operator->() const {
110
+ return get();
111
+ }
112
+
113
+ const Tensor& getTensor() const;
114
+ };
115
+ } // namespace internal
116
+
117
+ struct TORCH_API OperandInfo {
118
+ using StrideVector = SmallVector<int64_t, 6>;
119
+ OperandInfo() = default;
120
+ C10_ALWAYS_INLINE explicit OperandInfo(c10::MaybeOwned<TensorBase>&& t) {
121
+ if (t->defined()) {
122
+ device = t->device();
123
+ target_dtype = t->scalar_type();
124
+ current_dtype = target_dtype;
125
+ }
126
+ tensor(std::move(t));
127
+ validate();
128
+ }
129
+
130
+ C10_ALWAYS_INLINE OperandInfo(const OperandInfo&) = default;
131
+ C10_ALWAYS_INLINE OperandInfo& operator=(const OperandInfo&) = default;
132
+ C10_ALWAYS_INLINE OperandInfo(OperandInfo&&) noexcept = default;
133
+ C10_ALWAYS_INLINE OperandInfo& operator=(OperandInfo&&) noexcept = default;
134
+ C10_ALWAYS_INLINE ~OperandInfo() = default;
135
+
136
+ /// The data pointer. This may be different from tensor->data_ptr() if the
137
+ /// iterator is split.
138
+ void* data = nullptr;
139
+
140
+ /// Stride after broadcasting. The stride is in bytes, not number of elements.
141
+ StrideVector stride_bytes;
142
+
143
+ /// The desired device and type for the operand. For inputs, this specifies
144
+ /// that the input should be converted to this type if necessary. For outputs,
145
+ /// this specifies which type to allocate. target_dtype and device are
146
+ /// initialized with the dtype and device of the tensor but during type
147
+ /// promotion the target_dtype value can become different from the tensor's dtype;
148
+ /// also, during type promotion target_dtype and device can be set for an
149
+ /// undefined tensor so that tensor can be properly constructed later.
150
+ c10::optional<Device> device = c10::nullopt;
151
+ ScalarType target_dtype = ScalarType::Undefined;
152
+ // Caches the dtype of the tensor, because scalar_type is an expensive operation.
153
+ // If dtype of the tensor is changed (e.g. as a result of type promotion or in
154
+ // allocate_outputs), this
155
+ // value should be changed too.
156
+ ScalarType current_dtype = ScalarType::Undefined;
157
+
158
+ bool is_device_defined() const {
159
+ return device.has_value();
160
+ }
161
+ bool is_type_defined() const {
162
+ return target_dtype != ScalarType::Undefined;
163
+ }
164
+ TensorOptions options() const {
165
+ return TensorOptions(target_dtype).device(device);
166
+ }
167
+
168
+ bool is_output = false;
169
+
170
+ bool will_resize = false;
171
+
172
+ bool is_read_write = false;
173
+
174
+ void validate() {
175
+ TORCH_CHECK(
176
+ !tensor_base_->defined() || tensor_base_->layout() == kStrided,
177
+ "unsupported tensor layout: ",
178
+ tensor_base_->layout());
179
+ }
180
+
181
+ /// The tensor operand. Note that the strides, data pointer, and
182
+ /// other attributes may differ due to dimension reordering and
183
+ /// coalescing.
184
+ const Tensor& tensor() const {
185
+ return tensor_storage_.getTensor();
186
+ }
187
+ const TensorBase& tensor_base() const {
188
+ return *tensor_base_;
189
+ }
190
+ void tensor(c10::MaybeOwned<TensorBase>&& tensor);
191
+
192
+ // Save the original tensor operand in cases when an output is modified
193
+ // (e.g. if dtype is changed)
194
+ const Tensor& original_tensor() const {
195
+ return original_tensor_storage_.getTensor();
196
+ }
197
+ const TensorBase& original_tensor_base() const {
198
+ return *original_tensor_base_;
199
+ }
200
+
201
+ // Set tensor to a new value, and store the old tensor value in
202
+ // original_tensor Should only ever be called once for the lifetime of an
203
+ // operand
204
+ void exchange_tensor(c10::MaybeOwned<TensorBase>&& new_tensor);
205
+
206
+ // Move original_tensor back into tensor, exchange_tensor must have been
207
+ // called before
208
+ void restore_original_tensor();
209
+
210
+ private:
211
+ c10::MaybeOwned<TensorBase> tensor_base_;
212
+ c10::MaybeOwned<TensorBase> original_tensor_base_ =
213
+ c10::MaybeOwned<TensorBase>::owned(c10::in_place);
214
+
215
+ // We store TensorBase visibly in the header to allow inline access.
216
+ // However, we sometimes need a genuine `const Tensor &` for the
217
+ // TensorIterator API. So, we also store a non-owning `Tensor`
218
+ // object in these `_storage_` variables.
219
+ internal::OpaqueOptionalTensorRef tensor_storage_;
220
+ internal::OpaqueOptionalTensorRef original_tensor_storage_;
221
+ };
222
+
223
+ struct SplitUntil32Bit;
224
+
225
+ enum class FastSetupType : uint8_t {
226
+ NONE,
227
+ CONTIGUOUS,
228
+ CHANNELS_LAST,
229
+ NON_OVERLAPPING_DENSE
230
+ };
231
+
232
+ class TensorIteratorConfig;
233
+ struct TensorIterator;
234
+
235
+ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
236
+ using DimMask = std::bitset<64>;
237
+ using PtrVector = SmallVector<char*, 4>;
238
+ using StrideVector = SmallVector<int64_t, 6>;
239
+
240
+ TensorIteratorBase();
241
+ void build(TensorIteratorConfig&);
242
+
243
+ // The inner-loop function operates on the fastest moving dimension. It
244
+ // implements element-wise operations in terms of 1-d strided tensors.
245
+ //
246
+ // Arguments:
247
+ // data: data pointers for each operand (length `ntensors`)
248
+ // strides: stride for each operand (length `ntensors`)
249
+ // size: size of inner loop
250
+ //
251
+ // The `size` often matches shape[0], but may be smaller due to
252
+ // parallelization of the inner loop.
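+ //
+ // A sketch of a 1-d loop as it could be passed to for_each() below
+ // (illustrative only; assumes a single float output at data[0] and a single
+ // float input at data[1], with strides given in bytes):
+ //
+ //   iter.for_each([](char** data, const int64_t* strides, int64_t n) {
+ //     for (int64_t i = 0; i < n; i++) {
+ //       float* out = reinterpret_cast<float*>(data[0] + i * strides[0]);
+ //       const float* in = reinterpret_cast<const float*>(data[1] + i * strides[1]);
+ //       *out = *in + 1.0f;
+ //     }
+ //   });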
253
+ using loop2d_t = c10::function_ref<
254
+ void(char** data, const int64_t* strides, int64_t size0, int64_t size1)>;
255
+
256
+ using loop_subiter_t = c10::function_ref<void(TensorIteratorBase& subiter)>;
257
+
258
+ void foreach_reduced_elt(loop_subiter_t loop, bool parallelize = true);
259
+
260
+ int ndim() const {
261
+ return static_cast<int>(shape_.size());
262
+ }
263
+ IntArrayRef shape() const {
264
+ return shape_;
265
+ }
266
+ int64_t numel() const;
267
+ int ntensors() const {
268
+ return static_cast<int>(operands_.size());
269
+ }
270
+ int noutputs() const {
271
+ return num_outputs_;
272
+ }
273
+ int ninputs() const {
274
+ return ntensors() - noutputs();
275
+ }
276
+ IntArrayRef view_offsets() const {
277
+ return view_offsets_;
278
+ }
279
+
280
+ /// number of elements in the output operand. this is the same as numel() for
281
+ /// operations that are not reductions.
282
+ int64_t num_output_elements() const;
283
+
284
+ /// number of reduced dimensions in a reduction operation
285
+ int num_reduce_dims() const;
286
+
287
+ /// 1-dimensional iteration and no buffering or type conversion
288
+ bool is_trivial_1d() const;
289
+ /// Reducible to 1-dimensional and all operands are contiguous
290
+ bool is_contiguous() const;
291
+ bool is_dim_reduced(int dim) const;
292
+
293
+ /// Accessors for each operand
294
+ IntArrayRef strides(int arg) const {
295
+ return operands_[arg].stride_bytes;
296
+ }
297
+ void* data_ptr(int arg) const;
298
+ ScalarType dtype(int arg = 0) const {
299
+ return operands_[arg].current_dtype;
300
+ }
301
+ ScalarType common_dtype() const {
302
+ TORCH_INTERNAL_ASSERT(
303
+ common_dtype_ != ScalarType::Undefined,
304
+ "Queried for invalid common dtype!");
305
+ return common_dtype_;
306
+ }
307
+ ScalarType input_dtype(int arg = 0) const {
308
+ return operands_[num_outputs_ + arg].current_dtype;
309
+ }
310
+ Device device(int arg = 0) const {
311
+ return operands_[arg].device.value();
312
+ }
313
+ c10::DeviceType device_type(int arg = 0) const {
314
+ return device(arg).type();
315
+ }
316
+ int64_t element_size(int arg) const {
317
+ return static_cast<int64_t>(elementSize(dtype(arg)));
318
+ }
319
+ bool is_scalar(int arg) const;
320
+ bool is_cpu_scalar(int arg) const;
321
+
322
+ const TensorBase& tensor_base(int arg) const {
323
+ return operands_[arg].tensor_base();
324
+ }
325
+ const Tensor& tensor(int arg) const {
326
+ return operands_[arg].tensor();
327
+ }
328
+
329
+ const TensorBase& output_base(int arg = 0) const {
330
+ AT_ASSERT(arg < num_outputs_);
331
+ return tensor_base(arg);
332
+ }
333
+
334
+ const Tensor& output(int arg = 0) const {
335
+ AT_ASSERT(arg < num_outputs_);
336
+ return tensor(arg);
337
+ }
338
+
339
+ const TensorBase& input_base(int arg = 0) const {
340
+ AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
341
+ return tensor_base(num_outputs_ + arg);
342
+ }
343
+ const Tensor& input(int arg = 0) const {
344
+ AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
345
+ return tensor(num_outputs_ + arg);
346
+ }
347
+
348
+ // Copies from temporary outputs back to the original outputs
349
+ // NOTE: only used on CPU
350
+ void cast_outputs();
351
+
352
+ /// Removes an operand from this iterator
353
+ void remove_operand(int arg);
354
+ /// Shrinks an iterated dimension
355
+ void narrow(int dim, int64_t start, int64_t size);
356
+ /// Narrows every dim after and including `start_dim` to size one.
357
+ void select_all_keeping_dim(int start_dim, IntArrayRef starts);
358
+ /// Replaces the data pointer for the operand at index `arg`.
359
+ /// The new pointer should have the same sizes, strides and dtype as the
360
+ /// original
361
+ void unsafe_replace_operand(int arg, void* data);
362
+
363
+ /// Splits this TensorIterator into two iterators. Together they iterate over
364
+ /// the entire operation. Used by `with_32bit_indexing()`.
365
+ std::unique_ptr<TensorIterator> split(int dim);
366
+
367
+ /// Returns the dimension with the largest extent: (size[dim]-1) * stride[dim]
368
+ int get_dim_to_split() const;
369
+
370
+ template <typename T>
371
+ T scalar_value(int arg) {
372
+ auto& op = operands_[arg];
373
+ return c10::fetch_and_cast<T>(op.tensor_base().scalar_type(), op.data);
374
+ }
375
+
376
+ /// Return scalar value from original_tensor_base if it is defined. When
377
+ /// common_dtype is Half, casting scalar input to common_dtype might overflow.
378
+ /// If the scalar is already given in the type of Half, then return the scalar
379
+ /// value from tensor_base.
380
+ template <typename T>
381
+ T original_scalar_value(int arg) {
382
+ auto& original_tensor_base = operands_[arg].original_tensor_base();
383
+ if (original_tensor_base.defined()) {
384
+ TORCH_INTERNAL_ASSERT(
385
+ original_tensor_base.scalar_type() != common_dtype());
386
+ return c10::fetch_and_cast<T>(
387
+ original_tensor_base.scalar_type(), original_tensor_base.data_ptr());
388
+ } else {
389
+ return scalar_value<T>(arg);
390
+ }
391
+ }
392
+
393
+ private:
394
+ template <typename loop1d_t>
395
+ auto loop_2d_from_1d(const loop1d_t& loop) {
396
+ return
397
+ [loop, ntensor = ntensors()](
398
+ char** base, const int64_t* strides, int64_t size0, int64_t size1) {
399
+ PtrVector data(base, base + ntensor);
400
+ const int64_t* outer_strides = &strides[ntensor];
401
+ for (const auto i : c10::irange(size1)) {
402
+ if (i > 0) {
403
+ for (const auto arg : c10::irange(ntensor)) {
404
+ data[arg] += outer_strides[arg];
405
+ }
406
+ }
407
+ loop(data.data(), strides, size0);
408
+ }
409
+ };
410
+ }
411
+
412
+ public:
413
+ template <
414
+ typename loop1d_t,
415
+ std::enable_if_t<
416
+ std::is_convertible<
417
+ loop1d_t,
418
+ c10::function_ref<
419
+ void(char**, const int64_t* strides, int64_t size)>>::value,
420
+ int> = 0>
421
+ void for_each(loop1d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE) {
422
+ for_each(loop_2d_from_1d(loop), grain_size);
423
+ }
424
+
425
+ void for_each(loop2d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE);
426
+
427
+ void parallel_reduce(loop2d_t loop);
428
+
429
+ template <
430
+ typename loop1d_t,
431
+ std::enable_if_t<
432
+ std::is_convertible<
433
+ loop1d_t,
434
+ c10::function_ref<
435
+ void(char**, const int64_t* strides, int64_t size)>>::value,
436
+ int> = 0>
437
+ void serial_for_each(loop1d_t loop, Range range) {
438
+ serial_for_each(loop_2d_from_1d(loop), range);
439
+ }
440
+
441
+ void serial_for_each(loop2d_t loop, Range range) const;
442
+
443
+ /// Create a strides array for a Tensor with shape of this iterator. The
444
+ /// parameter `element_size` specifies the size of Tensor's data type in
445
+ /// bytes (e.g. `4` for `float`)
446
+ StrideVector compatible_stride(int element_size) const;
447
+
448
+ /// Inverts the re-ordering done by reorder_dimensions. This can only be
449
+ /// called *before* coalesce_dimensions() is called.
450
+ DimVector invert_perm(IntArrayRef input) const;
451
+
452
+ /// Reapplies the same re-ordering as is done by reorder_dimensions. This can
453
+ /// only be called *before* coalesce_dimensions() is called.
454
+ DimVector apply_perm_and_mul(IntArrayRef input, int mul) const;
455
+
456
+ /// Helper functions for CPU iteration
457
+ StrideVector get_dim_strides(int dim) const;
458
+ StrideVector get_strides() const;
459
+ StrideVector get_inner_strides() const {
460
+ return get_dim_strides(0);
461
+ }
462
+ PtrVector get_base_ptrs() const;
463
+
464
+ // Helper functions for advanced stride manipulations (e.g. torch.flip)
465
+ void _unsafe_set_arg_strides(const int arg, IntArrayRef strides) {
466
+ operands_[arg].stride_bytes = strides;
467
+ }
468
+ void _unsafe_set_arg_data(const int arg, void* data) {
469
+ operands_[arg].data = data;
470
+ }
471
+
472
+ /// true if the stride computation can use 32-bit arithmetic. Used by GPU
473
+ /// kernels
474
+ bool can_use_32bit_indexing() const;
475
+
476
+ /// An "iterable" object that recursively splits this iterator into
477
+ /// sub-iterators that can use 32-bit indexing.
478
+ SplitUntil32Bit with_32bit_indexing() const;
479
+
480
+ /// If the kernel should accumulate into the output. Only relevant for CUDA
481
+ /// reductions.
482
+ bool should_accumulate() const {
483
+ return accumulate_;
484
+ }
485
+
486
+ /// Whether this iterator produces the actual output,
487
+ /// as opposed to something that will be accumulated further. Only relevant
488
+ /// for CUDA reductions.
489
+ bool is_final_output() const {
490
+ return final_output_;
491
+ }
492
+
493
+ bool has_contiguous_first_dim() const {
494
+ if (ndim() == 0) {
495
+ return true;
496
+ }
497
+
498
+ int num_tensors = ntensors();
499
+ for (const auto i : c10::irange(num_tensors)) {
500
+ if (strides(i)[0] != element_size(i)) {
501
+ return false;
502
+ }
503
+ }
504
+ return true;
505
+ }
506
+
507
+ void set_output_raw_strided(
508
+ int64_t output_idx,
509
+ IntArrayRef sizes,
510
+ IntArrayRef strides,
511
+ TensorOptions options,
512
+ DimnameList names) override;
513
+
514
+ #define TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, maybestatic) \
515
+ maybestatic void methodname( \
516
+ TensorBase&& out, const TensorBase& a, const TensorBase& b) = delete; \
517
+ maybestatic void methodname( \
518
+ const TensorBase& out, TensorBase&& a, const TensorBase& b) = delete; \
519
+ maybestatic void methodname( \
520
+ const TensorBase& out, const TensorBase& a, TensorBase&& b) = delete; \
521
+ maybestatic void methodname( \
522
+ TensorBase&& out, TensorBase&& a, const TensorBase& b) = delete; \
523
+ maybestatic void methodname( \
524
+ TensorBase&& out, const TensorBase& a, TensorBase&& b) = delete; \
525
+ maybestatic void methodname( \
526
+ const TensorBase& out, TensorBase&& a, TensorBase&& b) = delete; \
527
+ maybestatic void methodname( \
528
+ TensorBase&& out, TensorBase&& a, TensorBase&& b) = delete;
529
+
530
+ #define TORCH_DISALLOW_TEMPORARIES(methodname) \
531
+ TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, )
532
+
533
+ void build_binary_float_op(
534
+ const TensorBase& out,
535
+ const TensorBase& a,
536
+ const TensorBase& b);
537
+ void build_borrowing_binary_float_op(
538
+ const TensorBase& out,
539
+ const TensorBase& a,
540
+ const TensorBase& b);
541
+ TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_float_op)
542
+ void build_binary_op(
543
+ const TensorBase& out,
544
+ const TensorBase& a,
545
+ const TensorBase& b);
546
+ void build_borrowing_binary_op(
547
+ const TensorBase& out,
548
+ const TensorBase& a,
549
+ const TensorBase& b);
550
+ TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_op)
551
+ void build_unary_float_op(const TensorBase& out, const TensorBase& a);
552
+ void build_borrowing_unary_float_op(
553
+ const TensorBase& out,
554
+ const TensorBase& a);
555
+ TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_float_op)
556
+ void build_unary_op(const TensorBase& out, const TensorBase& a);
557
+ // Odd special case needed for pow. Has to borrow the output because
558
+ // it's a structured kernel, but the argument is potentially a copy.
559
+ void build_output_borrowing_argument_owning_unary_op(
560
+ const TensorBase& out,
561
+ const TensorBase& a);
562
+ void build_borrowing_unary_op(const TensorBase& out, const TensorBase& a);
563
+ TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_op)
564
+ void build_borrowing_unary_force_boolean_op(
565
+ const TensorBase& out,
566
+ const TensorBase& a);
567
+ TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_force_boolean_op)
568
+ void build_comparison_op(
569
+ const TensorBase& out,
570
+ const TensorBase& a,
571
+ const TensorBase& b);
572
+ void build_borrowing_comparison_op(
573
+ const TensorBase& out,
574
+ const TensorBase& a,
575
+ const TensorBase& b);
576
+ TORCH_DISALLOW_TEMPORARIES(build_borrowing_comparison_op)
577
+ // Another special case: we need to own the second argument for comparison
578
+ // ops.
579
+ void build_borrowing_except_last_argument_comparison_op(
580
+ const TensorBase& out,
581
+ const TensorBase& a,
582
+ const TensorBase& b);
583
+ void build_ternary_op(
584
+ const TensorBase& out,
585
+ const TensorBase& a,
586
+ const TensorBase& b,
587
+ const TensorBase& c);
588
+
589
+ #undef TORCH_DISALLOW_TEMPORARIES
590
+ protected:
591
+ // Mutable reference as it moves tensors out of TensorIteratorConfig
592
+ void populate_operands(TensorIteratorConfig&);
593
+ void mark_outputs();
594
+ void mark_resize_outputs(const TensorIteratorConfig&);
595
+ void compute_mem_overlaps(const TensorIteratorConfig&);
596
+ void compute_shape(const TensorIteratorConfig&);
597
+ void compute_strides(const TensorIteratorConfig&);
598
+ void reorder_dimensions();
599
+ void permute_dimensions(IntArrayRef perm);
600
+ void compute_types(const TensorIteratorConfig&);
601
+ ScalarType compute_common_dtype();
602
+ void allocate_or_resize_outputs();
603
+ bool fast_set_up(const TensorIteratorConfig&);
604
+ FastSetupType compute_fast_setup_type(const TensorIteratorConfig&);
605
+ void compute_names(const TensorIteratorConfig&);
606
+ void propagate_names_to_outputs();
607
+ void coalesce_dimensions();
608
+
609
+ protected:
610
+ /// Records the "computation" shape of the output tensor. The computation
611
+ /// shape is different from the regular shape in a few ways:
612
+ ///
613
+ /// - The shape may be permuted (via permute_dimensions) so that we
614
+ /// process the dimensions in the most computationally efficient order
615
+ /// (rather than the logical order given to us by the users.)
616
+ /// - The shape may have adjacent dimensions collapsed (via
617
+ /// coalesce_dimensions) so that we minimize the number of
618
+ /// dimensions we have to explicitly iterate over. For example,
619
+ /// a pointwise operation on a contiguous tensor "computationally"
620
+ /// consists of only a single dimension.
621
+ ///
622
+ /// In other words, the computation shape is the output shape as it
623
+ /// actually matters for implementing the kernel, but not necessarily the
624
+ /// output shape that the user will see in the end.
625
+ ///
626
+ /// The lifecycle of mutations to shape_ in TensorIterator:
627
+ /// - declare_static_shape() sets an initial shape explicitly
628
+ /// provided by user, otherwise
629
+ /// - compute_shape() computes the true (non-computational) shape
630
+ /// specified by the user.
631
+ /// - reorder_dimensions() reorders dimensions to improve coalescing.
632
+ /// - coalesce_dimensions() then coalesces adjacent dimensions when
633
+ /// possible.
634
+ ///
635
+ /// The shape may also be further modified if we create sub-TensorIterators,
636
+ /// e.g., via narrow or select_all_keeping_dim.
637
+ DimVector shape_;
638
+
639
+ /// Temporarily records the permutation computed by reorder_dimensions.
640
+ /// This permutation maps the computation output dimension (dim) to
641
+ /// the original true output dimension (perm_[dim]). It is used by
642
+ /// invert_perm to undo the permutation. After coalesce_dimensions is
643
+ /// called, the permutation is no longer valid (as, in general, there
644
+ /// is no permutation that will make computation dimensions to
645
+ /// output dimensions); methods that manipulate perm_ are obligated
646
+ /// to test that !has_coalesced_dimensions
647
+ DimVector perm_;
648
+
649
+ /// Has coalesce_dimensions() (or any moral equivalent, e.g., fast_build())
650
+ /// been called? This is SOLELY used to check validity of perm_.
651
+ bool has_coalesced_dimensions_ = false;
652
+
653
+ /// Whether iteration must be fixed. This disables dimension permuting and
654
+ /// also changes how for_each divides work among threads.
655
+ bool enforce_linear_iteration_ = false;
656
+
657
+ /// The index offsets into the original tensors for each dimension.
658
+ /// This is only non-zero when you narrow() a TensorIterator (e.g.,
659
+ /// when you make sub-TensorIterators).
660
+ DimVector view_offsets_;
661
+
662
+ /// The computed names of the output tensor. Computed by compute_names()
663
+ NameVector names_;
664
+
665
+ /// The operands of the TensorIterator: both the inputs and outputs. The
666
+ /// outputs MUST come first in the operands_ list. There is always an
667
+ /// operand for each output of the TensorIterator, even if TensorIterator
668
+ /// will ultimately be responsible for allocating the output; in those
669
+ /// cases, tensor is simply undefined (and will be populated later
670
+ /// during build()).
671
+ ///
672
+ /// This list is initially populated prior to build(), but build() mutates
673
+ /// OperandInfo to populate more information.
674
+ SmallVector<OperandInfo, 4> operands_;
675
+
676
+ /// Number of outputs in operands_ (the length of the outputs prefix
677
+ /// in operands_).
678
+ int num_outputs_ = 0;
679
+
680
+ /// Whether or not all operands have the same shape and are 1d+. Having all
681
+ /// the same shape affects whether or not the iterator is eligible for fast
682
+ /// setup.
683
+ bool all_ops_same_shape_ = false;
684
+ /// Whether or not all operands are 0d, this affects type promotion
685
+ bool all_ops_are_scalars_ = false;
686
+
687
+ /// The "computation" dtype of TensorIterator, specifying the dtype in which
688
+ /// TensorIterator will do its internal computation. Typically,
689
+ /// this matches the dtype of the output tensors, but not always!
690
+ ScalarType common_dtype_ = ScalarType::Undefined;
691
+
692
+ /// This is currently defined as kCPU, or the device of the first non-CPU
693
+ /// tensor argument. See TensorIteratorBase::compute_types for details.
694
+ Device common_device_ = kCPU;
695
+
696
+ /// Set by split(), see should_accumulate() and is_final_output()
697
+ bool accumulate_ = false;
698
+ bool final_output_ = true;
699
+
700
+ // From TensorIteratorConfig
701
+ bool is_reduction_ = false;
702
+
703
+ /// Set by populate_operands(), says if we're handling meta tensors
704
+ bool is_meta_ = false;
705
+ };
706
+
707
+ struct TORCH_API TensorIterator final : public TensorIteratorBase {
708
+ TensorIterator() : TensorIteratorBase() {}
709
+ // Slicing is OK, TensorIterator guaranteed NOT to have any fields
710
+ TensorIterator(const TensorIteratorBase& iter) : TensorIteratorBase(iter) {}
711
+
712
+ #define TORCH_DISALLOW_TEMPORARIES(methodname) \
713
+ TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, static)
714
+
715
+ static TensorIterator binary_float_op(
716
+ TensorBase& out,
717
+ const TensorBase& a,
718
+ const TensorBase& b);
719
+ static TensorIterator binary_op(
720
+ TensorBase& out,
721
+ const TensorBase& a,
722
+ const TensorBase& b);
723
+ static TensorIterator borrowing_binary_op(
724
+ const TensorBase& out,
725
+ const TensorBase& a,
726
+ const TensorBase& b);
727
+ TORCH_DISALLOW_TEMPORARIES(borrowing_binary_op)
728
+ static TensorIterator comparison_op(
729
+ TensorBase& out,
730
+ const TensorBase& a,
731
+ const TensorBase& b);
732
+ static TensorIterator unary_op(TensorBase& out, const TensorBase& a);
733
+ static TensorIterator unary_float_op(TensorBase& out, const TensorBase& a);
734
+ static TensorIterator nullary_op(TensorBase& out);
735
+ static TensorIterator borrowing_nullary_op(const TensorBase& out);
736
+ static TensorIterator borrowing_nullary_op(TensorBase&& out) = delete;
737
+ static TensorIterator reduce_op(TensorBase& out, const TensorBase& a);
738
+ static TensorIterator reduce_op(
739
+ TensorBase& out1,
740
+ TensorBase& out2,
741
+ const TensorBase& a);
742
+ #undef TORCH_DISALLOW_TEMPORARIES
743
+ #undef TORCH_DISALLOW_TEMPORARIES_IMPL
744
+
745
+ const Tensor& maybe_get_output(int64_t output_idx) override;
746
+ void set_output_raw_strided(
747
+ int64_t output_idx,
748
+ IntArrayRef sizes,
749
+ IntArrayRef strides,
750
+ TensorOptions options,
751
+ DimnameList names) override;
752
+ };
753
+
754
+ class TORCH_API TensorIteratorConfig final {
755
+ public:
756
+ friend struct TensorIteratorBase;
757
+ friend struct TensorIterator;
758
+
759
+ TensorIteratorConfig() = default;
760
+
761
+ C10_DISABLE_COPY_AND_ASSIGN(TensorIteratorConfig);
762
+
763
+ /// Construction
764
+ // Stores input/output Tensors without incrementing the reference count.
765
+ // Important: the outputs have to be added before the inputs.
766
+ TensorIteratorConfig& add_output(const TensorBase& output) {
767
+ return add_borrowed_output(output);
768
+ }
769
+ TensorIteratorConfig& add_input(const TensorBase& input) {
770
+ return add_borrowed_input(input);
771
+ }
772
+
773
+ // Borrowing from temporaries is unlikely to go well.
774
+ TensorIteratorConfig& add_output(TensorBase&& output) = delete;
775
+ TensorIteratorConfig& add_input(TensorBase&& input) = delete;
776
+
777
+ // Stores input/output Tensors while incrementing the reference count.
778
+ // Note that add_{in,out}put are nearly always what you
779
+ // want, and the exception (adding an unnamed temporary) won't
780
+ // compile.
781
+ TensorIteratorConfig& add_owned_output(const TensorBase& output);
782
+ TensorIteratorConfig& add_owned_input(const TensorBase& input);
783
+
784
+ // Advanced API: stores input/output Tensors without incrementing
785
+ // the reference count. The caller must ensure that these Tensors
786
+ // live at least as long as this TensorIteratorConfig and any
787
+ // TensorIteratorBase built from this TensorIteratorConfig.
788
+ // Important: the outputs have to be added before the inputs.
789
+ TensorIteratorConfig& add_borrowed_output(const TensorBase& output);
790
+ TensorIteratorConfig& add_borrowed_input(const TensorBase& input);
791
+
792
+ // Borrowing from temporaries is unlikely to go well.
793
+ TensorIteratorConfig& add_borrowed_output(TensorBase&& output) = delete;
794
+ TensorIteratorConfig& add_borrowed_input(TensorBase&& input) = delete;
795
+
796
+ // Sets the check_mem_overlap_ flag, which is true by default.
797
+ // If true, inputs are checked for partial overlap with the outputs and
798
+ // outputs are checked for internal overlap (e.g. broadcasted views). An error
799
+ // is raised if unacceptable overlap is detected.
800
+ // If you're migrating an existing operator to using TensorIterator, please
801
+ // consider if the previous implementation checked memory overlap. If it did
802
+ // not, and if the operator is idempotent (for example, Tensor.fill_(0)), then
803
+ // checking memory overlap is BC-breaking. Please don't check memory overlap
804
+ // in that case.
805
+ TensorIteratorConfig& set_check_mem_overlap(bool check_mem_overlap) {
806
+ check_mem_overlap_ = check_mem_overlap;
807
+ return *this;
808
+ }
809
+
810
+ // Sets the check_all_same_dtype_ flag, which is true by default
811
+ // If true, checks that all inputs and defined outputs have the same dtype
812
+ // Setting either of promote_inputs_to_common_dtype_
813
+ // or cast_common_dtype_to_outputs_ to true will set
814
+ // check_all_same_dtype_ to false.
815
+ TensorIteratorConfig& check_all_same_dtype(const bool _check_all_same_dtype) {
816
+ check_all_same_dtype_ = _check_all_same_dtype;
817
+ return *this;
818
+ }
819
+
820
+ // Sets the check_all_same_device_ flag, which is true by default
821
+ // If true, all operands must be on the same device, with the possible
822
+ // exception of CPU scalars, which can be passed to some CUDA kernels
823
+ // as kernel arguments.
824
+ TensorIteratorConfig& check_all_same_device(
825
+ const bool _check_all_same_device) {
826
+ check_all_same_device_ = _check_all_same_device;
827
+ return *this;
828
+ }
829
+
830
+ // Sets the enforce_safe_casting_to_output_ flag, which is false by default
831
+ // If true, the iterator's "common dtype" must be computable
832
+ // (see the [Common Dtype Computation] note) and
833
+ // canCast(common dtype, output dtype) must be true for all outputs.
834
+ TensorIteratorConfig& enforce_safe_casting_to_output(
835
+ const bool _enforce_safe_casting_to_output) {
836
+ enforce_safe_casting_to_output_ = _enforce_safe_casting_to_output;
837
+ return *this;
838
+ }
839
+
840
+ // Sets the enforce_linear_iteration_ flag, which is false by default.
841
+ // If true, iteration goes in the same order as a C-contiguous tensor
842
+ // is laid out in memory, i.e. the last dimension iterates fastest.
843
+ //
844
+ // This iteration order can be less efficient and may even prevent
845
+ // vectorization. So only use if the correctness of your kernel depends on it.
846
+ TensorIteratorConfig& enforce_linear_iteration(
847
+ const bool _enforce_linear_iteration = true) {
848
+ enforce_linear_iteration_ = _enforce_linear_iteration;
849
+ return *this;
850
+ }
851
+
852
+ // Sets the promote_inputs_to_common_dtype_ flag, which is false by default
853
+ // If true, the iterator's "common dtype" is always computed (see the
854
+ // [Common Dtype Computation] note) and, on the CPU, temporary copies of
855
+ // the inputs in the common dtype are passed as the actual inputs to
856
+ // the operation.
857
+ // Setting this flag to true sets check_all_same_dtype_ to false.
858
+ TensorIteratorConfig& promote_inputs_to_common_dtype(
859
+ const bool _promote_inputs_to_common_dtype) {
860
+ promote_inputs_to_common_dtype_ = _promote_inputs_to_common_dtype;
861
+ if (_promote_inputs_to_common_dtype) {
862
+ check_all_same_dtype_ = false;
863
+ }
864
+ return *this;
865
+ }
866
+
867
+ // Sets the promote_integer_inputs_to_float_ flag, which is false by default
868
+ // NOTE: If set to true, the promote_inputs_to_common_dtype_ must also be
869
+ // true. If true, if the iterator's "common dtype" is an integral type
870
+ // (including bool)
871
+ // then it is changed to the default float scalar type.
872
+ TensorIteratorConfig& promote_integer_inputs_to_float(
873
+ const bool _promote_integer_inputs_to_float) {
874
+ promote_integer_inputs_to_float_ = _promote_integer_inputs_to_float;
875
+ TORCH_INTERNAL_ASSERT(
876
+ !promote_integer_inputs_to_float_ || promote_inputs_to_common_dtype_);
877
+ return *this;
878
+ }
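+ // For example (an illustrative sketch; `out`, `a` and `b` are hypothetical
+ // tensors), a division-style op that always computes in floating point could
+ // be configured as:
+ //
+ //   auto iter = TensorIteratorConfig()
+ //       .add_output(out)
+ //       .add_input(a)
+ //       .add_input(b)
+ //       .promote_inputs_to_common_dtype(true)  // must be set before the flag below
+ //       .promote_integer_inputs_to_float(true)
+ //       .build();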
879
+
880
+ TensorIteratorConfig& is_reduction(const bool _is_reduction) {
881
+ is_reduction_ = _is_reduction;
882
+ return *this;
883
+ }
884
+
885
+ TensorIteratorConfig& allow_cpu_scalars(const bool _allow_cpu_scalars) {
886
+ allow_cpu_scalars_ = _allow_cpu_scalars;
887
+ return *this;
888
+ }
889
+
890
+ // Sets the cast_common_dtype_to_outputs_ flag, which is false by default
891
+ // If true, the iterator's "common dtype" must be computable
892
+ // (see the [Common Dtype Computation] note) and, on the CPU, temporary
893
+ // copies of the outputs are passed as the actual output to the operation.
894
+ // These temporaries are then copied to the original outputs after
895
+ // the operation is performed (see cast_outputs()).
896
+ // Setting this flag to true sets check_all_same_dtype_ to false.
897
+ TensorIteratorConfig& cast_common_dtype_to_outputs(
898
+ const bool _cast_common_dtype_to_outputs) {
899
+ cast_common_dtype_to_outputs_ = _cast_common_dtype_to_outputs;
900
+ if (_cast_common_dtype_to_outputs) {
901
+ check_all_same_dtype_ = false;
902
+ }
903
+ return *this;
904
+ }
905
+
906
+ TensorIteratorConfig& resize_outputs(bool resize_outputs) {
907
+ resize_outputs_ = resize_outputs;
908
+ return *this;
909
+ }
910
+
911
+ // Bypass output dtype/device computation and fix the dtype/device as
912
+ // specified here.
913
+ TensorIteratorConfig& declare_static_dtype_and_device(
914
+ ScalarType dtype,
915
+ Device device);
916
+ TensorIteratorConfig& declare_static_dtype(ScalarType dtype);
917
+ TensorIteratorConfig& declare_static_device(Device device);
918
+ TensorIteratorConfig& declare_static_shape(IntArrayRef shape);
919
+ TensorIteratorConfig& declare_static_shape(
920
+ IntArrayRef shape,
921
+ IntArrayRef squash_dims);
922
+
923
+ // It would be better if this was && qualified, but this would be at the cost
924
+ // of a lot of boilerplate above
925
+ TensorIterator build() {
926
+ TensorIterator iter;
927
+ iter.build(*this);
928
+ return iter;
929
+ }
930
+
931
+ private:
932
+ SmallVector<c10::MaybeOwned<TensorBase>, 4> tensors_;
933
+ int num_outputs_ = 0;
934
+ int num_inputs_ = 0;
935
+
936
+ c10::optional<DimVector> static_shape_ = c10::nullopt;
937
+ c10::optional<ScalarType> static_dtype_ = c10::nullopt;
938
+ c10::optional<Device> static_device_ = c10::nullopt;
939
+ bool check_mem_overlap_ = true;
940
+ bool allow_cpu_scalars_ = false;
941
+ bool is_reduction_ = false;
942
+ bool resize_outputs_ = true;
943
+ bool check_all_same_dtype_ = true;
944
+ bool check_all_same_device_ = true;
945
+ bool enforce_safe_casting_to_output_ = false;
946
+ bool enforce_linear_iteration_ = false;
947
+ bool promote_inputs_to_common_dtype_ = false;
948
+ bool promote_integer_inputs_to_float_ = false;
949
+ bool cast_common_dtype_to_outputs_ = false;
950
+ };
951
+
952
+ /// A container-like struct that acts as if it contains splits of a
953
+ /// TensorIterator that can use 32-bit indexing. Taken together the splits cover
954
+ /// the original TensorIterator.
955
+ struct TORCH_API SplitUntil32Bit {
956
+ struct TORCH_API iterator {
957
+ iterator() = default;
958
+ iterator(const TensorIteratorBase& iter);
959
+ iterator(iterator&&) = default;
960
+
961
+ // Guaranteed to be a TensorIterator proper!
962
+ TensorIterator& operator*() const;
963
+ iterator& operator++();
964
+ bool operator==(const iterator& other) const {
965
+ // two iterators are equal if they are the same object or they're both
966
+ // empty
967
+ return this == &other || (vec.empty() && other.vec.empty());
968
+ }
969
+ // needed for C++11 range-based for loop
970
+ bool operator!=(const iterator& other) const {
971
+ return !(*this == other);
972
+ }
973
+
974
+ /// stack of TensorIterators to be split
975
+ std::vector<std::unique_ptr<TensorIterator>> vec;
976
+ };
977
+
978
+ SplitUntil32Bit(const TensorIteratorBase& iter) : iter(iter) {}
979
+
980
+ iterator begin() const;
981
+ iterator end() const;
982
+
983
+ private:
984
+ const TensorIteratorBase& iter;
985
+ };
986
+
987
+ } // namespace at
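For reference, a minimal sketch of how a kernel typically drives TensorIteratorConfig; the tensors `out`, `a`, and `b` are hypothetical, and the sketch assumes the add_output/add_input setters declared earlier in this header:

// Sketch only, not part of TensorIterator.h.
void build_iter_sketch(const at::Tensor& out, const at::Tensor& a, const at::Tensor& b) {
  at::TensorIterator iter = at::TensorIteratorConfig()
      .add_output(out)
      .add_input(a)
      .add_input(b)
      .promote_inputs_to_common_dtype(true)  // mixed input dtypes -> one common dtype
      .cast_common_dtype_to_outputs(true)    // CPU: compute into temporaries, copy back
      .build();
  // SplitUntil32Bit (above) yields sub-iterators that fit 32-bit indexing:
  for (auto& sub_iter : at::SplitUntil32Bit(iter)) {
    (void)sub_iter;  // launch a kernel with sub_iter here
  }
}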
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h ADDED
@@ -0,0 +1,137 @@
1
+ #pragma once
2
+
3
+ #include <ATen/DimVector.h>
4
+ #include <ATen/core/Dimname.h>
5
+ #include <c10/core/TensorOptions.h>
6
+ #include <c10/util/strides.h>
7
+
8
+ namespace at {
9
+
10
+ class Tensor;
11
+
12
+ namespace impl {
13
+
14
+ // Use this to define the prototype for a meta function. There are two
15
+ // versions; one that takes one argument (just the operator name), or FUNC2
16
+ // variant that takes two arguments (operator name and overload name).
17
+ //
18
+ // Example usage:
19
+ //
20
+ // TORCH_META_FUNC2(add, Tensor) (
21
+ // const Tensor& self, const Tensor& other
22
+ // ) {
23
+ // ... compute sizes and options ...
24
+ // set_output(sizes, options);
25
+ // }
26
+ //
27
+ #define TORCH_META_FUNC(name) void structured_##name::meta
28
+ #define TORCH_META_FUNC2(name, overload) \
29
+ void structured_##name##_##overload::meta
30
+
31
+ // These are versions of TORCH_META_FUNC(2) that include a precompute_out struct
32
+ // as a return value. They should be used when the kernel in question has
33
+ // precomputed values declared in native_functions.yaml and the corresponding
34
+ // implementation should return an instance of the aforementioned struct.
35
+ #define TORCH_PRECOMPUTE_META_FUNC(name) \
36
+ structured_##name::meta_return_ty structured_##name::meta
37
+ #define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \
38
+ structured_##name##_##overload::meta_return_ty \
39
+ structured_##name##_##overload::meta
40
+
41
+ // Use this to create a precompute struct in a meta function.
42
+ #define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<>
43
+ #define TORCH_PRECOMPUTE_STRUCT2(name, overload) \
44
+ structured_##name##_##overload::precompute_out<>
45
+
46
+ // Use this to define the prototype for an implementation. This takes only
47
+ // one argument, which is the name of the dispatch key entry you're
48
+ // implementing.
49
+ //
50
+ // Example usage:
51
+ //
52
+ // TORCH_IMPL_FUNC(add_cpu) (
53
+ // Tensor& result, const Tensor& self, const Tensor& other
54
+ // ) {
55
+ // ... do the actual implementation ...
56
+ // }
57
+ //
58
+ #define TORCH_IMPL_FUNC(name) void structured_##name::impl
59
+
60
+ // Base class for all structured kernel classes. The set_output virtual
61
+ // method is varied depending on whether or not the operator is
62
+ // functional/out/inplace, and could also be specialized for CPU/CUDA/etc
63
+ // (although presently it isn't).
64
+ //
65
+ // A notable subclass of this interface is TensorIteratorBase.
66
+ struct TORCH_API MetaBase {
67
+ MetaBase() = default;
68
+ MetaBase(const MetaBase&) = default;
69
+ MetaBase& operator=(const MetaBase&) = default;
70
+ MetaBase(MetaBase&&) noexcept = default;
71
+ MetaBase& operator=(MetaBase&&) noexcept = default;
72
+ virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;
73
+
74
+ // Note: [set_output_*]
75
+ // See: https://github.com/pytorch/pytorch/issues/69813
76
+ // Whenever defining the output properties in the META function of a
77
+ // structured kernel (what was usually done with `set_output`), use one of
78
+ // these 3 variants, instead. In order to decide which variant to use, check
79
+ // the following decision tree:
80
+ //
81
+ // - Can the kernel you are going to implement support output tensors
82
+ // with arbitrary strides?
83
+ // |
84
+ // -- YES: `set_output_raw_strided`
85
+ // |
86
+ // -- NO: Should the output tensor strides be contiguous?
87
+ // |
88
+ // -- YES: `set_output_contiguous`
89
+ // |
90
+ // -- NO: `set_output_strided`
91
+ //
92
+ // Use this function whenever the kernel requires specific strides for the
93
+ // output. If `strides` does not match the given output strides, proxy outputs
94
+ // will be created and passed to the IMPL function.
95
+ virtual void set_output_strided(
96
+ int64_t output_idx,
97
+ IntArrayRef sizes,
98
+ IntArrayRef strides,
99
+ TensorOptions options,
100
+ DimnameList names = {}) {
101
+ TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
102
+ }
103
+
104
+ // Use this function whenever the kernel knows how to handle arbitrary strided
105
+ // outputs. This function has the same behavior as the old `set_output`: it
106
+ // will only re-stride if the given output was resized.
107
+ virtual void set_output_raw_strided(
108
+ int64_t output_idx,
109
+ IntArrayRef sizes,
110
+ IntArrayRef strides_hint,
111
+ TensorOptions options,
112
+ DimnameList names = {}) {
113
+ TORCH_INTERNAL_ASSERT(false, "set_output_raw_strided not implemented.");
114
+ }
115
+
116
+ // Use this function if the kernel requires contiguous strides.
117
+ // Alias for `set_output_strided`, but with contiguous strides.
118
+ void set_output_contiguous(
119
+ int64_t output_idx,
120
+ IntArrayRef sizes,
121
+ TensorOptions options,
122
+ DimnameList names = {}) {
123
+ auto strides = c10::contiguous_strides(sizes);
124
+ set_output_strided(output_idx, sizes, strides, options, names);
125
+ }
126
+
127
+ // Returns a reference to an undefined tensor if there is no presupplied
128
+ // output
129
+ const Tensor& maybe_get_output() {
130
+ return maybe_get_output(0);
131
+ }
132
+ virtual ~MetaBase() = default;
133
+ };
134
+
135
+ } // namespace impl
136
+
137
+ } // namespace at
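To make the macros above concrete, a hedged sketch of a structured kernel; the op name `my_relu` is hypothetical and would need matching torchgen output from native_functions.yaml:

// Sketch only, not part of TensorMeta.h.
TORCH_META_FUNC(my_relu)(const at::Tensor& self) {
  // Output mirrors the input's sizes and options, with contiguous strides.
  set_output_contiguous(0, self.sizes(), self.options());
}

TORCH_IMPL_FUNC(my_relu_out)(const at::Tensor& self, const at::Tensor& result) {
  // The framework has already allocated/validated `result` per the meta fn;
  // the kernel body just writes into it.
}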
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h ADDED
@@ -0,0 +1,75 @@
1
+ #pragma once
2
+
3
+ #include <ATen/WrapDimUtils.h>
4
+
5
+ namespace at::namedinference {
6
+
7
+ // TensorName and TensorNames are wrappers around Dimname and DimnameList
8
+ // that contain helper functions to make writing name inference rules easier.
9
+ //
10
+ // A TensorName represents a Dimname associated with some DimnameList (from a
11
+ // Tensor). This encapsulates all the information that is needed to check if
12
+ // names *match* and to *unify* names.
13
+ //
14
+ // Definition: Two names in two tensors *match* if they are equal, or if at
15
+ // least one of them is a wildcard that can be *refined* to the other name.
16
+ //
17
+ // Definition: unify(name, other) fails if the names do not match. Otherwise,
18
+ // it returns the most refined of name and other.
19
+ //
20
+ // Here is an example of checking if two names match.
21
+ // tensor: Tensor[A, None]
22
+ // other: Tensor[A]
23
+ //
24
+ // Let's say we wish to check if tensor.names[-1] matches other.names[-1].
25
+ // None (in tensor) cannot match A (in other) because if the None were refined
26
+ // to A, `tensor` would have duplicate names [A, A]. Therefore we need to check
27
+ // tensor.names [A, None] for the existence of A.
28
+ struct TORCH_API TensorName {
29
+ explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
30
+ : origin_(origin),
31
+ name_(origin[maybe_wrap_dim(
32
+ origin_idx,
33
+ static_cast<int64_t>(origin.size()))]),
34
+ origin_idx_(origin_idx) {}
35
+
36
+ // op_name is only used for error reporting.
37
+ const TensorName& unify(const TensorName& other, const char* op_name) const;
38
+ Dimname toDimname() const;
39
+
40
+ private:
41
+ ArrayRef<Dimname> origin_;
42
+ Dimname name_;
43
+ int origin_idx_; // A named tensor can have at most 64 dims.
44
+
45
+ TORCH_API friend std::ostream& operator<<(
46
+ std::ostream& out,
47
+ const TensorName& tensorname);
48
+ };
49
+
50
+ using TensorNameVec = SmallVector<TensorName, 10>;
51
+
52
+ struct TORCH_API TensorNames {
53
+ explicit TensorNames(ArrayRef<Dimname> names);
54
+
55
+ // Create TensorNames from names[start:end]. Each individual TensorName stores
56
+ // `names`, NOT names[start:end], because the original tensor's names are
57
+ // `names`.
58
+ explicit TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end);
59
+
60
+ // op_name is only used for error reporting.
61
+ TensorNames& unifyFromRightInplace(
62
+ const TensorNames& other,
63
+ const char* op_name = "unify");
64
+ void checkUnique(const char* op_name) const;
65
+
66
+ void append(TensorName&& name);
67
+ std::vector<Dimname> toDimnameVec() const;
68
+
69
+ private:
70
+ explicit TensorNames(TensorNameVec&& names) : names_(names){};
71
+
72
+ TensorNameVec names_;
73
+ };
74
+
75
+ } // namespace at::namedinference
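As a hedged usage sketch (the helper name and op name are hypothetical), right-aligned name inference with these wrappers might look like:

// Sketch only, not part of TensorNames.h.
std::vector<at::Dimname> unify_names_sketch(const at::Tensor& a, const at::Tensor& b) {
  auto result = at::namedinference::TensorNames(a.names());
  result.unifyFromRightInplace(at::namedinference::TensorNames(b.names()), "my_op");
  result.checkUnique("my_op");
  return result.toDimnameVec();
}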
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <c10/core/TensorOptions.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h ADDED
@@ -0,0 +1,87 @@
1
+ #pragma once
2
+ #include <ATen/core/List.h>
3
+ #include <ATen/core/Tensor.h>
4
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
5
+
6
+ #ifndef AT_PER_OPERATOR_HEADERS
7
+ #include <ATen/Functions.h>
8
+ #else
9
+ #include <ATen/ops/equal.h>
10
+ #endif
11
+
12
+ namespace at {
13
+
14
+ // Note [Tensor-subclass-like Tensors]
15
+ // Tensor-subclass-like is defined as:
16
+ // - a Tensor subclass (via __torch_dispatch__ in Python or extending
17
+ // TensorImpl in C++)
18
+ // - anything else that shares the same perils as Tensor subclasses.
19
+ // For example, many Tensor subclasses do not have storage and meta Tensors
20
+ // do not have storage either, so meta Tensors belong here.
21
+ //
22
+ // We should ensure that PyTorch internals support Tensor-subclass-like
23
+ // objects. In particular, Tensor-subclass-like objects struggle with two
24
+ // classes of operations that are problematic for Tensor subclasses:
25
+ // 1. Because some Tensor subclasses do not have storage, .item() or
26
+ // .data_ptr() calls are not good.
27
+ // 2. Certain in-place operations can eliminate the typing of the Tensor
28
+ // subclass. For example:
29
+ // >>> torch.zeros(input.sizes(), grad.options()).diag().copy_(input)
30
+ // If input is a Tensor subclass, then the above ends up either erroring out
31
+ // or returning a regular non-Tensor-subclass Tensor!
32
+
33
+ constexpr auto kFunctorchWrappedTensors = DispatchKeySet(
34
+ {DispatchKey::FuncTorchGradWrapper,
35
+ DispatchKey::FuncTorchBatched,
36
+ DispatchKey::Functionalize});
37
+
38
+ constexpr auto kTensorSubclassLike =
39
+ kFunctorchWrappedTensors |
40
+ DispatchKeySet(
41
+ {// WARNING: DO NOT put combined backend component + functionality keys
42
+ // here, you will incorrectly always match on the functionality key
43
+ // no matter the backend component
44
+ DispatchKey::Batched,
45
+ DispatchKey::Sparse,
46
+ DispatchKey::SparseCsrCPU,
47
+ DispatchKey::SparseCsrCUDA,
48
+ DispatchKey::Python}) |
49
+ DispatchKeySet(BackendComponent::MetaBit);
50
+
51
+ inline bool isTensorSubclassLike(const Tensor& tensor) {
52
+ if (c10::impl::dispatch_mode_enabled())
53
+ return true;
54
+ auto key_set = tensor.unsafeGetTensorImpl()->key_set();
55
+ return !(key_set & kTensorSubclassLike).empty();
56
+ }
57
+
58
+ inline bool areAnyTensorSubclassLike(TensorList tensors) {
59
+ if (c10::impl::dispatch_mode_enabled())
60
+ return true;
61
+ return std::any_of(tensors.begin(), tensors.end(), isTensorSubclassLike);
62
+ }
63
+
64
+ inline bool areAnyOptionalTensorSubclassLike(
65
+ const c10::List<c10::optional<Tensor>>& tensors) {
66
+ if (c10::impl::dispatch_mode_enabled())
67
+ return true;
68
+ return std::any_of(
69
+ tensors.begin(), tensors.end(), [](const optional<Tensor>& opt_tensor) {
70
+ return (
71
+ opt_tensor.has_value() && isTensorSubclassLike(opt_tensor.value()));
72
+ });
73
+ }
74
+
75
+ // Helper function to test the truthfulness of a scalar tensor
76
+ // in a Composite Compliant manner.
77
+ // NOTE: This function expects a scalar tensor of boolean dtype.
78
+ // Eg.
79
+ // Non-Composite Compliant Pattern : (t == 0).all().item<bool>()
80
+ // Composite Compliant Pattern : is_scalar_tensor_true((t == 0).all())
81
+ inline bool is_scalar_tensor_true(const Tensor& t) {
82
+ TORCH_INTERNAL_ASSERT(t.dim() == 0)
83
+ TORCH_INTERNAL_ASSERT(t.scalar_type() == kBool)
84
+ return at::equal(t, t.new_ones({}, t.options()));
85
+ }
86
+
87
+ } // namespace at
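A hedged sketch of the composite-compliant pattern described above; the helper name is hypothetical:

// Sketch only, not part of TensorSubclassLikeUtils.h.
bool all_zero_sketch(const at::Tensor& t) {
  // Instead of (t == 0).all().item<bool>(), which assumes storage,
  // use the pattern documented above:
  return at::is_scalar_tensor_true((t == 0).all());
}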
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h ADDED
@@ -0,0 +1,186 @@
1
+ #pragma once
2
+
3
+ #include <ATen/DimVector.h>
4
+ #include <ATen/EmptyTensor.h>
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/TensorGeometry.h>
7
+ #include <ATen/Utils.h>
8
+
9
+ #include <utility>
10
+
11
+ // These functions are NOT in Utils.h, because this file has a dep on Tensor.h
12
+
13
+ #define TORCH_CHECK_TENSOR_ALL(cond, ...) \
14
+ TORCH_CHECK((cond)._is_all_true().item<bool>(), __VA_ARGS__);
15
+
16
+ namespace at {
17
+
18
+ // The following are utility functions for checking that arguments
19
+ // make sense. These are particularly useful for native functions,
20
+ // which do NO argument checking by default.
21
+
22
+ struct TORCH_API TensorArg {
23
+ const Tensor& tensor;
24
+ const char* name;
25
+ int pos; // 1-indexed
26
+ TensorArg(const Tensor& tensor, const char* name, int pos)
27
+ : tensor(tensor), name(name), pos(pos) {}
28
+ // Try to mitigate any possibility of dangling reference to temporaries.
29
+ TensorArg(Tensor&& tensor, const char* name, int pos) = delete;
30
+ const Tensor* operator->() const {
31
+ return &tensor;
32
+ }
33
+ const Tensor& operator*() const {
34
+ return tensor;
35
+ }
36
+ };
37
+
38
+ struct TORCH_API TensorGeometryArg {
39
+ TensorGeometry tensor;
40
+ const char* name;
41
+ int pos; // 1-indexed
42
+ /* implicit */ TensorGeometryArg(TensorArg arg)
43
+ : tensor(TensorGeometry{arg.tensor}), name(arg.name), pos(arg.pos) {}
44
+ TensorGeometryArg(TensorGeometry tensor, const char* name, int pos)
45
+ : tensor(std::move(tensor)), name(name), pos(pos) {}
46
+ const TensorGeometry* operator->() const {
47
+ return &tensor;
48
+ }
49
+ const TensorGeometry& operator*() const {
50
+ return tensor;
51
+ }
52
+ };
53
+
54
+ // A string describing which function did checks on its input
55
+ // arguments.
56
+ // TODO: Consider generalizing this into a call stack.
57
+ using CheckedFrom = const char*;
58
+
59
+ // The undefined convention: singular operators assume their arguments
60
+ // are defined, but functions which take multiple tensors will
61
+ // implicitly filter out undefined tensors (to make it easier to perform
62
+ // tests which should apply if the tensor is defined, and should not
63
+ // otherwise.)
64
+ //
65
+ // NB: This means that the n-ary operators take lists of TensorArg,
66
+ // not TensorGeometryArg, because the Tensor to TensorGeometry
67
+ // conversion will blow up if you have undefined tensors.
68
+
69
+ TORCH_API std::ostream& operator<<(std::ostream& out, TensorGeometryArg t);
70
+ TORCH_API void checkDim(
71
+ CheckedFrom c,
72
+ const Tensor& tensor,
73
+ const char* name,
74
+ int pos, // 1-indexed
75
+ int64_t dim);
76
+ TORCH_API void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim);
77
+ // NB: this is an inclusive-exclusive range
78
+ TORCH_API void checkDimRange(
79
+ CheckedFrom c,
80
+ const TensorGeometryArg& t,
81
+ int64_t dim_start,
82
+ int64_t dim_end);
83
+ TORCH_API void checkSameDim(
84
+ CheckedFrom c,
85
+ const TensorGeometryArg& t1,
86
+ const TensorGeometryArg& t2);
87
+ TORCH_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
88
+ TORCH_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
89
+ TORCH_API void checkSize(
90
+ CheckedFrom c,
91
+ const TensorGeometryArg& t,
92
+ IntArrayRef sizes);
93
+ TORCH_API void checkSize_symint(
94
+ CheckedFrom c,
95
+ const TensorGeometryArg& t,
96
+ c10::SymIntArrayRef sizes);
97
+ TORCH_API void checkSize(
98
+ CheckedFrom c,
99
+ const TensorGeometryArg& t,
100
+ int64_t dim,
101
+ int64_t size);
102
+ TORCH_API void checkSize_symint(
103
+ CheckedFrom c,
104
+ const TensorGeometryArg& t,
105
+ int64_t dim,
106
+ c10::SymInt size);
107
+ TORCH_API void checkNumel(
108
+ CheckedFrom c,
109
+ const TensorGeometryArg& t,
110
+ int64_t numel);
111
+ TORCH_API void checkSameNumel(
112
+ CheckedFrom c,
113
+ const TensorArg& t1,
114
+ const TensorArg& t2);
115
+ TORCH_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
116
+ TORCH_API void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType s);
117
+ TORCH_API void checkScalarTypes(
118
+ CheckedFrom c,
119
+ const TensorArg& t,
120
+ at::ArrayRef<ScalarType> l);
121
+ TORCH_API void checkSameGPU(
122
+ CheckedFrom c,
123
+ const TensorArg& t1,
124
+ const TensorArg& t2);
125
+ TORCH_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
126
+ TORCH_API void checkSameType(
127
+ CheckedFrom c,
128
+ const TensorArg& t1,
129
+ const TensorArg& t2);
130
+ TORCH_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
131
+ TORCH_API void checkSameSize(
132
+ CheckedFrom c,
133
+ const TensorArg& t1,
134
+ const TensorArg& t2);
135
+ TORCH_API void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors);
136
+ TORCH_API void checkDefined(CheckedFrom c, const TensorArg& t);
137
+ TORCH_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);
138
+
139
+ // FixMe: does TensorArg slow things down?
140
+ TORCH_API void checkBackend(
141
+ CheckedFrom c,
142
+ at::ArrayRef<Tensor> t,
143
+ at::Backend backend);
144
+
145
+ TORCH_API void checkDeviceType(
146
+ CheckedFrom c,
147
+ at::ArrayRef<Tensor> tensors,
148
+ at::DeviceType device_type);
149
+
150
+ TORCH_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);
151
+
152
+ TORCH_API void checkLayout(
153
+ CheckedFrom c,
154
+ at::ArrayRef<Tensor> tensors,
155
+ at::Layout layout);
156
+
157
+ // Methods for getting data_ptr if tensor is defined
158
+ TORCH_API void* maybe_data_ptr(const Tensor& tensor);
159
+ TORCH_API void* maybe_data_ptr(const TensorArg& tensor);
160
+
161
+ TORCH_API void check_dim_size(
162
+ const Tensor& tensor,
163
+ int64_t dim,
164
+ int64_t dim_size,
165
+ int64_t size);
166
+
167
+ namespace detail {
168
+ TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
169
+
170
+ TORCH_API c10::optional<std::vector<int64_t>> computeStride(
171
+ IntArrayRef oldshape,
172
+ IntArrayRef oldstride,
173
+ IntArrayRef newshape);
174
+
175
+ TORCH_API c10::optional<SymDimVector> computeStride(
176
+ c10::SymIntArrayRef oldshape,
177
+ c10::SymIntArrayRef oldstride,
178
+ c10::SymIntArrayRef newshape);
179
+
180
+ TORCH_API c10::optional<DimVector> computeStride(
181
+ IntArrayRef oldshape,
182
+ IntArrayRef oldstride,
183
+ const DimVector& newshape);
184
+
185
+ } // namespace detail
186
+ } // namespace at
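A hedged sketch of how a native function might use these checks; `my_op` and its arguments are hypothetical:

// Sketch only, not part of TensorUtils.h.
void my_op_check_sketch(const at::Tensor& self, const at::Tensor& weight) {
  at::CheckedFrom c = "my_op";
  at::TensorArg self_arg{self, "self", 1};      // pos is 1-indexed, as noted above
  at::TensorArg weight_arg{weight, "weight", 2};
  at::checkAllDefined(c, {self_arg, weight_arg});
  at::checkSameType(c, self_arg, weight_arg);
  at::checkDim(c, self_arg, /*dim=*/2);         // e.g. require a matrix
}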
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h ADDED
@@ -0,0 +1,114 @@
1
+ #pragma once
2
+
3
+ #include <stack>
4
+
5
+ #include <c10/core/InferenceMode.h>
6
+ #include <c10/core/impl/LocalDispatchKeySet.h>
7
+ #include <c10/util/Exception.h>
8
+ #include <c10/util/ThreadLocalDebugInfo.h>
9
+
10
+ #include <ATen/FuncTorchTLS.h>
11
+ #include <ATen/PythonTorchFunctionTLS.h>
12
+ #include <ATen/SavedTensorHooks.h>
13
+ #include <ATen/ThreadLocalPythonObjects.h>
14
+ #include <ATen/record_function.h>
15
+ #include <c10/core/impl/PythonDispatcherTLS.h>
16
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
17
+
18
+ namespace at {
19
+
20
+ // Thread local state contains values that are preserved across
21
+ // thread boundaries (e.g. at::launch/JIT fork, autograd).
22
+ // Note at::parallel_for doesn't preserve TLS across thread boundaries.
23
+ class TORCH_API ThreadLocalState {
24
+ public:
25
+ // Saves the thread local variables' values and
26
+ // returns them as a ThreadLocalState
27
+ ThreadLocalState();
28
+
29
+ // set_grad_mode - force the value of the grad mode TLS in
30
+ // the current state object. This is used for example in the
31
+ // autograd engine.
32
+ void set_grad_mode(bool enabled);
33
+
34
+ // set_multithreading_enabled - force the value of the multithreading-enabled
35
+ // TLS in
36
+ // the current state object. This is used for example in the
37
+ // autograd engine.
38
+ void set_multithreading_enabled(bool enabled);
39
+
40
+ // Sets thread local variables in the current thread,
41
+ // according to the thread boundary specified
42
+ static void setThreadLocalState(const ThreadLocalState& state);
43
+
44
+ private:
45
+ c10::impl::LocalDispatchKeySet dispatch_key_;
46
+
47
+ // ThreadLocalDebugInfo does not change after being created
48
+ // with DebugInfoGuard
49
+ std::shared_ptr<c10::ThreadLocalDebugInfo> debug_info_;
50
+
51
+ // RecordFunction TLS
52
+ RecordFunctionTLS rf_tls_;
53
+
54
+ // TLS for out-of-tree functorch
55
+ // See NOTE [functorch TLS in pytorch/pytorch] for why this needs to be a
56
+ // pointer (spoiler alert: it's due to the indirection)
57
+ // This needs to be a shared_ptr instead of a unique_ptr because
58
+ // ThreadLocalState is copy-able and does indeed get copied. Maybe we can
59
+ // consider adding an explicit copy constructor for ThreadLocalState in the
60
+ // future but I didn't want to add one just for this.
61
+ std::shared_ptr<const functorch::FuncTorchTLSBase> functorch_tls_;
62
+
63
+ // TLS for AutogradModes
64
+ AutogradState autograd_tls_;
65
+
66
+ // TLS for enable_torch_dispatch_mode
67
+ c10::impl::TorchDispatchModeTLS torch_dispatch_mode_state_;
68
+
69
+ // TLS for enable_python_dispatcher
70
+ c10::impl::PyInterpreter* python_dispatcher_state_;
71
+
72
+ // TLS for __torch_function__ (mode and disable_torch_function)
73
+ at::impl::PythonTorchFunctionTLS python_torch_function_state_;
74
+
75
+ // TLS for saved tensors default hooks
76
+ at::impl::SavedTensorDefaultHooksTLS saved_tensors_default_hooks_state_;
77
+
78
+ bool functionalization_reapply_views_state_;
79
+
80
+ // TLS for arbitrary python objects that is registered via hooks
81
+ at::impl::ThreadLocalPythonObjects saved_objects_;
82
+
83
+ friend class ThreadLocalStateGuard;
84
+ };
85
+
86
+ // Guard to set and reset the thread local state
87
+ class TORCH_API ThreadLocalStateGuard {
88
+ public:
89
+ explicit ThreadLocalStateGuard(const ThreadLocalState& state)
90
+ : prev_state_(ThreadLocalState()) {
91
+ // set the given state across the thread boundary
92
+ ThreadLocalState::setThreadLocalState(state);
93
+ }
94
+
95
+ ~ThreadLocalStateGuard() {
96
+ // restore previously set variables
97
+ ThreadLocalState::setThreadLocalState(prev_state_);
98
+ }
99
+
100
+ private:
101
+ const ThreadLocalState prev_state_;
102
+ };
103
+
104
+ template <typename T>
105
+ auto wrapPropagateTLSState(T callback) {
106
+ return [tls_state = ThreadLocalState(),
107
+ callback = std::move(callback)](auto&&... args) {
108
+ ThreadLocalStateGuard g(tls_state);
109
+ // Propagate value returned by callback().
110
+ return callback(std::forward<decltype(args)>(args)...);
111
+ };
112
+ }
113
+
114
+ } // namespace at
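A hedged sketch of the intended usage: capture the caller's TLS and replay it in a task launched on another thread (at::launch is the inter-op launcher declared in ATen/Parallel.h; the function name is hypothetical):

// Sketch only, not part of ThreadLocalState.h.
#include <ATen/Parallel.h>

void run_in_background_sketch() {
  auto task = []() {
    // Runs with the launching thread's grad mode, dispatch keys, etc.
  };
  at::launch(at::wrapPropagateTLSState(std::move(task)));
}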
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TypeDefault.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Dimname.h>
4
+ #include <c10/core/MemoryFormat.h>
5
+ #include <c10/core/QScheme.h>
6
+ #include <c10/core/Scalar.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/macros/Export.h>
9
+ #include <c10/util/ArrayRef.h>
10
+ #include <c10/util/intrusive_ptr.h>
11
+
12
+ namespace c10 {
13
+ struct Storage;
14
+ }
15
+
16
+ namespace at {
17
+
18
+ class Tensor;
19
+ using TensorList = ArrayRef<Tensor>;
20
+
21
+ class Context;
22
+ struct Generator;
23
+
24
+ struct Quantizer;
25
+ // This is a temporary typedef to enable Quantizer in aten native function API
26
+ // we'll remove them when we are actually exposing Quantizer class
27
+ // to frontend
28
+ using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
29
+
30
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/autocast_mode.h ADDED
@@ -0,0 +1,647 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/NativeFunctions.h>
5
+ #include <ATen/Operators.h>
6
+ #include <torch/library.h>
7
+
8
+ #include <c10/core/impl/LocalDispatchKeySet.h>
9
+ #include <c10/util/intrusive_ptr.h>
10
+
11
+ namespace at::autocast {
12
+
13
+ TORCH_API bool is_enabled();
14
+ TORCH_API void set_enabled(bool enabled);
15
+ TORCH_API void clear_cache();
16
+ TORCH_API int increment_nesting();
17
+ TORCH_API int decrement_nesting();
18
+ TORCH_API bool is_cpu_enabled();
19
+ TORCH_API void set_cpu_enabled(bool enabled);
20
+ TORCH_API at::ScalarType get_autocast_gpu_dtype();
21
+ TORCH_API at::ScalarType get_autocast_cpu_dtype();
22
+ TORCH_API void set_autocast_gpu_dtype(at::ScalarType dtype);
23
+ TORCH_API void set_autocast_cpu_dtype(at::ScalarType dtype);
24
+ TORCH_API bool is_xpu_enabled();
25
+ TORCH_API void set_xpu_enabled(bool enabled);
26
+ TORCH_API at::ScalarType get_autocast_xpu_dtype();
27
+ TORCH_API void set_autocast_xpu_dtype(at::ScalarType dtype);
28
+ TORCH_API bool is_ipu_enabled();
29
+ TORCH_API void set_ipu_enabled(bool enabled);
30
+ TORCH_API at::ScalarType get_autocast_ipu_dtype();
31
+ TORCH_API void set_autocast_ipu_dtype(at::ScalarType dtype);
32
+ TORCH_API bool is_hpu_enabled();
33
+ TORCH_API void set_hpu_enabled(bool enabled);
34
+ TORCH_API at::ScalarType get_autocast_hpu_dtype();
35
+ TORCH_API void set_autocast_hpu_dtype(at::ScalarType dtype);
36
+ TORCH_API bool is_xla_enabled();
37
+ TORCH_API void set_xla_enabled(bool enabled);
38
+ TORCH_API at::ScalarType get_autocast_xla_dtype();
39
+ TORCH_API void set_autocast_xla_dtype(at::ScalarType dtype);
40
+ TORCH_API bool is_privateuseone_enabled();
41
+ TORCH_API void set_privateuseone_enabled(bool enabled);
42
+ TORCH_API at::ScalarType get_autocast_privateuseone_dtype();
43
+ TORCH_API void set_autocast_privateuseone_dtype(at::ScalarType dtype);
44
+ TORCH_API bool is_autocast_cache_enabled();
45
+ TORCH_API void set_autocast_cache_enabled(bool enabled);
46
+
47
+ namespace {
48
+ inline bool is_autocast_eligible(
49
+ const Tensor& tensor,
50
+ c10::DeviceType device_type) {
51
+ switch (device_type) {
52
+ case c10::DeviceType::CUDA:
53
+ return (tensor.is_cuda() || tensor.is_xla()) &&
54
+ tensor.is_floating_point();
55
+ case c10::DeviceType::CPU:
56
+ return (tensor.is_cpu() || tensor.is_mkldnn()) &&
57
+ tensor.is_floating_point();
58
+ case c10::DeviceType::XPU:
59
+ return tensor.is_xpu() && tensor.is_floating_point();
60
+ case c10::DeviceType::IPU:
61
+ return tensor.is_ipu() && tensor.is_floating_point();
62
+ case c10::DeviceType::HPU:
63
+ return tensor.is_hpu() && tensor.is_floating_point();
64
+ case c10::DeviceType::XLA:
65
+ return tensor.is_xla() && tensor.is_floating_point();
66
+ case c10::DeviceType::PrivateUse1:
67
+ return tensor.is_privateuseone() && tensor.is_floating_point();
68
+ default:
69
+ return false;
70
+ }
71
+ }
72
+ } // namespace
73
+
74
+ inline DispatchKey get_autocast_dispatch_key_from_device_type(
75
+ c10::DeviceType device_type) {
76
+ switch (device_type) {
77
+ case c10::DeviceType::CUDA:
78
+ return DispatchKey::Autocast;
79
+ case c10::DeviceType::CPU:
80
+ return DispatchKey::AutocastCPU;
81
+ case c10::DeviceType::XPU:
82
+ return DispatchKey::AutocastXPU;
83
+ case c10::DeviceType::IPU:
84
+ return DispatchKey::AutocastIPU;
85
+ case c10::DeviceType::HPU:
86
+ return DispatchKey::AutocastHPU;
87
+ case c10::DeviceType::XLA:
88
+ return DispatchKey::AutocastXLA;
89
+ case c10::DeviceType::PrivateUse1:
90
+ return DispatchKey::AutocastPrivateUse1;
91
+ default:
92
+ throw std::runtime_error(
93
+ "unknown device type for autocast in get_autocast_dispatch_key_from_device_type");
94
+ }
95
+ }
96
+
97
+ inline at::ScalarType get_lower_precision_fp_from_device_type(
98
+ c10::DeviceType device_type) {
99
+ switch (device_type) {
100
+ case c10::DeviceType::CUDA:
101
+ return get_autocast_gpu_dtype();
102
+ case c10::DeviceType::CPU:
103
+ return get_autocast_cpu_dtype();
104
+ case c10::DeviceType::XPU:
105
+ return get_autocast_xpu_dtype();
106
+ case c10::DeviceType::IPU:
107
+ return get_autocast_ipu_dtype();
108
+ case c10::DeviceType::HPU:
109
+ return get_autocast_hpu_dtype();
110
+ case c10::DeviceType::XLA:
111
+ return get_autocast_xla_dtype();
112
+ case c10::DeviceType::PrivateUse1:
113
+ return get_autocast_privateuseone_dtype();
114
+ default:
115
+ throw std::runtime_error(
116
+ "unknown device type for autocast in get_lower_precision_fp_from_device_type");
117
+ }
118
+ }
119
+
120
+ /********************************************************************
121
+ Logic to extract the promote type from any Tensor or TensorList args.
122
+ ********************************************************************/
123
+
124
+ // Overload to catch Tensor args.
125
+ // If nextArg is floating-point, compare its scalar_type with our
126
+ // current best guess for the promote type, and update if necessary.
127
+ inline at::ScalarType prioritize(
128
+ at::ScalarType current,
129
+ const Tensor& nextArg,
130
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
131
+ if (current == at::kDouble) {
132
+ AT_ERROR("promote type is double in at::autocast::prioritize");
133
+ return current;
134
+ }
135
+ at::ScalarType lower_precision_fp =
136
+ get_lower_precision_fp_from_device_type(device_type);
137
+ if (is_autocast_eligible(nextArg, device_type)) {
138
+ auto next = nextArg.scalar_type();
139
+ if (next == at::kDouble) {
140
+ return current; // ignores double tensors
141
+ } else if (current == at::kFloat || next == at::kFloat) {
142
+ return at::kFloat; // prioritizes float over lower_precision_fp
143
+ } else if (current == lower_precision_fp && next == lower_precision_fp) {
144
+ return lower_precision_fp;
145
+ } else {
146
+ AT_ERROR("Unexpected floating ScalarType in at::autocast::prioritize");
147
+ return current;
148
+ }
149
+ } else {
150
+ return current;
151
+ }
152
+ }
153
+
154
+ // Overload to catch TensorList args (for e.g. cat, stack).
155
+ // Reuses the overload above to process each Tensor in the list.
156
+ inline at::ScalarType prioritize(
157
+ at::ScalarType current,
158
+ const TensorList& list,
159
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
160
+ for (const auto& tensor : list) {
161
+ current = prioritize(current, tensor, device_type);
162
+ }
163
+ return current;
164
+ }
165
+
166
+ inline at::ScalarType prioritize(
167
+ at::ScalarType current,
168
+ const ITensorListRef& list,
169
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
170
+ for (const auto& tensor : list) {
171
+ current = prioritize(current, tensor, device_type);
172
+ }
173
+ return current;
174
+ }
175
+
176
+ // Template to catch non-Tensor args (no-op that returns current best guess)
177
+ template <typename T>
178
+ inline at::ScalarType prioritize(
179
+ at::ScalarType current,
180
+ T nextArg,
181
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
182
+ return current;
183
+ }
184
+
185
+ // Overload for the tail case.
186
+ inline at::ScalarType promote_type(
187
+ at::ScalarType current,
188
+ c10::DeviceType device_type) {
189
+ return current;
190
+ }
191
+
192
+ // Unpack args and determine if incoming lower_precision_fp tensors need to be
193
+ // promoted to float32. Non-Tensor arguments are ignored.
194
+ template <typename Arg0, typename... Args>
195
+ inline at::ScalarType promote_type(
196
+ at::ScalarType current,
197
+ c10::DeviceType device_type,
198
+ Arg0 arg0,
199
+ Args... args) {
200
+ auto new_current = prioritize(current, arg0, device_type);
201
+ return promote_type(new_current, device_type, args...);
202
+ }
203
+
204
+ /****************************************************
205
+ Logic to apply cached casting to any Tensor argument.
206
+ ****************************************************/
207
+ inline bool is_eligible(
208
+ const Tensor& arg,
209
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
210
+ return (
211
+ arg.defined() && is_autocast_eligible(arg, device_type) &&
212
+ (arg.scalar_type() != at::kDouble));
213
+ }
214
+
215
+ // Overload to catch Tensor args
216
+ TORCH_API Tensor cached_cast(
217
+ at::ScalarType to_type,
218
+ const Tensor& arg,
219
+ c10::DeviceType device_type = c10::DeviceType::CUDA);
220
+
221
+ // Overload to process optional<Tensor>
222
+ inline c10::optional<Tensor> cached_cast(
223
+ at::ScalarType to_type,
224
+ const c10::optional<Tensor>& arg,
225
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
226
+ if (arg.has_value()) {
227
+ return cached_cast(to_type, *arg, device_type);
228
+ } else {
229
+ return c10::nullopt;
230
+ }
231
+ }
232
+
233
+ // Overload to process TensorLists
234
+ inline std::vector<Tensor> cached_cast(
235
+ at::ScalarType to_type,
236
+ const TensorList& arg,
237
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
238
+ std::vector<Tensor> vec;
239
+ vec.reserve(arg.size());
240
+ for (const auto& t : arg) {
241
+ vec.emplace_back(cached_cast(to_type, t, device_type));
242
+ }
243
+ return vec;
244
+ }
245
+
246
+ inline std::vector<Tensor> cached_cast(
247
+ at::ScalarType to_type,
248
+ const ITensorListRef& arg,
249
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
250
+ std::vector<Tensor> vec;
251
+ vec.reserve(arg.size());
252
+ for (const auto& t : arg) {
253
+ vec.emplace_back(cached_cast(to_type, t, device_type));
254
+ }
255
+ return vec;
256
+ }
257
+
258
+ // Template to catch non-Tensor args.
259
+ template <typename T>
260
+ inline T cached_cast(
261
+ at::ScalarType to_type,
262
+ T arg,
263
+ c10::DeviceType device_type = c10::DeviceType::CUDA) {
264
+ return arg;
265
+ }
266
+
267
+ /*******************************************************
268
+ Logic to flip an output dtype flag.
269
+ Keep it simple for now by assuming only one such flag is
270
+ present in the argument list. If I ever need a function
271
+ with more than one flag I'll figure out something else.
272
+ The policy is:
273
+ If the user has explicitly specified a dtype, respect it.
274
+ Otherwise, set it to the autocast type.
275
+ ********************************************************/
276
+
277
+ // Overload to catch dtype flags
278
+ c10::optional<ScalarType> inline set_opt_dtype(
279
+ at::ScalarType to_type,
280
+ const c10::optional<ScalarType>& dtype) {
281
+ return dtype.has_value() ? dtype : to_type;
282
+ }
283
+
284
+ // Template to catch other args
285
+ template <typename T>
286
+ inline T set_opt_dtype(at::ScalarType to_type, T arg) {
287
+ return arg;
288
+ }
289
+
290
+ template <typename... Args>
291
+ inline bool firstarg_is_eligible(
292
+ c10::DeviceType device_type,
293
+ const Tensor& arg,
294
+ Args... args) {
295
+ return is_eligible(arg, device_type);
296
+ }
297
+
298
+ template <typename... Args>
299
+ inline at::ScalarType type_from_firstarg(
300
+ c10::DeviceType device_type,
301
+ at::ScalarType to_type,
302
+ const Tensor& arg,
303
+ Args... args) {
304
+ return (is_eligible(arg, device_type) ? to_type : arg.scalar_type());
305
+ }
306
+
307
+ // Policies correspond to op categories that need code-divergent handling.
308
+ // Wrapper templates below are specialized based on a policy template parameter.
309
+ enum class CastPolicy : uint8_t {
310
+ lower_precision_fp = 0, // Cast all inputs to lower_precision_fp before
311
+ // running the op. Currently, lower_precision_fp is
312
+ // fp16 for AutocastCUDA, and is defined by user
313
+ // (default bf16) for AutocastCPU or other device.
314
+ fp32, // Cast all inputs to at::kFloat before running the op.
315
+ fp32_set_opt_dtype, // Treats functions (like softmax) that
316
+ // 1. we'd like to run in fp32 and
317
+ // 2. have a c10::optional<ScalarType> arg that controls
318
+ // the output type.
319
+ // fp32_set_opt_dtype wrappers' policy is: if the output
320
+ // type is already set, don't touch it, otherwise, set
321
+ // it to at::kFloat.
322
+ fp32_append_dtype, // Treats functions (like norm) that
323
+ // 1. we'd like to run in fp32 and
324
+ // 2. have some overloads that accept an output type and
325
+ // other overloads that don't.
326
+ // fp32_append_dtype wrappers wrap the overloads that don't
327
+ // have an output dtype.
328
+ // The wrapper policy is: append at::kFloat to the args,
329
+ // and redispatch to the type-aware overload.
330
+ promote, // Run in the widest dtype among several args.
331
+ };
332
+
333
+ /********************************************************************************************************
334
+ Templates to provide wrapper functions
335
+
336
+ I'm copying the pattern used in core/boxing/impl/WrapFunctionIntoFunctor.h to
337
+ extract args and return type. (see also
338
+ https://stackoverflow.com/questions/46533698/how-to-deduce-argument-list-from-function-pointer)
339
+
340
+ This strategy uses an exterior "WrapFunction" that extracts arguments on behalf
341
+ of (in my case several specializations of) an interior "WrapFunction_".
342
+ Interior WrapFunction_ specializations are defined for each CastPolicy.
343
+ ********************************************************************************************************/
344
+
345
+ // Base template for WrapFunction_, which is specialized to contain a "call"
346
+ // method each CastPolicy
347
+ template <
348
+ CastPolicy policy,
349
+ c10::DeviceType device_type,
350
+ class Redispatch,
351
+ Redispatch* F,
352
+ class Ret,
353
+ class ArgList>
354
+ struct WrapFunction_ {};
355
+
356
+ // CastPolicy::lower_precision_fp General_DeviceType
357
+ template <
358
+ c10::DeviceType device_type,
359
+ class Redispatch,
360
+ Redispatch* F,
361
+ class Ret,
362
+ class... Args>
363
+ struct WrapFunction_<
364
+ CastPolicy::lower_precision_fp,
365
+ device_type,
366
+ Redispatch,
367
+ F,
368
+ Ret,
369
+ guts::typelist::typelist<Args...>> {
370
+ static Ret call(Args... args) {
371
+ c10::impl::ExcludeDispatchKeyGuard no_autocast(
372
+ get_autocast_dispatch_key_from_device_type(device_type));
373
+ return (*F)(cached_cast(
374
+ get_lower_precision_fp_from_device_type(device_type),
375
+ args,
376
+ device_type)...);
377
+ }
378
+ };
379
+
380
+ // CastPolicy::fp32 General_DeviceType
381
+ template <
382
+ c10::DeviceType device_type,
383
+ class Redispatch,
384
+ Redispatch* F,
385
+ class Ret,
386
+ class... Args>
387
+ struct WrapFunction_<
388
+ CastPolicy::fp32,
389
+ device_type,
390
+ Redispatch,
391
+ F,
392
+ Ret,
393
+ guts::typelist::typelist<Args...>> {
394
+ static Ret call(Args... args) {
395
+ c10::impl::ExcludeDispatchKeyGuard no_autocast(
396
+ get_autocast_dispatch_key_from_device_type(device_type));
397
+ return (*F)(cached_cast(at::kFloat, args, device_type)...);
398
+ }
399
+ };
400
+
401
+ // CastPolicy::fp32_set_opt_dtype General_DeviceType
402
+ template <
403
+ c10::DeviceType device_type,
404
+ class Redispatch,
405
+ Redispatch* F,
406
+ class Ret,
407
+ class... Args>
408
+ struct WrapFunction_<
409
+ CastPolicy::fp32_set_opt_dtype,
410
+ device_type,
411
+ Redispatch,
412
+ F,
413
+ Ret,
414
+ guts::typelist::typelist<Args...>> {
415
+ static Ret call(Args... args) {
416
+ c10::impl::ExcludeDispatchKeyGuard no_autocast(
417
+ get_autocast_dispatch_key_from_device_type(device_type));
418
+ if (firstarg_is_eligible(device_type, args...)) {
419
+ return (*F)(set_opt_dtype(at::kFloat, args)...);
420
+ } else {
421
+ // If ineligible, calls F with unaltered args. Does not set opt dtype,
422
+ // because setting opt dtype explicitly may interfere with internal
423
+ // implicit promotion decisions.
424
+ return (*F)(args...);
425
+ }
426
+ }
427
+ };
428
+
429
+ // CastPolicy::fp32_append_dtype General_DeviceType
430
+ template <
431
+ c10::DeviceType device_type,
432
+ class Redispatch,
433
+ Redispatch* F,
434
+ class Ret,
435
+ class... Args>
436
+ struct WrapFunction_<
437
+ CastPolicy::fp32_append_dtype,
438
+ device_type,
439
+ Redispatch,
440
+ F,
441
+ Ret,
442
+ guts::typelist::typelist<Args...>> {
443
+ static Ret call(Args... args) {
444
+ c10::impl::ExcludeDispatchKeyGuard no_autocast(
445
+ get_autocast_dispatch_key_from_device_type(device_type));
446
+ at::ScalarType out_type =
447
+ type_from_firstarg(device_type, at::kFloat, args...);
448
+ return (*F)(args..., out_type);
449
+ }
450
+ };
451
+
452
+ // CastPolicy::promote General_DeviceType
453
+ template <
454
+ c10::DeviceType device_type,
455
+ class Redispatch,
456
+ Redispatch* F,
457
+ class Ret,
458
+ class... Args>
459
+ struct WrapFunction_<
460
+ CastPolicy::promote,
461
+ device_type,
462
+ Redispatch,
463
+ F,
464
+ Ret,
465
+ guts::typelist::typelist<Args...>> {
466
+ static Ret call(Args... args) {
467
+ c10::impl::ExcludeDispatchKeyGuard no_autocast(
468
+ get_autocast_dispatch_key_from_device_type(device_type));
469
+ auto to_type = promote_type(
470
+ get_lower_precision_fp_from_device_type(device_type),
471
+ device_type,
472
+ args...);
473
+ return (*F)(cached_cast(to_type, args, device_type)...);
474
+ }
475
+ };
476
+
477
+ // Wrapper to infer return_type and parameter_types for WrapFunction_ (imitating
478
+ // core/boxing/impl/WrapFunctionIntoFunctor.h)
479
+ template <
480
+ CastPolicy policy,
481
+ c10::DeviceType device_type,
482
+ class Registered, // The signature for which we're registering. The
483
+ // dispatcher's calling code invokes our registered
484
+ // functions with arguments matching Registered, so we
485
+ // register WrapFunction_::call methods with a matching
486
+ // signature to properly field those arguments.
487
+ // guts::function_traits below extracts return_type and
488
+ // parameter_types from Registered, which WrapFunction_
489
+ // templates above use to declare their call methods.
490
+ class Redispatch, // The signature for the function we're redispatching to.
491
+ // In most cases this is the same as Registered, but for
492
+ // some ops (for example, ops where we append a dtype)
493
+ // it's useful to redispatch to a function with a
494
+ // different signature.
495
+ Redispatch* F> // The actual function we're redispatching to.
496
+ struct WrapFunction final {
497
+ using type = WrapFunction_<
498
+ policy,
499
+ device_type,
500
+ Redispatch,
501
+ F,
502
+ typename guts::function_traits<Registered>::return_type,
503
+ typename guts::function_traits<Registered>::parameter_types>;
504
+ };
505
+
506
+ /*****************************************************************************************************************
507
+ This section performs load-time registration for autocast wrappers.
508
+
509
+ It's debatable at what level operations should be patched. We'd like casts to
510
+ be autograd-exposed and precede autograd history recording, so that for
511
+ lower_precision_fp ops, input tensors are saved for backward in
512
+ lower_precision_fp rather than fp32. Saving inputs in lower_precision_fp
513
+ can significantly reduce a model's memory footprint.
514
+
515
+ Option 1 (strawman): Patch only at the level of explicit calls into
516
+ cudnn/cublas (cudnn_convolution, etc), because those are the code paths that are
517
+ guaranteed to use Tensor Cores, therefore they're the ones that will benefit
518
+ most from lower_precision_fp. Potential pitfall: convolutions (and other ops)
519
+ are wrapped in several layers of at::* calls. If one of those happens to record
520
+ autograd history, then we've lost the opportunity to save inputs in
521
+ lower_precision_fp.
522
+
523
+ Option 2: Patch the Python-exposed surface of calls, to make 100% sure autograd
524
+ history recording can't sneak in ahead of autocast. This mirrors Apex most
525
+ closely.
526
+
527
+ I think Option 2 is the right answer for all ops, not just convolutions. Option
528
+ 2 is what I implement here.
529
+ *****************************************************************************************************************/
530
+
531
+ /********************************************************************************************************************
532
+ Explicit registration for out-of-place ops
533
+
534
+ The stuff below could be codegenned. Ed said
535
+ > you are going to have to write the function definition at some point, I
536
+ wouldn't try to get clever about it. Therefore, for the moment, this is all
537
+ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions.
538
+ ********************************************************************************************************************/
539
+
540
+ } // namespace at::autocast
541
+
542
+ #define ADD_NS(RAW_OP) at::RAW_OP
543
+
544
+ // Common cases where registration signature matches redispatch signature
545
+ // (that's why SIGNATURE is repeated in the WrapFunction instantiation)
546
+ #define KERNEL(DISPATCHKEY, OP, POLICY) \
547
+ m.impl( \
548
+ TORCH_SELECTIVE_NAME("aten::" #OP), \
549
+ &::at::autocast::WrapFunction< \
550
+ ::at::autocast::CastPolicy::POLICY, \
551
+ DISPATCHKEY, \
552
+ decltype(ATEN_FN(OP)), \
553
+ decltype(ATEN_FN(OP)), \
554
+ &ATEN_FN(OP)>::type::call);
555
+
556
+ #define KERNEL2(DISPATCHKEY, OP, OVERLOAD, POLICY) \
557
+ m.impl( \
558
+ TORCH_SELECTIVE_NAME("aten::" #OP "." #OVERLOAD), \
559
+ &::at::autocast::WrapFunction< \
560
+ ::at::autocast::CastPolicy::POLICY, \
561
+ DISPATCHKEY, \
562
+ decltype(ATEN_FN2(OP, OVERLOAD)), \
563
+ decltype(ATEN_FN2(OP, OVERLOAD)), \
564
+ &ATEN_FN2(OP, OVERLOAD)>::type::call);
565
+
566
+ // Less-common but still useful case: redispatching to a function
567
+ // with a new signature (e.g. appending a dtype)
568
+ #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \
569
+ DISPATCHKEY, \
570
+ REDISPATCH_FUNC, \
571
+ REGISTER_NAME, \
572
+ REGISTER_SIGNATURE, \
573
+ REDISPATCH_SIGNATURE, \
574
+ POLICY) \
575
+ m.impl( \
576
+ TORCH_SELECTIVE_NAME("aten::" REGISTER_NAME), \
577
+ &::at::autocast::WrapFunction< \
578
+ ::at::autocast::CastPolicy::POLICY, \
579
+ DISPATCHKEY, \
580
+ REGISTER_SIGNATURE, \
581
+ REDISPATCH_SIGNATURE, \
582
+ &REDISPATCH_FUNC>::type::call);
583
+
584
+ // KERNEL_CPU/KERNEL_CPU2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU
585
+ // registration for AutocastCPU
586
+ #define KERNEL_CPU(OP, POLICY) KERNEL(c10::DeviceType::CPU, OP, POLICY)
587
+
588
+ #define KERNEL_CPU2(OP, OVERLOAD, POLICY) \
589
+ KERNEL2(c10::DeviceType::CPU, OP, OVERLOAD, POLICY)
590
+
591
+ #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU( \
592
+ REDISPATCH_FUNC, \
593
+ REGISTER_NAME, \
594
+ REGISTER_SIGNATURE, \
595
+ REDISPATCH_SIGNATURE, \
596
+ POLICY) \
597
+ KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \
598
+ c10::DeviceType::CPU, \
599
+ REDISPATCH_FUNC, \
600
+ REGISTER_NAME, \
601
+ REGISTER_SIGNATURE, \
602
+ REDISPATCH_SIGNATURE, \
603
+ POLICY)
604
+
605
+ // KERNEL_CUDA/KERNEL_CUDA2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA
606
+ // registration for AutocastCUDA
607
+ #define KERNEL_CUDA(OP, POLICY) KERNEL(c10::DeviceType::CUDA, OP, POLICY)
608
+
609
+ #define KERNEL_CUDA2(OP, OVERLOAD, POLICY) \
610
+ KERNEL2(c10::DeviceType::CUDA, OP, OVERLOAD, POLICY)
611
+
612
+ #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA( \
613
+ REDISPATCH_FUNC, \
614
+ REGISTER_NAME, \
615
+ REGISTER_SIGNATURE, \
616
+ REDISPATCH_SIGNATURE, \
617
+ POLICY) \
618
+ KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \
619
+ c10::DeviceType::CUDA, \
620
+ REDISPATCH_FUNC, \
621
+ REGISTER_NAME, \
622
+ REGISTER_SIGNATURE, \
623
+ REDISPATCH_SIGNATURE, \
624
+ POLICY)
625
+
626
+ // KERNEL_PRIVATEUSEONE/KERNEL_PRIVATEUSEONE2/
627
+ // KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE
628
+ // registration for AutocastPrivateUse1
629
+ #define KERNEL_PRIVATEUSEONE(OP, POLICY) \
630
+ KERNEL(c10::DeviceType::PrivateUse1, OP, POLICY)
631
+
632
+ #define KERNEL_PRIVATEUSEONE2(OP, OVERLOAD, POLICY) \
633
+ KERNEL2(c10::DeviceType::PrivateUse1, OP, OVERLOAD, POLICY)
634
+
635
+ #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE( \
636
+ REDISPATCH_FUNC, \
637
+ REGISTER_NAME, \
638
+ REGISTER_SIGNATURE, \
639
+ REDISPATCH_SIGNATURE, \
640
+ POLICY) \
641
+ KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \
642
+ c10::DeviceType::PrivateUse1, \
643
+ REDISPATCH_FUNC, \
644
+ REGISTER_NAME, \
645
+ REGISTER_SIGNATURE, \
646
+ REDISPATCH_SIGNATURE, \
647
+ POLICY)
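To make the registration macros concrete, a hedged sketch in the style of autocast_mode.cpp; the op/policy pairings are illustrative rather than a statement of what PyTorch actually registers:

// Sketch only, not part of autocast_mode.h.
TORCH_LIBRARY_IMPL(aten, Autocast, m) {
  // Run matmul-like ops in the lower-precision autocast dtype.
  KERNEL_CUDA(mm, lower_precision_fp)
  // Keep a numerically sensitive overload in fp32.
  KERNEL_CUDA2(pow, Tensor_Scalar, fp32)
}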
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/code_template.h ADDED
@@ -0,0 +1,245 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+
5
+ #include <sstream>
6
+ #include <string>
7
+ #include <unordered_map>
8
+ #include <vector>
9
+
10
+ namespace at {
11
+ namespace jit {
12
+
13
+ // A template environment is a mapping from template variable names, e.g.,
14
+ // identifier (corresponding to $identifier) to their expansions.
15
+ //
16
+ // This template environment supports storing strings, numbers and lists
17
+ // of strings, and can be chained together (so that lookup proceeds in
18
+ // the top level environment, and then recurses into a parent
19
+ // environment if the key is not found.)
20
+ struct TemplateEnv {
21
+ TemplateEnv() = default;
22
+ TemplateEnv(TemplateEnv& parent) : parent(&parent) {}
23
+
24
+ using string_list = std::vector<std::string>;
25
+
26
+ // Add a string 'v' to the map at key 'k'.
27
+ void s(const std::string& k, const std::string& v) {
28
+ strings_[k] = v;
29
+ lists_.erase(k);
30
+ }
31
+
32
+ // Add a number 'v' to the map at key 'k'
33
+ template <typename T>
34
+ void d(const std::string& k, const T& v) {
35
+ strings_[k] = c10::to_string(v);
36
+ lists_.erase(k);
37
+ }
38
+
39
+ // Retrieve the string representation of the value stored at 'k' from the map.
40
+ // Raises an exception if the key is not found.
41
+ const std::string& s(const std::string& k) const {
42
+ if (strings_.count(k) == 0) {
43
+ if (parent) {
44
+ return parent->s(k);
45
+ }
46
+ notFound(k);
47
+ }
48
+ return strings_.at(k);
49
+ }
50
+
51
+ // Store a list of strings 'v' in the map at 'k'.
52
+ void v(const std::string& k, const string_list& v) {
53
+ lists_[k] = v;
54
+ strings_.erase(k);
55
+ }
56
+
57
+ // Retrieve a list of strings stored at 'k' from the map.
58
+ // Raises an exception if the key is not found.
59
+ const string_list& v(const std::string& k) const {
60
+ if (lists_.count(k) == 0) {
61
+ if (parent) {
62
+ return parent->v(k);
63
+ }
64
+ notFound(k);
65
+ }
66
+ return lists_.at(k);
67
+ }
68
+
69
+ // Test if a string 'k' is a string (as opposed to a list.)
70
+ bool keyIsString(const std::string& k) const {
71
+ if (strings_.count(k) > 0)
72
+ return true;
73
+ if (lists_.count(k) > 0)
74
+ return false;
75
+ if (parent)
76
+ return parent->keyIsString(k);
77
+ notFound(k);
78
+ }
79
+
80
+ private:
81
+ [[noreturn]] void notFound(const std::string& k) const {
82
+ std::stringstream ss;
83
+ ss << "key not found: " << k;
84
+ throw std::logic_error(ss.str());
85
+ }
86
+
87
+ std::unordered_map<std::string, std::string> strings_;
88
+ std::unordered_map<std::string, string_list> lists_;
89
+ TemplateEnv* parent{nullptr};
90
+ };
91
+
92
+ /*
93
+ # Match $identifier or ${identifier} and replace with the value in env.
94
+ # If this identifier is at the beginning of whitespace on a line
95
+ # and its value is a list then it is treated as
96
+ # block substitution by indenting all lines of all elements.
97
+ # If the identifier is on a line starting with non-whitespace and a list
98
+ # then it is comma separated. ${,foo} will insert a comma before the list
99
+ # if this list is not empty and ${foo,} will insert one after.
100
+ */
101
+ struct CodeTemplate {
102
+ /* implicit */ CodeTemplate(std::string t) : template_text(std::move(t)) {}
103
+
104
+ std::string format(const TemplateEnv& env) const {
105
+ std::stringstream out;
106
+ size_t pos = 0;
107
+ size_t indent = 0;
108
+ bool all_whitespace = true;
109
+ while (pos < template_text.size()) {
110
+ char c = template_text[pos];
111
+ if (c == '$') {
112
+ std::stringstream kss;
113
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
114
+ bool comma_before;
115
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
116
+ bool comma_after;
117
+ size_t new_pos = parseKey(pos, kss, comma_before, comma_after);
118
+ std::string k = kss.str();
119
+ bool is_string = env.keyIsString(k);
120
+ if (all_whitespace) {
121
+ if (is_string)
122
+ emitStringWithIndents(out, indent, env.s(k));
123
+ else
124
+ emitLinesIndented(out, indent, env.v(k));
125
+ } else {
126
+ if (is_string)
127
+ out << env.s(k);
128
+ else
129
+ emitCommaSeparatedList(out, env.v(k), comma_before, comma_after);
130
+ }
131
+ all_whitespace = false;
132
+ pos = new_pos;
133
+ } else {
134
+ out << c;
135
+ if (!isspace(c))
136
+ all_whitespace = false;
137
+ indent++;
138
+ if (c == '\n') {
139
+ indent = 0;
140
+ all_whitespace = true;
141
+ }
142
+ pos++;
143
+ }
144
+ }
145
+ return out.str();
146
+ }
147
+
148
+ private:
149
+ using string_list = std::vector<std::string>;
150
+ char charAt(size_t p) const {
151
+ if (p >= template_text.size())
152
+ throw std::logic_error("EOS found in key");
153
+ return template_text[p];
154
+ }
155
+ size_t parseKey(
156
+ size_t pos,
157
+ std::ostream& k,
158
+ bool& comma_before,
159
+ bool& comma_after) const {
160
+ comma_before = false;
161
+ comma_after = false;
162
+ pos++;
163
+ if (charAt(pos) == '{') {
164
+ pos++;
165
+ if (charAt(pos) == ',') {
166
+ comma_before = true;
167
+ pos++;
168
+ }
169
+ pos = parseIdent(pos, k);
170
+ if (charAt(pos) == ',') {
171
+ comma_after = true;
172
+ pos++;
173
+ }
174
+ if (charAt(pos) != '}')
175
+ throw std::logic_error("missing terminating '}'");
176
+ pos++;
177
+ return pos;
178
+ } else {
179
+ return parseIdent(pos, k);
180
+ }
181
+ }
182
+ size_t parseIdent(size_t pos, std::ostream& k) const {
183
+ while (pos < template_text.size() &&
184
+ (isalnum(template_text[pos]) || template_text[pos] == '_')) {
185
+ k << template_text[pos];
186
+ pos++;
187
+ }
188
+ return pos;
189
+ }
190
+ void emitCommaSeparatedList(
191
+ std::ostream& out,
192
+ const string_list& strings,
193
+ bool comma_before,
194
+ bool comma_after) const {
195
+ if (comma_before && !strings.empty())
196
+ out << ", ";
197
+ for (const auto i : c10::irange(strings.size())) {
198
+ if (i > 0)
199
+ out << ", ";
200
+ out << strings[i];
201
+ }
202
+ if (comma_after && !strings.empty())
203
+ out << ", ";
204
+ }
205
+ // These indentation functions follow the convention that they never emit
206
+ // leading or trailing newlines when the input string does not have leading
207
+ // or trailing newlines. It's the responsibility of the calling function
208
+ // to indent correctly in the context.
209
+ void emitIndent(std::ostream& out, size_t indent) const {
210
+ for (C10_UNUSED const auto i : c10::irange(indent)) {
211
+ out << " ";
212
+ }
213
+ }
214
+ void emitStringWithIndents(
215
+ std::ostream& out,
216
+ size_t indent,
217
+ const std::string& str) const {
218
+ for (auto c : str) {
219
+ out << c;
220
+ if (c == '\n') {
221
+ emitIndent(out, indent);
222
+ }
223
+ }
224
+ }
225
+ void emitLinesIndented(
226
+ std::stringstream& out,
227
+ size_t indent,
228
+ const string_list& strings) const {
229
+ for (const auto i : c10::irange(strings.size())) {
230
+ if (i > 0)
231
+ emitIndent(out, indent);
232
+ emitStringWithIndents(out, indent, strings[i]);
233
+ if (i + 1 != strings.size())
234
+ out << "\n";
235
+ }
236
+ }
237
+ std::string template_text;
238
+ };
239
+
240
+ static inline std::string format(const std::string& fmt, TemplateEnv& env) {
241
+ return CodeTemplate(fmt).format(env);
242
+ }
243
+
244
+ } // namespace jit
245
+ } // namespace at
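The comments in this header describe the substitution rules but ship no example, so here is a minimal usage sketch (not part of the diff). The include path, the template text and the `relu` names are assumptions for illustration; it exercises string (`s`), number (`d`) and list (`v`) keys, including the block-indentation behaviour when a list key starts a line.

#include <ATen/code_template.h>  // assumed install path of this header
#include <iostream>

int main() {
  at::jit::TemplateEnv env;
  env.s("name", "relu");   // $name  -> "relu"
  env.d("arity", 1);       // $arity -> "1"
  env.v("body", {"auto y = x > 0.f ? x : 0.f;", "return y;"});

  // "$body" sits on a line preceded only by whitespace, so each list element
  // is emitted on its own line at that indentation (block substitution).
  at::jit::CodeTemplate tmpl(R"(
float ${name}_${arity}(float x) {
  $body
}
)");
  std::cout << tmpl.format(env);
}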
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpp_custom_type_hack.h ADDED
@@ -0,0 +1,112 @@
1
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
2
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
3
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
4
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
5
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
6
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
7
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
8
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
9
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
10
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
11
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
12
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
13
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
14
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
15
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
16
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
17
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
18
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
19
+
20
+ // YOU ARE IN THE WRONG PLACE! TURN BACK NOW!
21
+
22
+ // This code was a temporary hack to enable embedding arbitrary C++ structures
23
+ // into Tensors. THIS IS UNSAFE AND IS NOT SUPPORTED. IF YOU USE THIS CODE,
24
+ // IT __WILL__ BREAK.
25
+
26
+ // This code has been superseded by custom classes:
27
+ // https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html
28
+
29
+ // Please use custom classes and **DO NOT ADD MORE CALLSITES TO THINGS DEFINED
30
+ // IN THIS FILE**.
31
+
32
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
33
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
34
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
35
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
36
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
37
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
38
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
39
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
40
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
41
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
42
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
43
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
44
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
45
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
46
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
47
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
48
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
49
+ // STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
50
+
51
+ #include <ATen/TracerMode.h>
52
+ #include <ATen/core/Tensor.h>
53
+
54
+ #ifndef AT_PER_OPERATOR_HEADERS
55
+ #include <ATen/Functions.h>
56
+ #else
57
+ #include <ATen/ops/empty.h>
58
+ #endif
59
+
60
+ namespace at {
61
+ namespace cpp_custom_type_hack {
62
+
63
+ template <typename T>
64
+ [[deprecated(
65
+ "Use custom classes instead: "
66
+ "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] bool
67
+ isa(const Tensor& packed) {
68
+ return (packed.scalar_type() == kByte) &&
69
+ (packed.storage().data_ptr().get_deleter() ==
70
+ caffe2::TypeMeta::Make<T>().deleteFn());
71
+ }
72
+
73
+ template <typename T>
74
+ [[deprecated(
75
+ "Use custom classes instead: "
76
+ "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] T&
77
+ cast(const Tensor& packed) {
78
+ TORCH_CHECK(
79
+ packed.scalar_type() == kByte, "Expected temporary cpp type wrapper");
80
+ TORCH_CHECK(
81
+ packed.storage().data_ptr().get_deleter() ==
82
+ caffe2::TypeMeta::Make<T>().deleteFn(),
83
+ "Expected temporary cpp type wrapper of type ",
84
+ caffe2::TypeMeta::TypeName<T>());
85
+ return *reinterpret_cast<T*>(packed.storage().data_ptr().get());
86
+ }
87
+
88
+ template <typename T>
89
+ [[deprecated(
90
+ "Use custom classes instead: "
91
+ "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] Tensor
92
+ create(std::unique_ptr<T> ptr, TensorOptions options) {
93
+ // None of this should trace, so turn off Tracer dispatching
94
+ at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove
95
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
96
+
97
+ // We store this instance away in a Tensor and register a deleter function
98
+ // so that we do not leak memory. On the other side, we pull out the storage's
99
+ // data_ptr and get the right typed pointer.
100
+ void* raw_ptr = ptr.release();
101
+ at::DataPtr at_ptr(
102
+ raw_ptr, raw_ptr, caffe2::TypeMeta::Make<T>().deleteFn(), at::kCPU);
103
+
104
+ // size doesn't really matter, but we can align it to the actual size
105
+ // returning variables because one likely want to use this hack from python
106
+ auto retval = at::empty({sizeof(T)}, options.device(kCPU).dtype(at::kByte));
107
+ retval.storage().set_data_ptr_noswap(std::move(at_ptr));
108
+ return retval;
109
+ }
110
+
111
+ } // namespace cpp_custom_type_hack
112
+ } // namespace at
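Purely as an illustration of what the declarations above do (the header is explicit that no new callsites should be added), here is a hypothetical sketch of the deprecated `create`/`cast` pair; `MyState`, `pack_state` and `bump` are invented names. New code should use the custom-class mechanism linked in the comments instead.

// Deprecated API -- illustration only; prefer torch custom classes.
#include <ATen/cpp_custom_type_hack.h>
#include <cstdint>
#include <memory>

struct MyState {  // hypothetical payload smuggled inside a byte Tensor
  int64_t counter = 0;
};

at::Tensor pack_state() {
  // Wraps a heap-allocated MyState into a kByte Tensor; the Tensor's deleter
  // frees the object when the Tensor goes away.
  return at::cpp_custom_type_hack::create(
      std::make_unique<MyState>(), at::TensorOptions());
}

int64_t bump(const at::Tensor& packed) {
  // cast() verifies the dtype and deleter, then hands back the payload.
  auto& state = at::cpp_custom_type_hack::cast<MyState>(packed);
  return ++state.counter;
}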
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/jit_macros.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+ #include <ATen/cuda/CUDAConfig.h>
3
+ #include <string>
4
+
5
+ // AT_USE_JITERATOR() controls whether we jit some elementwise kernels
6
+ #define AT_USE_JITERATOR() true
7
+ #define jiterator_stringify(...) std::string(#__VA_ARGS__);
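A small, hypothetical sketch of how `jiterator_stringify` is used: the macro stringizes its arguments into a `std::string` holding source code that can later be handed to the jiterator pipeline, and `AT_USE_JITERATOR()` gates whether that path is taken. The `my_relu` kernel body is invented; a CUDA-enabled build is assumed because of the `CUDAConfig.h` include.

#include <ATen/jit_macros.h>
#include <iostream>

// Note: the macro definition already ends in ';', so no extra semicolon here.
const auto my_kernel_src = jiterator_stringify(
    template <typename T> T my_relu(T x) { return x > T(0) ? x : T(0); }
)

int main() {
  if (AT_USE_JITERATOR()) {
    std::cout << my_kernel_src << std::endl;  // prints the stringized source
  }
}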
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAlgorithm.h ADDED
@@ -0,0 +1,33 @@
1
+ #ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
2
+ #include <thrust/binary_search.h>
3
+ #include <thrust/device_vector.h>
4
+ #include <thrust/execution_policy.h>
5
+ #include <thrust/functional.h>
6
+ #endif
7
+ namespace c10 {
8
+ namespace cuda {
9
+ #ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
10
+ template <typename Iter, typename Scalar>
11
+ __forceinline__ __device__ Iter
12
+ lower_bound(Iter start, Iter end, Scalar value) {
13
+ return thrust::lower_bound(thrust::device, start, end, value);
14
+ }
15
+ #else
16
+ // thrust::lower_bound is broken on device, see
17
+ // https://github.com/NVIDIA/thrust/issues/1734 Implementation inspired by
18
+ // https://github.com/pytorch/pytorch/blob/805120ab572efef66425c9f595d9c6c464383336/aten/src/ATen/native/cuda/Bucketization.cu#L28
19
+ template <typename Iter, typename Scalar>
20
+ __device__ Iter lower_bound(Iter start, Iter end, Scalar value) {
21
+ while (start < end) {
22
+ auto mid = start + ((end - start) >> 1);
23
+ if (*mid < value) {
24
+ start = mid + 1;
25
+ } else {
26
+ end = mid;
27
+ }
28
+ }
29
+ return end;
30
+ }
31
+ #endif // THRUST_DEVICE_LOWER_BOUND_WORKS
32
+ } // namespace cuda
33
+ } // namespace c10
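The fallback above is the classic half-open binary search; as a sanity check, here is a host-side analogue (illustration only, not part of the header) that keeps the same invariant -- everything before `start` is less than `value`, everything from `end` onward is not -- so it returns the same position as `std::lower_bound` would.

#include <cassert>
#include <vector>

template <typename Iter, typename Scalar>
Iter host_lower_bound(Iter start, Iter end, Scalar value) {
  while (start < end) {
    auto mid = start + ((end - start) >> 1);  // midpoint, rounding down
    if (*mid < value) {
      start = mid + 1;  // value lies to the right of mid
    } else {
      end = mid;        // mid is a candidate answer
    }
  }
  return end;  // first element not less than value
}

int main() {
  std::vector<int> v{1, 3, 3, 5, 9};
  assert(host_lower_bound(v.begin(), v.end(), 3) == v.begin() + 1);
  assert(host_lower_bound(v.begin(), v.end(), 4) == v.begin() + 3);
  assert(host_lower_bound(v.begin(), v.end(), 10) == v.end());
}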
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDAAllocatorConfig.h ADDED
@@ -0,0 +1,116 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDACachingAllocator.h>
4
+ #include <c10/cuda/CUDAException.h>
5
+ #include <c10/cuda/CUDAMacros.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/llvmMathExtras.h>
8
+ #include <cuda_runtime_api.h>
9
+
10
+ #include <atomic>
11
+ #include <vector>
12
+
13
+ namespace c10 {
14
+ namespace cuda {
15
+ namespace CUDACachingAllocator {
16
+
17
+ // Environment config parser
18
+ class C10_CUDA_API CUDAAllocatorConfig {
19
+ public:
20
+ static size_t max_split_size() {
21
+ return instance().m_max_split_size;
22
+ }
23
+ static double garbage_collection_threshold() {
24
+ return instance().m_garbage_collection_threshold;
25
+ }
26
+
27
+ static bool expandable_segments() {
28
+ #ifndef PYTORCH_C10_DRIVER_API_SUPPORTED
29
+ if (instance().m_expandable_segments) {
30
+ TORCH_WARN_ONCE("expandable_segments not supported on this platform")
31
+ }
32
+ return false;
33
+ #else
34
+ return instance().m_expandable_segments;
35
+ #endif
36
+ }
37
+
38
+ static bool release_lock_on_cudamalloc() {
39
+ return instance().m_release_lock_on_cudamalloc;
40
+ }
41
+
42
+ /** Pinned memory allocator settings */
43
+ static bool pinned_use_cuda_host_register() {
44
+ return instance().m_pinned_use_cuda_host_register;
45
+ }
46
+
47
+ static size_t pinned_num_register_threads() {
48
+ return instance().m_pinned_num_register_threads;
49
+ }
50
+
51
+ static size_t pinned_max_register_threads() {
52
+ // Based on the benchmark results, we see better allocation performance
53
+ // with 8 threads. However on future systems, we may need more threads
54
+ // so we cap the maximum at 128 threads.
55
+ return 128;
56
+ }
57
+
58
+ // This is used to round allocation sizes up to the nearest power-of-2 division.
59
+ // More description below in function roundup_power2_next_division
60
+ // As an example, if we want 4 divisions between powers of 2, this can be done
61
+ // using env variable: PYTORCH_CUDA_ALLOC_CONF=roundup_power2_divisions:4
62
+ static size_t roundup_power2_divisions(size_t size);
63
+
64
+ static CUDAAllocatorConfig& instance() {
65
+ static CUDAAllocatorConfig* s_instance = ([]() {
66
+ auto inst = new CUDAAllocatorConfig();
67
+ const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF");
68
+ inst->parseArgs(env);
69
+ return inst;
70
+ })();
71
+ return *s_instance;
72
+ }
73
+
74
+ void parseArgs(const char* env);
75
+
76
+ private:
77
+ CUDAAllocatorConfig();
78
+
79
+ void lexArgs(const char* env, std::vector<std::string>& config);
80
+ void consumeToken(
81
+ const std::vector<std::string>& config,
82
+ size_t i,
83
+ const char c);
84
+ size_t parseMaxSplitSize(const std::vector<std::string>& config, size_t i);
85
+ size_t parseGarbageCollectionThreshold(
86
+ const std::vector<std::string>& config,
87
+ size_t i);
88
+ size_t parseRoundUpPower2Divisions(
89
+ const std::vector<std::string>& config,
90
+ size_t i);
91
+ size_t parseAllocatorConfig(
92
+ const std::vector<std::string>& config,
93
+ size_t i,
94
+ bool& used_cudaMallocAsync);
95
+ size_t parsePinnedUseCudaHostRegister(
96
+ const std::vector<std::string>& config,
97
+ size_t i);
98
+ size_t parsePinnedNumRegisterThreads(
99
+ const std::vector<std::string>& config,
100
+ size_t i);
101
+
102
+ std::atomic<size_t> m_max_split_size;
103
+ std::vector<size_t> m_roundup_power2_divisions;
104
+ std::atomic<double> m_garbage_collection_threshold;
105
+ std::atomic<size_t> m_pinned_num_register_threads;
106
+ std::atomic<bool> m_expandable_segments;
107
+ std::atomic<bool> m_release_lock_on_cudamalloc;
108
+ std::atomic<bool> m_pinned_use_cuda_host_register;
109
+ };
110
+
111
+ // General caching allocator utilities
112
+ C10_CUDA_API void setAllocatorSettings(const std::string& env);
113
+
114
+ } // namespace CUDACachingAllocator
115
+ } // namespace cuda
116
+ } // namespace c10
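The `roundup_power2_divisions` comment above describes the knob but not the arithmetic, so here is a conceptual sketch of the rounding it configures (an assumption for illustration; the real logic lives in the allocator's .cpp, which is not part of this diff). With `divisions == 4`, each interval [2^n, 2^(n+1)) is split into four equal steps and a request is rounded up to the next step boundary.

#include <cstddef>
#include <iostream>

size_t roundup_power2_divisions_sketch(size_t size, size_t divisions) {
  if (size <= 1 || divisions == 0) {
    return size;
  }
  size_t pow2 = 1;
  while (pow2 * 2 <= size) {  // largest power of two not greater than size
    pow2 *= 2;
  }
  const size_t step = pow2 / divisions;  // width of one division
  if (step == 0 || size % step == 0) {
    return size;  // already on a boundary, or interval too small to split
  }
  return (size / step + 1) * step;  // round up to the next division boundary
}

int main() {
  // A 1.1 MiB request with 4 divisions rounds up to 1.25 MiB in this sketch.
  std::cout << roundup_power2_divisions_sketch(1153434, 4) << "\n";
  // The parsed settings themselves are read through the accessors above, e.g.
  // c10::cuda::CUDACachingAllocator::CUDAAllocatorConfig::max_split_size().
}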
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDACachingAllocator.h ADDED
@@ -0,0 +1,450 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/StorageImpl.h>
5
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
6
+ #include <c10/cuda/CUDAMacros.h>
7
+ #include <c10/cuda/CUDAStream.h>
8
+ #include <c10/util/ApproximateClock.h>
9
+ #include <c10/util/Registry.h>
10
+
11
+ #include <array>
12
+ #include <mutex>
13
+ #include <set>
14
+ #include <unordered_set>
15
+
16
+ namespace c10 {
17
+
18
+ // Caching allocator will execute every registered callback if it unable to find
19
+ // block inside of already allocated area.
20
+ class C10_CUDA_API FreeMemoryCallback {
21
+ public:
22
+ virtual ~FreeMemoryCallback() = default;
23
+ virtual bool Execute() = 0;
24
+ };
25
+
26
+ C10_DECLARE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback);
27
+ #define REGISTER_FREE_MEMORY_CALLBACK(name, ...) \
28
+ C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__);
29
+
30
+ namespace cuda {
31
+
32
+ // TODO: Turn this into an honest to goodness class. I briefly attempted to do
33
+ // this, but it was a bit irritating to figure out how to also correctly
34
+ // apply pimpl pattern so I didn't have to leak any internal implementation
35
+ // details in the header (CUDACachingAllocator could be made a pimpl, but
36
+ // you also need to appropriately define a class which is a subclass
37
+ // of Allocator. Not impossible, but required a bit more surgery than
38
+ // I wanted to do at the time.)
39
+ //
40
+ // Why is this using a namespace rather than old-style THCCachingAllocator_
41
+ // prefix? Mostly because it made the HIPify rules easier to write; _ is
42
+ // not counted as a word boundary, so you would otherwise have to list each
43
+ // of these functions.
44
+
45
+ namespace CUDACachingAllocator {
46
+
47
+ extern const size_t kLargeBuffer;
48
+
49
+ struct Stat {
50
+ int64_t current = 0;
51
+ int64_t peak = 0;
52
+ int64_t allocated = 0;
53
+ int64_t freed = 0;
54
+ };
55
+
56
+ enum struct StatType : uint64_t {
57
+ AGGREGATE = 0,
58
+ SMALL_POOL = 1,
59
+ LARGE_POOL = 2,
60
+ NUM_TYPES = 3 // remember to update this whenever a new stat type is added
61
+ };
62
+
63
+ typedef std::array<Stat, static_cast<size_t>(StatType::NUM_TYPES)> StatArray;
64
+
65
+ // Struct containing memory allocator summary statistics for a device.
66
+ struct DeviceStats {
67
+ // COUNT: allocations requested by client code
68
+ StatArray allocation;
69
+ // COUNT: number of allocated segments from cudaMalloc().
70
+ StatArray segment;
71
+ // COUNT: number of active memory blocks (allocated or used by stream)
72
+ StatArray active;
73
+ // COUNT: number of inactive, split memory blocks (unallocated but can't be
74
+ // released via cudaFree)
75
+ StatArray inactive_split;
76
+
77
+ // SUM: bytes allocated by this memory alocator
78
+ StatArray allocated_bytes;
79
+ // SUM: bytes reserved by this memory allocator (both free and used)
80
+ StatArray reserved_bytes;
81
+ // SUM: bytes within active memory blocks
82
+ StatArray active_bytes;
83
+ // SUM: bytes within inactive, split memory blocks
84
+ StatArray inactive_split_bytes;
85
+ // SUM: bytes requested by client code
86
+ StatArray requested_bytes;
87
+
88
+ // COUNT: total number of failed calls to CUDA malloc necessitating cache
89
+ // flushes.
90
+ int64_t num_alloc_retries = 0;
91
+
92
+ // COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush)
93
+ int64_t num_ooms = 0;
94
+
95
+ // COUNT: total number of oversize blocks allocated from pool
96
+ Stat oversize_allocations;
97
+
98
+ // COUNT: total number of oversize blocks requiring malloc
99
+ Stat oversize_segments;
100
+
101
+ // SIZE: maximum block size that is allowed to be split.
102
+ int64_t max_split_size = 0;
103
+ };
104
+
105
+ typedef std::shared_ptr<GatheredContext> (*CreateContextFn)(void);
106
+
107
+ // Struct containing info of an allocation block (i.e. a fractional part of a
108
+ // cudaMalloc)..
109
+ struct BlockInfo {
110
+ int64_t size = 0;
111
+ int64_t requested_size = 0;
112
+ int32_t gc_counter = 0;
113
+ bool allocated = false;
114
+ bool active = false;
115
+ std::shared_ptr<GatheredContext>
116
+ context_when_allocated; // per-watcher context
117
+ };
118
+
119
+ // Struct containing info of a memory segment (i.e. one contiguous cudaMalloc).
120
+ struct SegmentInfo {
121
+ int64_t device = 0;
122
+ int64_t address = 0;
123
+ int64_t total_size = 0;
124
+ int64_t requested_size = 0; // unrounded, actually requested size
125
+ int64_t allocated_size = 0;
126
+ int64_t active_size = 0;
127
+ cudaStream_t stream = 0;
128
+ bool is_large = false;
129
+ bool is_expandable = false;
130
+ MempoolId_t owner_private_pool_id = {0, 0};
131
+ std::vector<BlockInfo> blocks;
132
+ std::shared_ptr<GatheredContext> context_when_allocated;
133
+ };
134
+
135
+ struct AllocatorState {
136
+ virtual ~AllocatorState() = default;
137
+ };
138
+
139
+ union trace_time_ {
140
+ time_t t_;
141
+ approx_time_t approx_t_;
142
+ };
143
+
144
+ struct TraceEntry {
145
+ enum Action {
146
+ ALLOC, // API made to the caching allocator for new memory
147
+ FREE_REQUESTED, // API call made to the caching allocator to free memory
148
+ FREE_COMPLETED, // The allocator might have to delay a free because
149
+ // it is still in use on another stream via record_stream
150
+ // This event is generated when a free actually completes.
151
+ SEGMENT_ALLOC, // a call to cudaMalloc to get more memory from the OS
152
+ SEGMENT_FREE, // a call to cudaFree to return memory to the OS (e.g. to
153
+ // defragment or empty_caches)
154
+ SEGMENT_MAP, // a call to cuMemMap (used with expandable_segments)
155
+ SEGMENT_UNMAP, // unmap part of a segment (used with expandable segments)
156
+ SNAPSHOT, // a call to snapshot, used to correlate memory snapshots to trace
157
+ // events
158
+ OOM // the allocator threw an OutOfMemoryError (addr_ is the amount of free
159
+ // bytes reported by cuda)
160
+ };
161
+ TraceEntry(
162
+ Action action,
163
+ int device,
164
+ int64_t addr,
165
+ size_t size,
166
+ cudaStream_t stream,
167
+ approx_time_t time,
168
+ std::shared_ptr<GatheredContext> context = nullptr)
169
+ : action_(action),
170
+ device_(device),
171
+ addr_(addr),
172
+ context_(std::move(context)),
173
+ stream_(stream),
174
+ size_(size) {
175
+ time_.approx_t_ = time;
176
+ }
177
+ Action action_;
178
+ int device_;
179
+ int64_t addr_; // for OOM, this is the amount of free bytes reported by cuda
180
+ std::shared_ptr<GatheredContext> context_;
181
+ cudaStream_t stream_;
182
+ int64_t size_;
183
+ trace_time_ time_;
184
+ };
185
+
186
+ struct SnapshotInfo {
187
+ std::vector<SegmentInfo> segments;
188
+ std::vector<std::vector<TraceEntry>> device_traces;
189
+ };
190
+
191
+ // returns the pointers freed in the pool
192
+ // and the pointers allocated. Note: a pointer
193
+ // may appear in both freed and allocated
194
+ struct CheckpointDelta {
195
+ std::vector<void*> ptrs_freed;
196
+ std::vector<at::DataPtr> dataptrs_allocd;
197
+ };
198
+
199
+ enum struct RecordContext {
200
+ NEVER = 0,
201
+ STATE = 1, // only keep stacks for active allocations
202
+ ALLOC = 2, // additionally keep stacks for allocations in the trace history
203
+ ALL = 3, // additionally record stacks for when something is freed
204
+ };
205
+
206
+ // Size pretty-printer
207
+ std::string format_size(uint64_t size);
208
+
209
+ using OutOfMemoryObserver = std::function<void(
210
+ int64_t device,
211
+ int64_t allocated,
212
+ int64_t device_total,
213
+ int64_t device_free)>;
214
+
215
+ using AllocatorTraceTracker = std::function<void(const TraceEntry&)>;
216
+
217
+ class CUDAAllocator : public Allocator {
218
+ public:
219
+ virtual void* raw_alloc(size_t nbytes) = 0;
220
+ virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) = 0;
221
+ virtual void raw_delete(void* ptr) = 0;
222
+ virtual void init(int device_count) = 0;
223
+ virtual bool initialized() = 0;
224
+ virtual void setMemoryFraction(double fraction, int device) = 0;
225
+ virtual void emptyCache() = 0;
226
+ virtual void cacheInfo(int dev_id, size_t* largestBlock) = 0;
227
+ virtual void* getBaseAllocation(void* ptr, size_t* size) = 0;
228
+ virtual void recordStream(const DataPtr&, CUDAStream stream) = 0;
229
+ virtual DeviceStats getDeviceStats(int device) = 0;
230
+ virtual void resetAccumulatedStats(int device) = 0;
231
+ virtual void resetPeakStats(int device) = 0;
232
+ virtual SnapshotInfo snapshot() = 0;
233
+ virtual void beginAllocateStreamToPool(
234
+ int device,
235
+ cudaStream_t stream,
236
+ MempoolId_t mempool_id) = 0;
237
+ virtual void endAllocateStreamToPool(int device, cudaStream_t stream) = 0;
238
+ virtual void releasePool(int device, MempoolId_t mempool_id) = 0;
239
+ // returns true if the allocated blocks are equal to expected live allocations
240
+ virtual bool checkPoolLiveAllocations(
241
+ int device,
242
+ MempoolId_t mempool_id,
243
+ const std::unordered_set<void*>& expected_live_allocations) {
244
+ TORCH_CHECK(
245
+ false,
246
+ name(),
247
+ " does not yet support checkPoolLiveAllocations. "
248
+ "If you need it, please file an issue describing your use case.");
249
+ }
250
+ virtual std::shared_ptr<void> getIpcDevPtr(std::string handle) = 0;
251
+ virtual bool isHistoryEnabled() {
252
+ TORCH_CHECK(
253
+ false,
254
+ name(),
255
+ " does not yet support recordHistory. "
256
+ "If you need it, please file an issue describing your use case.");
257
+ }
258
+ virtual void recordHistory(
259
+ bool enabled,
260
+ CreateContextFn context_recorder,
261
+ size_t alloc_trace_max_entries,
262
+ RecordContext when) = 0;
263
+ virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
264
+
265
+ // Attached AllocatorTraceTracker callbacks will be called while the
266
+ // per-device allocator lock is held. Any additional locks taken from within
267
+ // the callback must be proven to always have the lock order that never
268
+ // triggers a deadlock. In particular, Python's GIL may be held when
269
+ // calling the allocator so it is unsafe to try to acquire the GIL in this
270
+ // callback.
271
+ virtual void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) = 0;
272
+
273
+ virtual void enablePeerAccess(int dev, int dev_to_access) = 0;
274
+
275
+ // memory not allocated from cudaMalloc cannot be copied
276
+ // across devices using cudaMemcpyAsync if peer to peer access is disabled.
277
+ // instead it requires cudaMemcpyAsyncPeer
278
+ // with P2P Enabled, all combinations work
279
+ // with P2P Disabled:
280
+ // cudaMalloc cudaMallocAsync/cuMemMap
281
+ // cudaMemcpyAsyncPeer works works
282
+ // cudaMemcpyAsync works error
283
+
284
+ // This function performs chooses to use the Peer version of
285
+ // memcpy if required based on where the allocated put dst/src.
286
+ virtual cudaError_t memcpyAsync(
287
+ void* dst,
288
+ int dstDevice,
289
+ const void* src,
290
+ int srcDevice,
291
+ size_t count,
292
+ cudaStream_t stream,
293
+ bool p2p_enabled) = 0;
294
+ virtual std::shared_ptr<AllocatorState> getCheckpointState(
295
+ int device,
296
+ MempoolId_t id) = 0;
297
+ virtual CheckpointDelta setCheckpointPoolState(
298
+ int device,
299
+ std::shared_ptr<AllocatorState> pps) = 0;
300
+ virtual std::string name() = 0;
301
+ };
302
+
303
+ // Allocator object, statically initialized
304
+ // See BackendInitializer in CUDACachingAllocator.cpp.
305
+ // Atomic loads on x86 are just normal loads,
306
+ // (atomic stores are different), so reading this value
307
+ // is no different than loading a pointer.
308
+ C10_CUDA_API extern std::atomic<CUDAAllocator*> allocator;
309
+
310
+ inline CUDAAllocator* get() {
311
+ return allocator.load();
312
+ }
313
+
314
+ // Called directly by clients.
315
+ inline void* raw_alloc(size_t nbytes) {
316
+ return get()->raw_alloc(nbytes);
317
+ }
318
+
319
+ inline void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) {
320
+ return get()->raw_alloc_with_stream(nbytes, stream);
321
+ }
322
+
323
+ inline void raw_delete(void* ptr) {
324
+ return get()->raw_delete(ptr);
325
+ }
326
+
327
+ inline void init(int device_count) {
328
+ return get()->init(device_count);
329
+ }
330
+
331
+ inline void setMemoryFraction(double fraction, int device) {
332
+ return get()->setMemoryFraction(fraction, device);
333
+ }
334
+
335
+ inline void emptyCache() {
336
+ return get()->emptyCache();
337
+ }
338
+
339
+ inline void cacheInfo(int dev_id, size_t* largestBlock) {
340
+ return get()->cacheInfo(dev_id, largestBlock);
341
+ }
342
+
343
+ inline void* getBaseAllocation(void* ptr, size_t* size) {
344
+ return get()->getBaseAllocation(ptr, size);
345
+ }
346
+
347
+ inline void recordStream(const DataPtr& dataPtr, CUDAStream stream) {
348
+ return get()->recordStream(dataPtr, stream);
349
+ }
350
+
351
+ inline DeviceStats getDeviceStats(int device) {
352
+ return get()->getDeviceStats(device);
353
+ }
354
+
355
+ inline void resetAccumulatedStats(int device) {
356
+ return get()->resetAccumulatedStats(device);
357
+ }
358
+
359
+ inline void resetPeakStats(int device) {
360
+ return get()->resetPeakStats(device);
361
+ }
362
+
363
+ inline SnapshotInfo snapshot() {
364
+ return get()->snapshot();
365
+ }
366
+
367
+ inline std::shared_ptr<AllocatorState> getCheckpointState(
368
+ int device,
369
+ MempoolId_t id) {
370
+ return get()->getCheckpointState(device, id);
371
+ }
372
+
373
+ inline CheckpointDelta setCheckpointPoolState(
374
+ int device,
375
+ std::shared_ptr<AllocatorState> pps) {
376
+ return get()->setCheckpointPoolState(device, pps);
377
+ }
378
+
379
+ // CUDAGraph interactions
380
+ inline void beginAllocateStreamToPool(
381
+ int device,
382
+ cudaStream_t stream,
383
+ MempoolId_t mempool_id) {
384
+ return get()->beginAllocateStreamToPool(device, stream, mempool_id);
385
+ }
386
+
387
+ inline void endAllocateStreamToPool(int device, cudaStream_t stream) {
388
+ return get()->endAllocateStreamToPool(device, stream);
389
+ }
390
+
391
+ inline void recordHistory(
392
+ bool enabled,
393
+ CreateContextFn context_recorder,
394
+ size_t alloc_trace_max_entries,
395
+ RecordContext when) {
396
+ return get()->recordHistory(
397
+ enabled, context_recorder, alloc_trace_max_entries, when);
398
+ }
399
+
400
+ inline bool isHistoryEnabled() {
401
+ return get()->isHistoryEnabled();
402
+ }
403
+
404
+ inline bool checkPoolLiveAllocations(
405
+ int device,
406
+ MempoolId_t mempool_id,
407
+ const std::unordered_set<void*>& expected_live_allocations) {
408
+ return get()->checkPoolLiveAllocations(
409
+ device, mempool_id, expected_live_allocations);
410
+ }
411
+
412
+ inline void attachOutOfMemoryObserver(OutOfMemoryObserver observer) {
413
+ return get()->attachOutOfMemoryObserver(observer);
414
+ }
415
+
416
+ inline void attachAllocatorTraceTracker(AllocatorTraceTracker tracker) {
417
+ return get()->attachAllocatorTraceTracker(tracker);
418
+ }
419
+
420
+ inline void releasePool(int device, MempoolId_t mempool_id) {
421
+ return get()->releasePool(device, mempool_id);
422
+ }
423
+ // Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE
424
+ inline std::shared_ptr<void> getIpcDevPtr(std::string handle) {
425
+ return get()->getIpcDevPtr(handle);
426
+ }
427
+
428
+ inline std::string name() {
429
+ return get()->name();
430
+ }
431
+
432
+ inline cudaError_t memcpyAsync(
433
+ void* dst,
434
+ int dstDevice,
435
+ const void* src,
436
+ int srcDevice,
437
+ size_t count,
438
+ cudaStream_t stream,
439
+ bool p2p_enabled) {
440
+ return get()->memcpyAsync(
441
+ dst, dstDevice, src, srcDevice, count, stream, p2p_enabled);
442
+ }
443
+
444
+ inline void enablePeerAccess(int dev, int dev_to_access) {
445
+ return get()->enablePeerAccess(dev, dev_to_access);
446
+ }
447
+
448
+ } // namespace CUDACachingAllocator
449
+ } // namespace cuda
450
+ } // namespace c10
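Since everything in this header forwards to the active backend through `get()`, a short usage sketch may help; it assumes a CUDA build with the allocator already initialized and omits error handling. All calls below are the inline wrappers declared above.

#include <c10/cuda/CUDACachingAllocator.h>
#include <iostream>

void example(size_t nbytes) {
  namespace alloc = c10::cuda::CUDACachingAllocator;

  void* p = alloc::raw_alloc(nbytes);  // device memory served from the cache
  // ... use `p` on the current stream ...
  alloc::raw_delete(p);                // hands the block back to the cache

  // Per-device statistics; index 0 is StatType::AGGREGATE.
  alloc::DeviceStats stats = alloc::getDeviceStats(/*device=*/0);
  std::cout << "reserved: "
            << alloc::format_size(
                   stats.reserved_bytes[static_cast<size_t>(
                                            alloc::StatType::AGGREGATE)]
                       .current)
            << std::endl;

  alloc::emptyCache();  // release cached but unused segments back to CUDA
}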
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertion.h ADDED
@@ -0,0 +1,98 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAException.h>
4
+ #include <c10/macros/Macros.h>
5
+
6
+ namespace c10 {
7
+ namespace cuda {
8
+
9
+ #ifdef TORCH_USE_CUDA_DSA
10
+ // Copy string from `src` to `dst`
11
+ static __device__ void dstrcpy(char* dst, const char* src) {
12
+ int i = 0;
13
+ // Copy string from source to destination, ensuring that it
14
+ // isn't longer than `C10_CUDA_DSA_MAX_STR_LEN-1`
15
+ while (*src != '\0' && i++ < C10_CUDA_DSA_MAX_STR_LEN - 1) {
16
+ *dst++ = *src++;
17
+ }
18
+ *dst = '\0';
19
+ }
20
+
21
+ static __device__ void dsa_add_new_assertion_failure(
22
+ DeviceAssertionsData* assertions_data,
23
+ const char* assertion_msg,
24
+ const char* filename,
25
+ const char* function_name,
26
+ const int line_number,
27
+ const uint32_t caller,
28
+ const dim3 block_id,
29
+ const dim3 thread_id) {
30
+ // `assertions_data` may be nullptr if device-side assertion checking
31
+ // is disabled at run-time. If it is disabled at compile time this
32
+ // function will never be called
33
+ if (!assertions_data) {
34
+ return;
35
+ }
36
+
37
+ // Atomically increment so other threads can fail at the same time
38
+ // Note that incrementing this means that the CPU can observe that
39
+ // a failure has happened and can begin to respond before we've
40
+ // written information about that failure out to the buffer.
41
+ const auto nid = atomicAdd(&(assertions_data->assertion_count), 1);
42
+
43
+ if (nid >= C10_CUDA_DSA_ASSERTION_COUNT) {
44
+ // At this point we've run out of assertion buffer space.
45
+ // We could print a message about this, but that'd get
46
+ // spammy if a lot of threads did it, so we just silently
47
+ // ignore any other assertion failures. In most cases the
48
+ // failures will all probably be analogous anyway.
49
+ return;
50
+ }
51
+
52
+ // Write information about the assertion failure to memory.
53
+ // Note that this occurs only after the `assertion_count`
54
+ // increment broadcasts that there's been a problem.
55
+ auto& self = assertions_data->assertions[nid];
56
+ dstrcpy(self.assertion_msg, assertion_msg);
57
+ dstrcpy(self.filename, filename);
58
+ dstrcpy(self.function_name, function_name);
59
+ self.line_number = line_number;
60
+ self.caller = caller;
61
+ self.block_id[0] = block_id.x;
62
+ self.block_id[1] = block_id.y;
63
+ self.block_id[2] = block_id.z;
64
+ self.thread_id[0] = thread_id.x;
65
+ self.thread_id[1] = thread_id.y;
66
+ self.thread_id[2] = thread_id.z;
67
+ }
68
+
69
+ // Emulates a kernel assertion. The assertion won't stop the kernel's progress,
70
+ // so you should assume everything the kernel produces is garbage if there's an
71
+ // assertion failure.
72
+ // NOTE: This assumes that `assertions_data` and `assertion_caller_id` are
73
+ // arguments of the kernel and therefore accessible.
74
+ #define CUDA_KERNEL_ASSERT2(condition) \
75
+ do { \
76
+ if (C10_UNLIKELY(!(condition))) { \
77
+ /* Has an atomic element so threads can fail at the same time */ \
78
+ c10::cuda::dsa_add_new_assertion_failure( \
79
+ assertions_data, \
80
+ C10_STRINGIZE(condition), \
81
+ __FILE__, \
82
+ __FUNCTION__, \
83
+ __LINE__, \
84
+ assertion_caller_id, \
85
+ blockIdx, \
86
+ threadIdx); \
87
+ /* Now that the kernel has failed we early exit the kernel, but */ \
88
+ /* otherwise keep going and rely on the host to check UVM and */ \
89
+ /* determine we've had a problem */ \
90
+ return; \
91
+ } \
92
+ } while (false)
93
+ #else
94
+ #define CUDA_KERNEL_ASSERT2(condition) assert(condition)
95
+ #endif
96
+
97
+ } // namespace cuda
98
+ } // namespace c10
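A hypothetical kernel showing the contract that CUDA_KERNEL_ASSERT2 documents above: `assertions_data` and `assertion_caller_id` must be visible inside the kernel, here simply passed as the last two parameters. `DeviceAssertionsData` is declared in c10/cuda/CUDADeviceAssertionHost.h, which is added further down in this diff.

#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDADeviceAssertionHost.h>

__global__ void checked_copy(
    const float* src,
    float* dst,
    int n,
    c10::cuda::DeviceAssertionsData* const assertions_data,
    uint32_t assertion_caller_id) {
  // With TORCH_USE_CUDA_DSA this records the failure and returns early from
  // the thread; without it, it degrades to a plain assert(). Either way the
  // kernel's output must be treated as garbage once an assertion fires.
  CUDA_KERNEL_ASSERT2(src != nullptr && dst != nullptr);
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    dst[i] = src[i];
  }
}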
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/CUDADeviceAssertionHost.h ADDED
@@ -0,0 +1,158 @@
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAMacros.h>
4
+
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ #ifdef USE_CUDA
11
+ #define TORCH_USE_CUDA_DSA
12
+ #endif
13
+
14
+ /// Number of assertion failure messages we can store. If this is too small
15
+ /// threads will fail silently.
16
+ constexpr int C10_CUDA_DSA_ASSERTION_COUNT = 10;
17
+ constexpr int C10_CUDA_DSA_MAX_STR_LEN = 512;
18
+
19
+ namespace c10 {
20
+ namespace cuda {
21
+
22
+ /// Holds information about any device-side assertions that fail.
23
+ /// Held in managed memory and access by both the CPU and the GPU.
24
+ struct DeviceAssertionData {
25
+ /// Stringification of the assertion
26
+ char assertion_msg[C10_CUDA_DSA_MAX_STR_LEN];
27
+ /// File the assertion was in
28
+ char filename[C10_CUDA_DSA_MAX_STR_LEN];
29
+ /// Name of the function the assertion was in
30
+ char function_name[C10_CUDA_DSA_MAX_STR_LEN];
31
+ /// Line number the assertion was at
32
+ int line_number;
33
+ /// Number uniquely identifying the kernel launch that triggered the assertion
34
+ uint32_t caller;
35
+ /// block_id of the thread that failed the assertion
36
+ int32_t block_id[3];
37
+ /// third_id of the thread that failed the assertion
38
+ int32_t thread_id[3];
39
+ };
40
+
41
+ /// Used to hold assertions generated by the device
42
+ /// Held in managed memory and access by both the CPU and the GPU.
43
+ struct DeviceAssertionsData {
44
+ /// Total number of assertions found; a subset of thse will be recorded
45
+ /// in `assertions`
46
+ int32_t assertion_count;
47
+ /// An array of assertions that will be written to in a race-free manner
48
+ DeviceAssertionData assertions[C10_CUDA_DSA_ASSERTION_COUNT];
49
+ };
50
+
51
+ /// Use to hold info about kernel launches so that we can run kernels
52
+ /// asynchronously and still associate launches with device-side
53
+ /// assertion failures
54
+ struct CUDAKernelLaunchInfo {
55
+ /// Filename of the code where the kernel was launched from
56
+ const char* launch_filename;
57
+ /// Function from which the kernel was launched
58
+ const char* launch_function;
59
+ /// Line number of where the code was launched from
60
+ uint32_t launch_linenum;
61
+ /// Backtrace of where the kernel was launched from, only populated if
62
+ /// CUDAKernelLaunchRegistry::gather_launch_stacktrace is True
63
+ std::string launch_stacktrace;
64
+ /// Kernel that was launched
65
+ const char* kernel_name;
66
+ /// Device the kernel was launched on
67
+ int device;
68
+ /// Stream the kernel was launched on
69
+ int32_t stream;
70
+ /// A number that uniquely identifies the kernel launch
71
+ uint64_t generation_number;
72
+ };
73
+
74
+ /// Circular buffer used to hold information about kernel launches
75
+ /// this is later used to reconstruct how a device-side kernel assertion failure
76
+ /// occurred CUDAKernelLaunchRegistry is used as a singleton
77
+ class C10_CUDA_API CUDAKernelLaunchRegistry {
78
+ private:
79
+ /// Assume that this is the max number of kernel launches that might ever be
80
+ /// enqueued across all streams on a single device
81
+ static constexpr int max_kernel_launches = 1024;
82
+ /// How many kernel launch infos we've inserted. Used to ensure that the circular
83
+ /// queue doesn't provide false information by always increasing, but also to
84
+ /// mark where we are inserting into the queue
85
+ #ifdef TORCH_USE_CUDA_DSA
86
+ uint64_t generation_number = 0;
87
+ #endif
88
+ /// Shared mutex between writer and accessor to ensure multi-threaded safety.
89
+ mutable std::mutex read_write_mutex;
90
+ /// Used to ensure prevent race conditions in GPU memory allocation
91
+ mutable std::mutex gpu_alloc_mutex;
92
+ /// Pointer to managed memory keeping track of device-side assertions. There
93
+ /// is one entry for each possible device the process might work with. Unused
94
+ /// entries are nullptrs. We could also use an unordered_set here, but this
95
+ /// vector design will be faster and the wasted memory is small since we
96
+ /// expect the number of GPUs per node will always be small
97
+ std::vector<
98
+ std::unique_ptr<DeviceAssertionsData, void (*)(DeviceAssertionsData*)>>
99
+ uvm_assertions;
100
+ /// A single circular buffer holds information about every kernel launch the
101
+ /// process makes across all devices.
102
+ std::vector<CUDAKernelLaunchInfo> kernel_launches;
103
+ bool check_env_for_enable_launch_stacktracing() const;
104
+ bool check_env_for_dsa_enabled() const;
105
+
106
+ public:
107
+ CUDAKernelLaunchRegistry();
108
+ /// Register a new kernel launch and obtain a generation number back to be
109
+ /// passed to the kernel
110
+ uint32_t insert(
111
+ const char* launch_filename,
112
+ const char* launch_function,
113
+ const uint32_t launch_linenum,
114
+ const char* kernel_name,
115
+ const int32_t stream_id);
116
+ /// Get copies of the kernel launch registry and each device's assertion
117
+ /// failure buffer so they can be inspected without raising race conditions
118
+ std::
119
+ pair<std::vector<DeviceAssertionsData>, std::vector<CUDAKernelLaunchInfo>>
120
+ snapshot() const;
121
+ /// Get a pointer to the current device's assertion failure buffer. If no such
122
+ /// buffer exists then one is created. This means that the first kernel launch
123
+ /// made on each device will be slightly slower because memory allocations are
124
+ /// required
125
+ DeviceAssertionsData* get_uvm_assertions_ptr_for_current_device();
126
+ /// Gets the global singleton of the registry
127
+ static CUDAKernelLaunchRegistry& get_singleton_ref();
128
+ /// If not all devices support DSA, we disable it
129
+ const bool do_all_devices_support_managed_memory = false;
130
+ /// Whether or not to gather stack traces when launching kernels
131
+ bool gather_launch_stacktrace = false;
132
+ /// Whether or not host-side DSA is enabled or disabled at run-time
133
+ /// Note: Device-side code cannot be enabled/disabled at run-time
134
+ bool enabled_at_runtime = false;
135
+ /// Whether or not a device has indicated a failure
136
+ bool has_failed() const;
137
+ #ifdef TORCH_USE_CUDA_DSA
138
+ const bool enabled_at_compile_time = true;
139
+ #else
140
+ const bool enabled_at_compile_time = false;
141
+ #endif
142
+ };
143
+
144
+ std::string c10_retrieve_device_side_assertion_info();
145
+
146
+ } // namespace cuda
147
+ } // namespace c10
148
+
149
+ // Each kernel launched with TORCH_DSA_KERNEL_LAUNCH
150
+ // requires the same input arguments. We introduce the following macro to
151
+ // standardize these.
152
+ #define TORCH_DSA_KERNEL_ARGS \
153
+ [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, \
154
+ [[maybe_unused]] uint32_t assertion_caller_id
155
+
156
+ // This macro can be used to pass the DSA arguments onward to another
157
+ // function
158
+ #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id
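To tie the registry and the two macros above together, here is a hypothetical host-side sketch of a DSA-aware launch. In PyTorch proper this is wrapped by TORCH_DSA_KERNEL_LAUNCH in c10/cuda/CUDAException.h (not part of this diff); the kernel name, the fixed stream_id of 0 and the launch geometry below are simplifications for illustration.

#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDADeviceAssertionHost.h>

// A device helper that still needs the DSA state: the trailing arguments are
// declared once via TORCH_DSA_KERNEL_ARGS and forwarded with
// TORCH_DSA_KERNEL_ARGS_PASS.
__device__ void store_checked(float* dst, int i, float v, TORCH_DSA_KERNEL_ARGS) {
  CUDA_KERNEL_ASSERT2(dst != nullptr);
  dst[i] = v;
}

__global__ void fill_kernel(float* dst, int n, float v, TORCH_DSA_KERNEL_ARGS) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Forward the DSA arguments to the helper.
    store_checked(dst, i, v, TORCH_DSA_KERNEL_ARGS_PASS);
  }
}

void launch_fill(float* dst, int n, float v, cudaStream_t stream) {
  auto& registry = c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref();
  // Register the launch and get the id the device side will report back.
  const uint32_t caller_id = registry.insert(
      __FILE__, __func__, __LINE__, "fill_kernel", /*stream_id=*/0);
  // Managed-memory buffer the kernel writes assertion failures into.
  auto* assertions_data = registry.get_uvm_assertions_ptr_for_current_device();

  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  fill_kernel<<<blocks, threads, 0, stream>>>(
      dst, n, v, assertions_data, caller_id);

  if (registry.has_failed()) {
    // c10_retrieve_device_side_assertion_info() formats the recorded details.
  }
}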