Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h +349 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h +9 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h +21 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h +15 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedTensorImpl.h +170 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchingMetaprogramming.h +126 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/DynamicLayer.h +124 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/TensorWrapper.h +103 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/AdaptivePooling.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h +321 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h +129 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h +173 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CPUBlas.h +180 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CPUFallback.h +45 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessor.h +34 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ConvolutionMM3d.h +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h +20 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h +229 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h +315 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Distance.h +20 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h +385 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Distributions.h +518 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h +139 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/FunctionOfAMatrixUtils.h +20 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h +298 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h +109 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h +16 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/IndexingUtils.h +160 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Math.h +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h +71 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h +157 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/MaxPooling.h +42 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h +27 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h +62 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h +47 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/PointwiseOps.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h +340 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h +69 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/RangeFactories.h +12 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h +16 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h +448 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h +40 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h +48 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h +172 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h +75 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h +128 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h
ADDED
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <string>
|
4 |
+
|
5 |
+
#include <ATen/cuda/CUDAContext.h>
|
6 |
+
#include <ATen/cuda/Exceptions.h>
|
7 |
+
|
8 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
9 |
+
#include <ATen/cudnn/Utils.h>
|
10 |
+
#include <ATen/core/Tensor.h>
|
11 |
+
#include <ATen/TensorUtils.h>
|
12 |
+
#include <ATen/cuda/ATenCUDAGeneral.h>
|
13 |
+
#include <cuda.h>
|
14 |
+
|
15 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
16 |
+
#include <ATen/Functions.h>
|
17 |
+
#else
|
18 |
+
#include <ATen/ops/empty.h>
|
19 |
+
#endif
|
20 |
+
|
21 |
+
namespace at { namespace native {
|
22 |
+
|
23 |
+
std::string cudnnTypeToString(cudnnDataType_t dtype);
|
24 |
+
|
25 |
+
// TODO: Add constructors for all of the descriptors
|
26 |
+
|
27 |
+
inline int dataSize(cudnnDataType_t dataType)
|
28 |
+
{
|
29 |
+
switch (dataType) {
|
30 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8200
|
31 |
+
case CUDNN_DATA_BFLOAT16:
|
32 |
+
#endif
|
33 |
+
case CUDNN_DATA_HALF: return 2;
|
34 |
+
case CUDNN_DATA_FLOAT: return 4;
|
35 |
+
default: return 8;
|
36 |
+
}
|
37 |
+
}
|
38 |
+
|
39 |
+
// The stride for a size-1 dimensions is not uniquely determined; in
|
40 |
+
// fact, it can be anything you want, because the fact that the
|
41 |
+
// tensor is size 1 at this dimension means that you will never actually
|
42 |
+
// try advancing your pointer by this stride.
|
43 |
+
//
|
44 |
+
// However, CuDNN has a much more stringent requirement on strides:
|
45 |
+
// if you are passing a contiguous input, it better be the case
|
46 |
+
// that the stride for dim i is the product of the sizes of dims
|
47 |
+
// i+1 to the end. This stride is indeed uniquely determined. This
|
48 |
+
// function modifies 'stride' in place so this invariant holds.
|
49 |
+
template <typename T>
|
50 |
+
static inline void fixSizeOneDimStride(int dim, const T *size, T *stride, bool nhwc) {
|
51 |
+
int64_t z = 1;
|
52 |
+
int index = 0;
|
53 |
+
std::vector<int> permutation(dim);
|
54 |
+
|
55 |
+
if (nhwc) {
|
56 |
+
permutation[index++] = 1;
|
57 |
+
}
|
58 |
+
for (int d = dim-1; d > 1; d--) {
|
59 |
+
permutation[index++] = d;
|
60 |
+
}
|
61 |
+
if (!nhwc) {
|
62 |
+
permutation[index++] = 1;
|
63 |
+
}
|
64 |
+
permutation[index++] = 0;
|
65 |
+
for (int d : permutation) {
|
66 |
+
if (size[d] == 1) {
|
67 |
+
stride[d] = z;
|
68 |
+
} else {
|
69 |
+
z *= size[d];
|
70 |
+
}
|
71 |
+
}
|
72 |
+
}
|
73 |
+
|
74 |
+
template <typename T, cudnnStatus_t (*dtor)(T*)>
|
75 |
+
struct DescriptorDeleter {
|
76 |
+
void operator()(T* x) {
|
77 |
+
if (x != nullptr) {
|
78 |
+
AT_CUDNN_CHECK(dtor(x));
|
79 |
+
}
|
80 |
+
}
|
81 |
+
};
|
82 |
+
|
83 |
+
// A generic class for wrapping cuDNN descriptor types. All you need
|
84 |
+
// is to give the underlying type the Descriptor_t points to (usually,
|
85 |
+
// if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct),
|
86 |
+
// the constructor and the destructor. Subclasses are responsible
|
87 |
+
// for defining a set() function to actually set the descriptor.
|
88 |
+
//
|
89 |
+
// Descriptors default construct to a nullptr, and have a descriptor
|
90 |
+
// initialized the first time you call set() or any other initializing
|
91 |
+
// function.
|
92 |
+
template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
|
93 |
+
class TORCH_CUDA_CPP_API Descriptor {
|
94 |
+
public:
|
95 |
+
// TODO: Figure out why const-correctness doesn't work here
|
96 |
+
|
97 |
+
// Use desc() to access the underlying descriptor pointer in
|
98 |
+
// a read-only fashion. Most client code should use this.
|
99 |
+
// If the descriptor was never initialized, this will return
|
100 |
+
// nullptr.
|
101 |
+
T* desc() const { return desc_.get(); }
|
102 |
+
T* desc() { return desc_.get(); }
|
103 |
+
|
104 |
+
// Use mut_desc() to access the underlying descriptor pointer
|
105 |
+
// if you intend to modify what it points to (e.g., using
|
106 |
+
// cudnnSetFooDescriptor). This will ensure that the descriptor
|
107 |
+
// is initialized. Code in this file will use this function.
|
108 |
+
T* mut_desc() { init(); return desc_.get(); }
|
109 |
+
protected:
|
110 |
+
void init() {
|
111 |
+
if (desc_ == nullptr) {
|
112 |
+
T* raw_desc;
|
113 |
+
AT_CUDNN_CHECK(ctor(&raw_desc));
|
114 |
+
desc_.reset(raw_desc);
|
115 |
+
}
|
116 |
+
}
|
117 |
+
private:
|
118 |
+
std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
|
119 |
+
};
|
120 |
+
|
121 |
+
class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor<
|
122 |
+
cudnnTensorStruct,
|
123 |
+
&cudnnCreateTensorDescriptor,
|
124 |
+
&cudnnDestroyTensorDescriptor> {
|
125 |
+
public:
|
126 |
+
TensorDescriptor() = default;
|
127 |
+
explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
|
128 |
+
set(t, pad);
|
129 |
+
}
|
130 |
+
|
131 |
+
// Note [CuDNN broadcast padding]
|
132 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
133 |
+
// pad specifies the minimum dimensionality of the tensor descriptor
|
134 |
+
// we produce (it doesn't have anything to do with, e.g., convolution
|
135 |
+
// padding). If 't' is lower-dimensional than 'pad', the remaining
|
136 |
+
// dimensions (on the right) are padded with ones. This doesn't
|
137 |
+
// affect the underlying data layout. This is particularly useful for
|
138 |
+
// dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is
|
139 |
+
// done in two steps: first, the client code is expected to pad out
|
140 |
+
// (the dimensions) input tensors to be the same dimension as the
|
141 |
+
// target broadcast, and then second, CuDNN takes of actually
|
142 |
+
// broadcasting size 1 dimensions.
|
143 |
+
|
144 |
+
void set(const at::Tensor &t, size_t pad = 0);
|
145 |
+
void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0);
|
146 |
+
void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
|
147 |
+
|
148 |
+
void print();
|
149 |
+
|
150 |
+
private:
|
151 |
+
void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc);
|
152 |
+
|
153 |
+
void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) {
|
154 |
+
fixSizeOneDimStride<int>(dim, size, stride, nhwc);
|
155 |
+
AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, stride));
|
156 |
+
}
|
157 |
+
};
|
158 |
+
|
159 |
+
std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
|
160 |
+
|
161 |
+
class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor<
|
162 |
+
cudnnFilterStruct,
|
163 |
+
&cudnnCreateFilterDescriptor,
|
164 |
+
&cudnnDestroyFilterDescriptor> {
|
165 |
+
public:
|
166 |
+
void set(const at::Tensor &t, int64_t pad = 0) {
|
167 |
+
set(t, at::MemoryFormat::Contiguous, pad);
|
168 |
+
}
|
169 |
+
|
170 |
+
void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
|
171 |
+
|
172 |
+
void print();
|
173 |
+
private:
|
174 |
+
void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) {
|
175 |
+
AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size));
|
176 |
+
}
|
177 |
+
};
|
178 |
+
|
179 |
+
std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d);
|
180 |
+
|
181 |
+
struct TORCH_CUDA_CPP_API ConvolutionDescriptor
|
182 |
+
: public Descriptor<
|
183 |
+
cudnnConvolutionStruct,
|
184 |
+
&cudnnCreateConvolutionDescriptor,
|
185 |
+
&cudnnDestroyConvolutionDescriptor> {
|
186 |
+
void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) {
|
187 |
+
cudnnDataType_t mathType = dataType;
|
188 |
+
if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT;
|
189 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale,
|
190 |
+
CUDNN_CROSS_CORRELATION, mathType));
|
191 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups));
|
192 |
+
// See Note [behavior of cudnnFind and cudnnGet]
|
193 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH));
|
194 |
+
if(dataType == CUDNN_DATA_HALF) {
|
195 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH));
|
196 |
+
} else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) {
|
197 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8000
|
198 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH));
|
199 |
+
#endif
|
200 |
+
}
|
201 |
+
}
|
202 |
+
};
|
203 |
+
|
204 |
+
struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
|
205 |
+
: public Descriptor<
|
206 |
+
cudnnSpatialTransformerStruct,
|
207 |
+
&cudnnCreateSpatialTransformerDescriptor,
|
208 |
+
&cudnnDestroySpatialTransformerDescriptor> {
|
209 |
+
void set(cudnnDataType_t dataType, int dim, int* size) {
|
210 |
+
AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size));
|
211 |
+
}
|
212 |
+
};
|
213 |
+
|
214 |
+
struct TORCH_CUDA_CPP_API DropoutDescriptor
|
215 |
+
: public Descriptor<
|
216 |
+
cudnnDropoutStruct,
|
217 |
+
&cudnnCreateDropoutDescriptor,
|
218 |
+
&cudnnDestroyDropoutDescriptor> {
|
219 |
+
at::Tensor state;
|
220 |
+
|
221 |
+
// Initialize a dropout descriptor's RNG state.
|
222 |
+
// WARNING: This function is very expensive, avoid calling this function!
|
223 |
+
void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) {
|
224 |
+
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
|
225 |
+
size_t state_size;
|
226 |
+
AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size));
|
227 |
+
AT_ASSERT(options.device().type() == kCUDA);
|
228 |
+
AT_ASSERT(options.dtype() == kByte);
|
229 |
+
state = at::empty({static_cast<int64_t>(state_size)}, options);
|
230 |
+
AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed));
|
231 |
+
}
|
232 |
+
|
233 |
+
// Restore a dropout descriptor given a dropout probability and existing RNG state.
|
234 |
+
void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
|
235 |
+
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
|
236 |
+
state = state_;
|
237 |
+
void *state_ptr = state.data_ptr();
|
238 |
+
size_t state_size = state.size(0);
|
239 |
+
// NB: The seed doesn't actually matter, so we give a dummy value
|
240 |
+
AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */));
|
241 |
+
}
|
242 |
+
|
243 |
+
// Restore a dropout descriptor corresponding to no dropout
|
244 |
+
void set_no_dropout(cudnnHandle_t handle) {
|
245 |
+
// NB: seed doesn't matter when dropout = 0, because no random number
|
246 |
+
// initialization actually takes place when there is no dropout.
|
247 |
+
// NB: Empirically, cudnnSetDropoutDescriptor is cheap when
|
248 |
+
// dropout == 0
|
249 |
+
AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */));
|
250 |
+
}
|
251 |
+
};
|
252 |
+
|
253 |
+
struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor<
|
254 |
+
cudnnRNNStruct,
|
255 |
+
&cudnnCreateRNNDescriptor,
|
256 |
+
&cudnnDestroyRNNDescriptor> {
|
257 |
+
DropoutDescriptor dropout_desc_;
|
258 |
+
void set(cudnnHandle_t handle, int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc,
|
259 |
+
cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional,
|
260 |
+
cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) {
|
261 |
+
dropout_desc_ = std::move(dropout_desc);
|
262 |
+
|
263 |
+
AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
|
264 |
+
handle,
|
265 |
+
mut_desc(),
|
266 |
+
hidden_size,
|
267 |
+
num_layers,
|
268 |
+
dropout_desc_.desc(),
|
269 |
+
input_mode,
|
270 |
+
bidirectional,
|
271 |
+
mode,
|
272 |
+
algo,
|
273 |
+
datatype));
|
274 |
+
if (proj_size != 0) {
|
275 |
+
AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers(
|
276 |
+
handle,
|
277 |
+
/*rnnDesc=*/mut_desc(),
|
278 |
+
/*recProjSize=*/proj_size,
|
279 |
+
/*outProjSize=*/0));
|
280 |
+
}
|
281 |
+
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
|
282 |
+
if (prop->major >= 7) {
|
283 |
+
if (input_type == CUDNN_DATA_HALF) {
|
284 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH);
|
285 |
+
}
|
286 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8000
|
287 |
+
else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) {
|
288 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH);
|
289 |
+
}
|
290 |
+
#endif
|
291 |
+
else {
|
292 |
+
// Technically, as the default it's not necessary to explicitly
|
293 |
+
// set this.
|
294 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH);
|
295 |
+
}
|
296 |
+
}
|
297 |
+
}
|
298 |
+
};
|
299 |
+
|
300 |
+
struct TORCH_CUDA_CPP_API CTCLossDescriptor
|
301 |
+
: public Descriptor<
|
302 |
+
cudnnCTCLossStruct,
|
303 |
+
&cudnnCreateCTCLossDescriptor,
|
304 |
+
&cudnnDestroyCTCLossDescriptor> {
|
305 |
+
void set(cudnnDataType_t datatype) {
|
306 |
+
AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype));
|
307 |
+
}
|
308 |
+
#if CUDNN_VERSION >= 7600
|
309 |
+
void setEx(
|
310 |
+
cudnnDataType_t datatype,
|
311 |
+
cudnnLossNormalizationMode_t normMode,
|
312 |
+
cudnnNanPropagation_t gradMode) {
|
313 |
+
AT_CUDNN_CHECK(
|
314 |
+
cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode));
|
315 |
+
}
|
316 |
+
#endif
|
317 |
+
};
|
318 |
+
|
319 |
+
struct TORCH_CUDA_CPP_API ActivationDescriptor
|
320 |
+
: public Descriptor<
|
321 |
+
cudnnActivationStruct,
|
322 |
+
&cudnnCreateActivationDescriptor,
|
323 |
+
&cudnnDestroyActivationDescriptor> {
|
324 |
+
void set(cudnnActivationMode_t mode) {
|
325 |
+
AT_ASSERT(
|
326 |
+
mode == CUDNN_ACTIVATION_RELU,
|
327 |
+
"TODO: support more cuDNN activation modes");
|
328 |
+
AT_CUDNN_CHECK(cudnnSetActivationDescriptor(
|
329 |
+
mut_desc(),
|
330 |
+
mode,
|
331 |
+
cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
|
332 |
+
std::numeric_limits<double>::max()));
|
333 |
+
}
|
334 |
+
};
|
335 |
+
|
336 |
+
union Constant
|
337 |
+
{
|
338 |
+
float f;
|
339 |
+
double d;
|
340 |
+
Constant(cudnnDataType_t dataType, double value) {
|
341 |
+
if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) {
|
342 |
+
f = static_cast<float>(value);
|
343 |
+
} else {
|
344 |
+
d = value;
|
345 |
+
}
|
346 |
+
}
|
347 |
+
};
|
348 |
+
|
349 |
+
}} // namespace
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
4 |
+
#include <ATen/cuda/ATenCUDAGeneral.h>
|
5 |
+
|
6 |
+
namespace at { namespace native {
|
7 |
+
|
8 |
+
TORCH_CUDA_CPP_API cudnnHandle_t getCudnnHandle();
|
9 |
+
}} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
4 |
+
#include <ATen/Tensor.h>
|
5 |
+
|
6 |
+
namespace at { namespace native {
|
7 |
+
|
8 |
+
TORCH_CUDA_CPP_API cudnnDataType_t
|
9 |
+
getCudnnDataTypeFromScalarType(const at::ScalarType dtype);
|
10 |
+
cudnnDataType_t getCudnnDataType(const at::Tensor& tensor);
|
11 |
+
|
12 |
+
int64_t cudnn_version();
|
13 |
+
|
14 |
+
}} // namespace at::cudnn
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/cuda/Exceptions.h>
|
5 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
6 |
+
#include <ATen/cudnn/Handle.h>
|
7 |
+
|
8 |
+
namespace at { namespace native {
|
9 |
+
|
10 |
+
// cuDNN has a buggy check for tensor being contiguous (that is, it does
|
11 |
+
// not ignore stride for dimension that is equal to 0). This function
|
12 |
+
// makes tensors which have zero stride contiguous, by setting the
|
13 |
+
// strides to 1 as cuDNN likes.
|
14 |
+
inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
|
15 |
+
for (auto s : t.strides()) {
|
16 |
+
if (s == 0) return t.contiguous();
|
17 |
+
}
|
18 |
+
return t;
|
19 |
+
}
|
20 |
+
|
21 |
+
}}
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <cudnn.h>
|
4 |
+
|
5 |
+
#define STRINGIFY(x) #x
|
6 |
+
#define STRING(x) STRINGIFY(x)
|
7 |
+
|
8 |
+
#if CUDNN_MAJOR < 6
|
9 |
+
#pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0")
|
10 |
+
#pragma message "We strongly encourage you to move to 6.0 and above."
|
11 |
+
#pragma message "This message is intended to annoy you enough to update."
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#undef STRINGIFY
|
15 |
+
#undef STRING
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedTensorImpl.h
ADDED
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
// All rights reserved.
|
3 |
+
//
|
4 |
+
// This source code is licensed under the BSD-style license found in the
|
5 |
+
// LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
#pragma once
|
8 |
+
|
9 |
+
#include <bitset>
|
10 |
+
#include <utility>
|
11 |
+
|
12 |
+
#include <ATen/ArrayRef.h>
|
13 |
+
#include <ATen/SmallVector.h>
|
14 |
+
#include <ATen/Tensor.h>
|
15 |
+
|
16 |
+
namespace at::functorch {
|
17 |
+
|
18 |
+
using Tensor = at::Tensor;
|
19 |
+
|
20 |
+
// We assume this in a few other places in the codebase,
|
21 |
+
// but there isn't a centralized definition.
|
22 |
+
constexpr int64_t kVmapMaxTensorDims = 64;
|
23 |
+
|
24 |
+
// The valid vmap levels range from [0, 64). This effectively means that we
|
25 |
+
// support a maximum of 64 nested vmaps.
|
26 |
+
constexpr int64_t kVmapNumLevels = 64;
|
27 |
+
|
28 |
+
// Store this number of elements of BatchDims on the stack. Most people will
|
29 |
+
// probably use <= 5 nested vmaps, but adjust this number as necessary.
|
30 |
+
constexpr int64_t kBatchDimsStackSize = 5;
|
31 |
+
|
32 |
+
// A BatchedTensorImpl holds an underlying Tensor and a single batch dim
|
33 |
+
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
|
34 |
+
// BatchedTensorImpl.
|
35 |
+
//
|
36 |
+
// The batch dimensions are treated as being "private"; they are not user-visible.
|
37 |
+
// For example, in the following Tensor,
|
38 |
+
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
|
39 |
+
// dimension 0 is batch dimension.
|
40 |
+
//
|
41 |
+
// bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public)
|
42 |
+
// dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) tensor.
|
43 |
+
struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
|
44 |
+
explicit BatchedTensorImpl(at::DispatchKeySet key_set, Tensor value, int64_t dim, int64_t level);
|
45 |
+
|
46 |
+
// Returns batch dimension of this tensor
|
47 |
+
int64_t bdim() const { return bdim_; }
|
48 |
+
|
49 |
+
// Returns batch dimension of this tensor
|
50 |
+
int64_t level() const { return level_; }
|
51 |
+
|
52 |
+
// BatchedTensorImpl wraps a Tensor
|
53 |
+
const Tensor& value() const { return value_; }
|
54 |
+
|
55 |
+
// Given a public dimension index, return the dimension index in the underlying
|
56 |
+
// value() tensor.
|
57 |
+
// For example, if we have
|
58 |
+
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
|
59 |
+
// bt.actualDim(0) -> 1
|
60 |
+
// bt.actualDim(1) -> 2
|
61 |
+
// bt.actualDim(2) -> 3
|
62 |
+
// bt.actualDim(3) -> Error
|
63 |
+
int64_t actualDim(int64_t dim, bool wrap_dim = true) const;
|
64 |
+
|
65 |
+
IntArrayRef sizes_custom() const override;
|
66 |
+
SymIntArrayRef sym_sizes_custom() const override;
|
67 |
+
int64_t size_custom(int64_t d) const override;
|
68 |
+
c10::SymInt sym_size_custom(int64_t d) const override;
|
69 |
+
// We have to override this because we opted into CustomStrides
|
70 |
+
IntArrayRef strides_custom() const override;
|
71 |
+
SymIntArrayRef sym_strides_custom() const override;
|
72 |
+
// Override a bunch of methods inherited from TensorImpl to return error messages.
|
73 |
+
bool is_contiguous_custom(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const override;
|
74 |
+
void set_size(int64_t dim, int64_t new_size) override;
|
75 |
+
void set_stride(int64_t dim, int64_t new_stride) override;
|
76 |
+
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
|
77 |
+
const c10::VariableVersion& version_counter,
|
78 |
+
bool allow_tensor_metadata_change) const override;
|
79 |
+
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
|
80 |
+
c10::VariableVersion&& version_counter,
|
81 |
+
bool allow_tensor_metadata_change) const override;
|
82 |
+
void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
|
83 |
+
#ifdef DEBUG
|
84 |
+
bool has_storage() const override;
|
85 |
+
#endif
|
86 |
+
|
87 |
+
void refreshTensorMetadata();
|
88 |
+
|
89 |
+
// Used in torchdim. torchdim uses non-lexical BatchedTensor; the way it
|
90 |
+
// accomplishes this is a hack where it is able to modify the levels of
|
91 |
+
// BatchedTensor to match the level of the current vmap transform.
|
92 |
+
void _unsafe_set_level(int64_t level) {
|
93 |
+
level_ = level;
|
94 |
+
}
|
95 |
+
|
96 |
+
// Used in batching rule for in-place view operations that can change
|
97 |
+
// the index of the bdim (think squeeze_, unsqueeze_)
|
98 |
+
void unsafe_set_bdim(int64_t bdim) {
|
99 |
+
// NB: you MUST call refreshTensorMetadata after doing this.
|
100 |
+
bdim_ = bdim;
|
101 |
+
}
|
102 |
+
private:
|
103 |
+
// see NOTE: [BatchedTensorImpl levels invariant]
|
104 |
+
void checkInvariants() const;
|
105 |
+
const char* tensorimpl_type_name() const override;
|
106 |
+
|
107 |
+
Tensor value_;
|
108 |
+
|
109 |
+
int64_t level_;
|
110 |
+
int64_t bdim_;
|
111 |
+
};
|
112 |
+
|
113 |
+
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
|
114 |
+
// BatchedTensorImpl.
|
115 |
+
inline bool isBatchedTensor(const Tensor& tensor) {
|
116 |
+
return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::FuncTorchBatched) ||
|
117 |
+
tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::BatchedNestedTensor);
|
118 |
+
}
|
119 |
+
|
120 |
+
// It is unsafe to call this on a Tensor that is not backed by a
|
121 |
+
// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
|
122 |
+
inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) {
|
123 |
+
return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
|
124 |
+
}
|
125 |
+
|
126 |
+
inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) {
|
127 |
+
if (!isBatchedTensor(tensor)) {
|
128 |
+
return nullptr;
|
129 |
+
}
|
130 |
+
return unsafeGetBatchedImpl(std::move(tensor));
|
131 |
+
}
|
132 |
+
|
133 |
+
// Returns a bitset. If bit i is set, then that means dim i is a batchdim.
|
134 |
+
inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset(int64_t dim) {
|
135 |
+
std::bitset<kVmapMaxTensorDims> is_bdim;
|
136 |
+
is_bdim.set(dim);
|
137 |
+
return is_bdim;
|
138 |
+
}
|
139 |
+
|
140 |
+
// Creates a bitset for the given level
|
141 |
+
inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(int64_t level) {
|
142 |
+
std::bitset<kVmapNumLevels> result;
|
143 |
+
result.set(level);
|
144 |
+
return result;
|
145 |
+
}
|
146 |
+
|
147 |
+
// Use this to construct a BatchedTensor from a regular Tensor
|
148 |
+
TORCH_API Tensor makeBatched(const Tensor& tensor, int64_t dim, int64_t level);
|
149 |
+
|
150 |
+
// Adds a batch dim to `tensor`, returning a BatchedTensor
|
151 |
+
TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t dim, int64_t level);
|
152 |
+
|
153 |
+
// Certain dispatch keys must be propagated to the BatchedTensor (or, in general,
|
154 |
+
// any wrapper Tensor subclasses). This is because there are methods on Tensor
|
155 |
+
// that skip dispatch and check for the presence of a dispatch key (e.g. is_cpu()).
|
156 |
+
// TODO: should probably contain more (or all?) backend keys
|
157 |
+
constexpr DispatchKeySet kKeysToPropagateToWrapper({
|
158 |
+
DispatchKey::Negative,
|
159 |
+
DispatchKey::Conjugate,
|
160 |
+
DispatchKey::XLA,
|
161 |
+
DispatchKey::CUDA,
|
162 |
+
DispatchKey::CPU,
|
163 |
+
});
|
164 |
+
|
165 |
+
// Returns the subset of `to_propagate` dispatch keys that are present on
// `tensor`, i.e. the keys a wrapper subclass should copy from the wrapped
// tensor. Defaults to kKeysToPropagateToWrapper.
inline DispatchKeySet getKeysToPropagateToWrapper(const Tensor& tensor, DispatchKeySet to_propagate=kKeysToPropagateToWrapper) {
  auto key_set = tensor.unsafeGetTensorImpl()->key_set();
  // Fix: honor the `to_propagate` argument. The previous body intersected
  // with the global kKeysToPropagateToWrapper unconditionally, silently
  // ignoring any caller-supplied key set.
  return key_set & to_propagate;
}
|
169 |
+
|
170 |
+
} // namespace at::functorch
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/BatchingMetaprogramming.h
ADDED
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
// All rights reserved.
|
3 |
+
//
|
4 |
+
// This source code is licensed under the BSD-style license found in the
|
5 |
+
// LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
#pragma once
|
8 |
+
#include <ATen/Tensor.h>
|
9 |
+
#include <ATen/VmapGeneratedPlumbing.h>
|
10 |
+
|
11 |
+
// This file contains template metaprogramming things that are used for our
|
12 |
+
// batching rules.
|
13 |
+
//
|
14 |
+
// See NOTE: [vmap plumbing] for more details on why this is necessary.
|
15 |
+
// The plumbing has a bunch of metaprogramming hacks for determining the signature
|
16 |
+
// of a batching rule from the signature of the operator, many of which use the
|
17 |
+
// helper functions in this file.
|
18 |
+
|
19 |
+
namespace at::functorch {
|
20 |
+
|
21 |
+
// Metaprogramming things
|
22 |
+
template <class... Items> using typelist = c10::guts::typelist::typelist<Items...>;
|
23 |
+
template <class TypeList> using head_t = c10::guts::typelist::head_t<TypeList>;
|
24 |
+
template <class TL1, class TL2> using concat_t = c10::guts::typelist::concat_t<TL1, TL2>;
|
25 |
+
template <typename T> class debug_t;
|
26 |
+
|
27 |
+
// tail operation
|
28 |
+
// Metafunction: everything after the first element of a typelist.
// The unspecialized template exists only to produce a readable compile-time
// error when T is not a typelist<...> (false_t makes the assert dependent).
template<class TypeList>
struct tail final {
    static_assert(c10::guts::false_t<TypeList>::value,
        "In typelist::tail<T>, the T argument must be typelist<...>.");
};
// Specialization for a non-empty typelist: drop Head, keep Tail...
template<class Head, class... Tail>
struct tail<typelist<Head, Tail...>> final {
    using type = typelist<Tail...>;
};
// Convenience alias for tail<TypeList>::type.
template<class TypeList> using tail_t = typename tail<TypeList>::type;
|
38 |
+
|
39 |
+
// Metafunction used by RemoveBatchDimAfterTensor (below): if `First` is a
// Tensor-ish type and `Second` is optional<int64_t> (a batch-dim argument),
// yield `Tail` (skipping the batch dim); otherwise yield `Next`.
// The default case: First/Second is not a (Tensor, batch dim) pair.
template <class First, class Second, class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext {
  using type = Next;
};
// The specializations below enumerate the Tensor-like argument forms that
// may be followed by an optional<int64_t> batch dim in a batching rule.
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const Tensor&, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor&, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<optional<Tensor>, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const optional<Tensor>&, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<optional<Tensor>&, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<std::vector<Tensor>, optional<int64_t>, Next, Tail> {
  using type = Tail;
};
|
71 |
+
// Recursively walks a typelist and removes every optional<int64_t> that
// directly follows a Tensor-like type (i.e. the per-argument batch-dim
// parameters of a batching rule), producing the corresponding operator
// parameter list. See NOTE: [vmap plumbing].
template <class TypeList> struct RemoveBatchDimAfterTensor {
  using first = head_t<TypeList>;            // current element
  using next = tail_t<TypeList>;             // everything after `first`
  using second = head_t<next>;               // element following `first`
  using tail = tail_t<next>;                 // everything after `second`

  // Keep `first`; recurse on `tail` if `second` was its batch dim,
  // otherwise recurse on `next` (which still contains `second`).
  using type = concat_t<
    typelist<first>,
    typename RemoveBatchDimAfterTensor<
      typename IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<first, second, next, tail>::type
    >::type
  >;
};
// Base case: a single remaining element cannot be followed by a batch dim.
template <class Type> struct RemoveBatchDimAfterTensor<typelist<Type>> {
  using type = typelist<Type>;
};
// Base case: empty typelist.
template <> struct RemoveBatchDimAfterTensor<typelist<>> {
  using type = typelist<>;
};
// Convenience alias for RemoveBatchDimAfterTensor<TypeList>::type.
template<class TypeList> using remove_batch_dim_after_tensor_t = typename RemoveBatchDimAfterTensor<TypeList>::type;
|
91 |
+
|
92 |
+
// Metafunction: std::tuple<T> collapses to T; every other type is unchanged.
// Used to turn a batching rule's single-result tuple into a plain return type.
template <typename T> struct UnpackSingleItemTuple {
  using type = T;
};
template <typename T> struct UnpackSingleItemTuple<std::tuple<T>> {
  using type = T;
};
// Convenience alias for UnpackSingleItemTuple<T>::type.
template <typename T> using unpack_single_item_tuple_t = typename UnpackSingleItemTuple<T>::type;
|
99 |
+
|
100 |
+
// Builds a function type Return(Args...) from a return type and a typelist
// of parameter types. The helper pattern-matches on the std::tuple form of
// the typelist to extract the parameter pack.
template <typename Return, typename TupleArgs> struct BuildFunctionHelper;
template <typename Return, typename... Args> struct BuildFunctionHelper<Return, std::tuple<Args...>> {
  using type = Return(Args...);
};
template <typename Return, typename TL>
struct BuildFunction {
  using type = typename BuildFunctionHelper<Return, c10::guts::typelist::to_tuple_t<TL>>::type;
};
// Convenience alias for BuildFunction<Return, TL>::type.
template <typename Return, typename TL> using build_function_t = typename BuildFunction<Return, TL>::type;
|
109 |
+
|
110 |
+
|
111 |
+
// Derives the operator's function type from a batching rule's function type:
// strips the optional<int64_t> batch-dim parameters/results that the
// batching rule carries alongside each Tensor. See NOTE: [vmap plumbing].
template <typename batch_rule_t> struct ToOperatorType {
  using batch_rule_return_type = typename c10::guts::function_traits<batch_rule_t>::return_type;
  using batch_rule_parameter_types = typename c10::guts::function_traits<batch_rule_t>::parameter_types;

  // Parameters: drop each batch dim that follows a Tensor-like argument.
  using operator_parameter_types = remove_batch_dim_after_tensor_t<batch_rule_parameter_types>;
  // Return type: same stripping applied to the tuple of results, then a
  // single-element tuple collapses to the bare type.
  using operator_return_type =
    unpack_single_item_tuple_t<
      c10::guts::typelist::to_tuple_t<
        remove_batch_dim_after_tensor_t<
          c10::guts::typelist::from_tuple_t<batch_rule_return_type>>>>;

  using type = build_function_t<operator_return_type, operator_parameter_types>;
};
// Convenience alias for ToOperatorType<batch_rule_t>::type.
template <typename batch_rule_t> using to_operator_t = typename ToOperatorType<batch_rule_t>::type;
|
125 |
+
|
126 |
+
} // namespace at::functorch
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/DynamicLayer.h
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
// All rights reserved.
|
3 |
+
//
|
4 |
+
// This source code is licensed under the BSD-style license found in the
|
5 |
+
// LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
#pragma once
|
8 |
+
#include <ATen/functorch/Macros.h>
|
9 |
+
#include <c10/core/DispatchKey.h>
|
10 |
+
#include <ATen/core/function_schema.h>
|
11 |
+
#include <c10/util/Optional.h>
|
12 |
+
#include <c10/core/impl/LocalDispatchKeySet.h>
|
13 |
+
#include <ATen/functorch/Interpreter.h>
|
14 |
+
#include <ATen/functorch/VmapInterpreter.h>
|
15 |
+
#include <ATen/functorch/ADInterpreters.h>
|
16 |
+
#include <ATen/functorch/FunctionalizeInterpreter.h>
|
17 |
+
|
18 |
+
// Forward declared
|
19 |
+
namespace c10 { struct AutogradMetaInterface; }
|
20 |
+
|
21 |
+
namespace at::functorch {
|
22 |
+
|
23 |
+
// This file contains the implementation of functorch's interpreter stack.
|
24 |
+
// See NOTE: [functorch interpreter stack] first before reading on.
|
25 |
+
//
|
26 |
+
// NB: the functorch interpreter stack is also referred to as:
|
27 |
+
// - the "dynamic layer stack" -- an older name for "interpreter" was
|
28 |
+
// "dynamic layer".
|
29 |
+
// - the "functorch mode stack". You can think of each functorch transform as a
|
30 |
+
// "mode" (in the same sense as torch_dispatch mode or torch_function mode),
|
31 |
+
// and functorch being an implementation of a "mode stack" where the modes
|
32 |
+
// may be arbitrary composed.
|
33 |
+
|
34 |
+
// DynamicLayer is basically the same thing as an Interpreter.
|
35 |
+
// It represents a functorch transform and it holds an Interpreter,
|
36 |
+
// which contains metadata related to the transform and instructions on
|
37 |
+
// how to perform the transform.
|
38 |
+
//
|
39 |
+
// TODO: we can excise DynamicLayer in favor of Interpreter,
|
40 |
+
// But I am going to leave it for now as a compatiblity shim to avoid
|
41 |
+
// needing to refactor a lot of callsites...
|
42 |
+
struct TORCH_API DynamicLayer {
  // Constructs a layer for `transform_type` at stack depth `layerId`.
  // The optional arguments are only meaningful for particular transforms:
  // batchSize/randomness for vmap, the grad-mode flags for grad/jvp, and
  // functionalize_add_back_views for functionalize.
  explicit DynamicLayer(
      TransformType transform_type,
      int64_t layerId,
      optional<c10::SymInt> batchSize = nullopt,
      optional<RandomnessType> randomness = nullopt,
      optional<bool> prev_grad_mode = nullopt,
      optional<bool> pre_fwd_grad_mode = nullopt,
      optional<bool> functionalize_add_back_views = nullopt);

  // Which functorch transform this layer represents.
  TransformType key() const;
  // This layer's position (level) in the interpreter stack.
  int64_t layerId() const;

  // Access to the Interpreter that actually implements the transform.
  const Interpreter& interpreter() const { return interpreter_; }
  Interpreter& interpreter() { return interpreter_; }

  // Only valid for vmap
  c10::SymInt batchSize() const;
  RandomnessType randomness() const;

 private:
  Interpreter interpreter_;
};
|
65 |
+
|
66 |
+
TORCH_API int64_t initAndPushDynamicLayer(
|
67 |
+
TransformType transform_type,
|
68 |
+
optional<c10::SymInt> batch_size = nullopt,
|
69 |
+
optional<RandomnessType> randomness = nullopt,
|
70 |
+
optional<bool> prev_grad_mode = nullopt,
|
71 |
+
optional<bool> prev_fwd_grad_mode = nullopt,
|
72 |
+
optional<bool> functionalize_add_back_views = nullopt);
|
73 |
+
TORCH_API DynamicLayer popDynamicLayerAndDeleteMetadata();
|
74 |
+
TORCH_API c10::optional<DynamicLayer> maybeCurrentDynamicLayer();
|
75 |
+
TORCH_API const std::vector<DynamicLayer>& getDynamicLayerStack();
|
76 |
+
TORCH_API void setDynamicLayerStack(const std::vector<DynamicLayer>& stack);
|
77 |
+
TORCH_API void setDynamicLayerFrontBackKeysIncluded(bool included);
|
78 |
+
|
79 |
+
// NOTE: [Life handles and lexically scoped transforms]
|
80 |
+
// functorch transforms are lexically scoped.
|
81 |
+
// Given a level, we store a "life handle" that is a boolean that tells us if the
|
82 |
+
// transform with that level is active or not.
|
83 |
+
//
|
84 |
+
// functorch's TensorWrapper (for grad transforms) stores a life handle.
|
85 |
+
// If a TensorWrapper escapes from the scope of the transform, then somehow
|
86 |
+
// it must know it escaped; it can tell by querying the life handle.
|
87 |
+
TORCH_API const std::shared_ptr<bool>& getLifeHandleForLevel(int64_t level);
|
88 |
+
|
89 |
+
// Returns if an operator is in-place. An operator is inplace if:
|
90 |
+
// 1. The first argument is a Tensor and it is being written to
|
91 |
+
// 2. The first argument is being returned
|
92 |
+
// 3. No other arguments are aliased
|
93 |
+
// Here is an example of an in-place operator:
|
94 |
+
// add_(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
|
95 |
+
TORCH_API bool isInplaceOp(const c10::FunctionSchema& schema);
|
96 |
+
|
97 |
+
// Given the indices of unwrapped inputs and the schema, this returns the indices of any outputs that should remain unwrapped
|
98 |
+
TORCH_API c10::optional<size_t> findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input);
|
99 |
+
|
100 |
+
TORCH_API Tensor unwrapIfDead(const Tensor& tensor);
|
101 |
+
TORCH_API bool isDeadTensorWrapper(const Tensor& tensor);
|
102 |
+
|
103 |
+
// Pretty printers
|
104 |
+
TORCH_API std::ostream& operator<<(std::ostream& os, const DynamicLayer& layer);
|
105 |
+
TORCH_API std::ostream& operator<<(std::ostream& os, const std::vector<DynamicLayer>& dynamicLayerStack);
|
106 |
+
|
107 |
+
// While a functorch transform is active, torch.autograd.function._SingleLevelFunction
|
108 |
+
// is disabled by default. The following two APIs are APIs for enabling
|
109 |
+
// it. These are not user-facing APIs. We can delete this in the future, but
|
110 |
+
// it is useful for debugging when something goes wrong with the
|
111 |
+
// autograd.Function <> functorch interaction, which uses _SingleLevelFunction,
|
112 |
+
// because it leads to loud errors if something is incorrect.
|
113 |
+
TORCH_API void setSingleLevelAutogradFunctionAllowed(bool allowed);
|
114 |
+
TORCH_API bool getSingleLevelAutogradFunctionAllowed();
|
115 |
+
|
116 |
+
// While a functorch grad transform is active, Tensor.requires_grad_() gets
|
117 |
+
// disabled. These two functions are the mechanism to controlling that.
|
118 |
+
TORCH_API void setInplaceRequiresGradAllowed(bool allowed);
|
119 |
+
TORCH_API bool getInplaceRequiresGradAllowed();
|
120 |
+
|
121 |
+
TORCH_API DynamicLayer popDynamicLayer();
|
122 |
+
TORCH_API int64_t pushDynamicLayer(DynamicLayer&& layer);
|
123 |
+
|
124 |
+
} // namespace at::functorch
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/functorch/TensorWrapper.h
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
// All rights reserved.
|
3 |
+
//
|
4 |
+
// This source code is licensed under the BSD-style license found in the
|
5 |
+
// LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
#pragma once
|
8 |
+
|
9 |
+
#include <ATen/functorch/Macros.h>
|
10 |
+
#include <ATen/Tensor.h>
|
11 |
+
#include <ATen/functorch/Interpreter.h>
|
12 |
+
|
13 |
+
namespace at::functorch {
|
14 |
+
|
15 |
+
// NOTE: [functorch's TensorWrapper]
|
16 |
+
//
|
17 |
+
// Taking better suggestions for a name. TensorWrapper is the wrapper Tensor
|
18 |
+
// Subclass for functorch's grad-based transforms (grad, vjp, jvp). It is
|
19 |
+
// analogous to how vmap uses BatchedTensor as the wrapper Tensor subclass.
|
20 |
+
//
|
21 |
+
// If you're familiar with the Tensor-Variable merge, TensorWrapper is effectively
|
22 |
+
// another Variable.
|
23 |
+
//
|
24 |
+
// Consider grad(grad(torch.sin))(x). This wraps `x` as TensorWrapper(TensorWrapper(x)).
|
25 |
+
// The reason why is so that each TensorWrapper can hold its own AutogradMeta and
|
26 |
+
// participate in a **separate** autograd graph.
|
27 |
+
//
|
28 |
+
// There are alternative designs we could have chosen (e.g. each grad transform
|
29 |
+
// stores a weak map of Tensor -> AutogradMeta); the benefit of the TensorWrapper
|
30 |
+
// design is that we can re-use existing VariableType kernels (i.e. Autograd kernels)
|
31 |
+
// without much modification. Since a TensorWrapper looks like a regular Tensor,
|
32 |
+
// the VariableType kernel can pull out the AutogradMeta struct from where it
|
33 |
+
// expects and extend the autograd graph
|
34 |
+
|
35 |
+
struct TORCH_API TensorWrapper : public c10::TensorImpl {
  // Wraps `value` for the grad transform at `level`. `is_alive` is the life
  // handle shared with the creating interpreter (see NOTE: [Life handles and
  // lexically scoped transforms] in DynamicLayer.h).
  explicit TensorWrapper(
      c10::DispatchKeySet key_set,
      Tensor value,
      int64_t level,
      std::shared_ptr<bool> is_alive,
      bool is_immutable = false, // if true, this came from an operation that aliases an immutable tensor
      bool use_value_sizes_strides = true);

  // Re-syncs this wrapper's sizes/strides metadata with value_.
  void refreshMetadata();

  // The wrapped (inner) tensor.
  const Tensor& value() const {
    return value_;
  }
  // The transform level this wrapper belongs to, or nullopt if the
  // creating interpreter is no longer alive.
  optional<int64_t> level() const {
    if (is_alive()) {
      return level_;
    }
    return {};
  }
  bool is_immutable() const {
    return is_immutable_;
  }
  // Whether the interpreter that created this wrapper is still on the stack.
  bool is_alive() const;

  // Overrides necessary for autograd
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
    const c10::VariableVersion& version_counter,
    bool allow_tensor_metadata_change) const override;
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override;
  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;

 private:
  const char* tensorimpl_type_name() const override;
  Tensor value_;
  int64_t level_;
  bool is_immutable_;

  // TensorWrapper receives a boolean flag on whether or not the Grad Interpreter
  // that created it is still alive or not.
  // If the Grad Interpreter is no longer alive then it attempts to behave like
  // a regular Tensor.
  //
  // When we exit the level, this wrapper may be marked as "not alive".
  // Wrappers that are not alive:
  // 1) May still have autograd metadata on them
  // 2) Forward dispatches to the underlying value()
  std::shared_ptr<bool> is_alive_;
};
|
86 |
+
|
87 |
+
// There are two variants of makeTensorWrapper: one that accepts a level
|
88 |
+
// and one that accepts an Interpreter.
|
89 |
+
//
|
90 |
+
// The one that accepts a level tries to automatically get the life handle from the
|
91 |
+
// interpreter on the DynamicLayerStack.
|
92 |
+
// It needs to be used with caution: if the interpreter is not on the
|
93 |
+
// DynamicLayerStack, then we won't be able to find the life handle.
|
94 |
+
//
|
95 |
+
// In practice this isn't a problem: when we're constructing TensorWrapper in
|
96 |
+
// Python, the corresponding interpreter is on the stack.
|
97 |
+
TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, int64_t level, bool is_immutable=false);
|
98 |
+
TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, const Interpreter& interpreter, bool is_immutable=false);
|
99 |
+
TORCH_API TensorWrapper* maybeGetTensorWrapper(const Tensor& tensor);
|
100 |
+
TORCH_API void dumpTensor(std::ostream & ss, const Tensor& tensor);
|
101 |
+
TORCH_API void dumpTensorCout(const Tensor& tensor);
|
102 |
+
|
103 |
+
} // namespace at::functorch
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/AdaptivePooling.h
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
#include <c10/util/ArrayRef.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
#include <cmath>
|
8 |
+
|
9 |
+
namespace at::native {
|
10 |
+
|
11 |
+
using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
|
12 |
+
using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
|
13 |
+
DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel);
|
14 |
+
DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel);
|
15 |
+
|
16 |
+
using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
|
17 |
+
using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
|
18 |
+
DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel);
|
19 |
+
DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel);
|
20 |
+
|
21 |
+
// First input index contributing to output cell `a` when adaptively pooling
// `c` input elements into `b` output cells: floor(a * c / b), computed with
// integer arithmetic that avoids overflow for large a*c.
static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
  const int64_t whole = a / b;
  const int64_t partial = a % b;
  return whole * c + (partial * c) / b;
}
|
24 |
+
|
25 |
+
// One past the last input index contributing to output cell `a`:
// ceil((a + 1) * c / b), written as 1 + ((a+1)*c - 1) / b in integer math.
static inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
  const int64_t scaled = (a + 1) * c;
  return 1 + (scaled - 1) / b;
}
|
28 |
+
|
29 |
+
// Verifies that every non-batch dimension of `gradOutput_` is non-empty,
// raising a TORCH_CHECK error otherwise. `arg_name` is the operator name
// used to prefix the error message. Dimension 0 is skipped: an empty batch
// dimension is permitted.
static inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
  int64_t ndim = gradOutput_.ndimension();
  for (const auto i : c10::irange(1, ndim)) {
    TORCH_CHECK(gradOutput_.size(i) > 0,
      arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
      "but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
      " being empty");
  }
}
|
38 |
+
|
39 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h
ADDED
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/util/Optional.h>
|
4 |
+
#include <c10/util/string_view.h>
|
5 |
+
#include <ATen/Config.h>
|
6 |
+
#include <ATen/native/DispatchStub.h>
|
7 |
+
|
8 |
+
// Forward declare TI
|
9 |
+
namespace at {
|
10 |
+
class Tensor;
|
11 |
+
struct TensorIterator;
|
12 |
+
|
13 |
+
namespace native {
|
14 |
+
enum class TransposeType;
|
15 |
+
}
|
16 |
+
|
17 |
+
}
|
18 |
+
|
19 |
+
namespace at::native {
|
20 |
+
|
21 |
+
// Selects which LAPACK least-squares driver routine to dispatch to
// (?gels, ?gelsd, ?gelsy, ?gelss respectively).
enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss};
|
22 |
+
|
23 |
+
#if AT_BUILD_WITH_LAPACK()
|
24 |
+
// Define per-batch functions to be used in the implementation of batched
|
25 |
+
// linear algebra operations
|
26 |
+
|
27 |
+
template <class scalar_t>
|
28 |
+
void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info);
|
29 |
+
|
30 |
+
template <class scalar_t>
|
31 |
+
void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info);
|
32 |
+
|
33 |
+
template <class scalar_t, class value_t=scalar_t>
|
34 |
+
void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info);
|
35 |
+
|
36 |
+
template <class scalar_t>
|
37 |
+
void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
|
38 |
+
|
39 |
+
template <class scalar_t>
|
40 |
+
void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
|
41 |
+
|
42 |
+
template <class scalar_t>
|
43 |
+
void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info);
|
44 |
+
|
45 |
+
template <class scalar_t, class value_t = scalar_t>
|
46 |
+
void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info);
|
47 |
+
|
48 |
+
template <class scalar_t>
|
49 |
+
void lapackGels(char trans, int m, int n, int nrhs,
|
50 |
+
scalar_t *a, int lda, scalar_t *b, int ldb,
|
51 |
+
scalar_t *work, int lwork, int *info);
|
52 |
+
|
53 |
+
template <class scalar_t, class value_t = scalar_t>
|
54 |
+
void lapackGelsd(int m, int n, int nrhs,
|
55 |
+
scalar_t *a, int lda, scalar_t *b, int ldb,
|
56 |
+
value_t *s, value_t rcond, int *rank,
|
57 |
+
scalar_t* work, int lwork,
|
58 |
+
value_t *rwork, int* iwork, int *info);
|
59 |
+
|
60 |
+
template <class scalar_t, class value_t = scalar_t>
|
61 |
+
void lapackGelsy(int m, int n, int nrhs,
|
62 |
+
scalar_t *a, int lda, scalar_t *b, int ldb,
|
63 |
+
int *jpvt, value_t rcond, int *rank,
|
64 |
+
scalar_t *work, int lwork, value_t* rwork, int *info);
|
65 |
+
|
66 |
+
template <class scalar_t, class value_t = scalar_t>
|
67 |
+
void lapackGelss(int m, int n, int nrhs,
|
68 |
+
scalar_t *a, int lda, scalar_t *b, int ldb,
|
69 |
+
value_t *s, value_t rcond, int *rank,
|
70 |
+
scalar_t *work, int lwork,
|
71 |
+
value_t *rwork, int *info);
|
72 |
+
|
73 |
+
template <LapackLstsqDriverType, class scalar_t, class value_t = scalar_t>
|
74 |
+
struct lapackLstsq_impl;
|
75 |
+
|
76 |
+
// Driver dispatch: each specialization forwards the union-style parameter
// list of lapackLstsq (below) to one concrete LAPACK least-squares wrapper,
// passing only the arguments that driver uses and ignoring the rest.

// ?gels: QR/LQ-based solve; uses trans, work/lwork only.
template <class scalar_t, class value_t>
struct lapackLstsq_impl<LapackLstsqDriverType::Gels, scalar_t, value_t> {
  static void call(
      char trans, int m, int n, int nrhs,
      scalar_t *a, int lda, scalar_t *b, int ldb,
      scalar_t *work, int lwork, int *info, // Gels flavor
      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
      value_t *s, // Gelss flavor
      int *iwork // Gelsd flavor
      ) {
    lapackGels<scalar_t>(
        trans, m, n, nrhs,
        a, lda, b, ldb,
        work, lwork, info);
  }
};

// ?gelsy: complete orthogonal factorization; uses jpvt/rcond/rank/rwork.
template <class scalar_t, class value_t>
struct lapackLstsq_impl<LapackLstsqDriverType::Gelsy, scalar_t, value_t> {
  static void call(
      char trans, int m, int n, int nrhs,
      scalar_t *a, int lda, scalar_t *b, int ldb,
      scalar_t *work, int lwork, int *info, // Gels flavor
      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
      value_t *s, // Gelss flavor
      int *iwork // Gelsd flavor
      ) {
    lapackGelsy<scalar_t, value_t>(
        m, n, nrhs,
        a, lda, b, ldb,
        jpvt, rcond, rank,
        work, lwork, rwork, info);
  }
};

// ?gelsd: divide-and-conquer SVD; uses s/rcond/rank and iwork.
template <class scalar_t, class value_t>
struct lapackLstsq_impl<LapackLstsqDriverType::Gelsd, scalar_t, value_t> {
  static void call(
      char trans, int m, int n, int nrhs,
      scalar_t *a, int lda, scalar_t *b, int ldb,
      scalar_t *work, int lwork, int *info, // Gels flavor
      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
      value_t *s, // Gelss flavor
      int *iwork // Gelsd flavor
      ) {
    lapackGelsd<scalar_t, value_t>(
        m, n, nrhs,
        a, lda, b, ldb,
        s, rcond, rank,
        work, lwork,
        rwork, iwork, info);
  }
};

// ?gelss: classic SVD; uses s/rcond/rank, no iwork.
template <class scalar_t, class value_t>
struct lapackLstsq_impl<LapackLstsqDriverType::Gelss, scalar_t, value_t> {
  static void call(
      char trans, int m, int n, int nrhs,
      scalar_t *a, int lda, scalar_t *b, int ldb,
      scalar_t *work, int lwork, int *info, // Gels flavor
      int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
      value_t *s, // Gelss flavor
      int *iwork // Gelsd flavor
      ) {
    lapackGelss<scalar_t, value_t>(
        m, n, nrhs,
        a, lda, b, ldb,
        s, rcond, rank,
        work, lwork,
        rwork, info);
  }
};
|
148 |
+
|
149 |
+
// Uniform front-end over the four LAPACK least-squares drivers. The
// parameter list is the union of every driver's parameters; the selected
// lapackLstsq_impl specialization forwards only the subset its driver
// needs and ignores the rest.
template <LapackLstsqDriverType driver_type, class scalar_t, class value_t = scalar_t>
void lapackLstsq(
    char trans, int m, int n, int nrhs,
    scalar_t *a, int lda, scalar_t *b, int ldb,
    scalar_t *work, int lwork, int *info, // Gels flavor
    int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
    value_t *s, // Gelss flavor
    int *iwork // Gelsd flavor
    ) {
  lapackLstsq_impl<driver_type, scalar_t, value_t>::call(
      trans, m, n, nrhs,
      a, lda, b, ldb,
      work, lwork, info,
      jpvt, rcond, rank, rwork,
      s,
      iwork);
}
|
166 |
+
|
167 |
+
template <class scalar_t>
|
168 |
+
void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info);
|
169 |
+
|
170 |
+
template <class scalar_t>
|
171 |
+
void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info);
|
172 |
+
|
173 |
+
template <class scalar_t>
|
174 |
+
void lapackLdlHermitian(
|
175 |
+
char uplo,
|
176 |
+
int n,
|
177 |
+
scalar_t* a,
|
178 |
+
int lda,
|
179 |
+
int* ipiv,
|
180 |
+
scalar_t* work,
|
181 |
+
int lwork,
|
182 |
+
int* info);
|
183 |
+
|
184 |
+
template <class scalar_t>
|
185 |
+
void lapackLdlSymmetric(
|
186 |
+
char uplo,
|
187 |
+
int n,
|
188 |
+
scalar_t* a,
|
189 |
+
int lda,
|
190 |
+
int* ipiv,
|
191 |
+
scalar_t* work,
|
192 |
+
int lwork,
|
193 |
+
int* info);
|
194 |
+
|
195 |
+
template <class scalar_t>
|
196 |
+
void lapackLdlSolveHermitian(
|
197 |
+
char uplo,
|
198 |
+
int n,
|
199 |
+
int nrhs,
|
200 |
+
scalar_t* a,
|
201 |
+
int lda,
|
202 |
+
int* ipiv,
|
203 |
+
scalar_t* b,
|
204 |
+
int ldb,
|
205 |
+
int* info);
|
206 |
+
|
207 |
+
template <class scalar_t>
|
208 |
+
void lapackLdlSolveSymmetric(
|
209 |
+
char uplo,
|
210 |
+
int n,
|
211 |
+
int nrhs,
|
212 |
+
scalar_t* a,
|
213 |
+
int lda,
|
214 |
+
int* ipiv,
|
215 |
+
scalar_t* b,
|
216 |
+
int ldb,
|
217 |
+
int* info);
|
218 |
+
|
219 |
+
template<class scalar_t, class value_t=scalar_t>
|
220 |
+
void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info);
|
221 |
+
#endif
|
222 |
+
|
223 |
+
#if AT_BUILD_WITH_BLAS()
|
224 |
+
template <class scalar_t>
|
225 |
+
void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb);
|
226 |
+
#endif
|
227 |
+
|
228 |
+
using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/);
|
229 |
+
DECLARE_DISPATCH(cholesky_fn, cholesky_stub);
|
230 |
+
|
231 |
+
using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/);
|
232 |
+
|
233 |
+
DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub);
|
234 |
+
|
235 |
+
using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/);
|
236 |
+
|
237 |
+
DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub);
|
238 |
+
|
239 |
+
using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/);
|
240 |
+
DECLARE_DISPATCH(geqrf_fn, geqrf_stub);
|
241 |
+
|
242 |
+
using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/);
|
243 |
+
DECLARE_DISPATCH(orgqr_fn, orgqr_stub);
|
244 |
+
|
245 |
+
using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/);
|
246 |
+
DECLARE_DISPATCH(ormqr_fn, ormqr_stub);
|
247 |
+
|
248 |
+
using linalg_eigh_fn = void (*)(
|
249 |
+
const Tensor& /*eigenvalues*/,
|
250 |
+
const Tensor& /*eigenvectors*/,
|
251 |
+
const Tensor& /*infos*/,
|
252 |
+
bool /*upper*/,
|
253 |
+
bool /*compute_eigenvectors*/);
|
254 |
+
DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub);
|
255 |
+
|
256 |
+
using lstsq_fn = void (*)(
|
257 |
+
const Tensor& /*a*/,
|
258 |
+
Tensor& /*b*/,
|
259 |
+
Tensor& /*rank*/,
|
260 |
+
Tensor& /*singular_values*/,
|
261 |
+
Tensor& /*infos*/,
|
262 |
+
double /*rcond*/,
|
263 |
+
std::string /*driver_name*/);
|
264 |
+
DECLARE_DISPATCH(lstsq_fn, lstsq_stub);
|
265 |
+
|
266 |
+
using triangular_solve_fn = void (*)(
|
267 |
+
const Tensor& /*A*/,
|
268 |
+
const Tensor& /*B*/,
|
269 |
+
bool /*left*/,
|
270 |
+
bool /*upper*/,
|
271 |
+
TransposeType /*transpose*/,
|
272 |
+
bool /*unitriangular*/);
|
273 |
+
DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub);
|
274 |
+
|
275 |
+
using lu_factor_fn = void (*)(
|
276 |
+
const Tensor& /*input*/,
|
277 |
+
const Tensor& /*pivots*/,
|
278 |
+
const Tensor& /*infos*/,
|
279 |
+
bool /*compute_pivots*/);
|
280 |
+
DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub);
|
281 |
+
|
282 |
+
using unpack_pivots_fn = void(*)(
|
283 |
+
TensorIterator& iter,
|
284 |
+
const int64_t dim_size,
|
285 |
+
const int64_t max_pivot);
|
286 |
+
DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub);
|
287 |
+
|
288 |
+
using lu_solve_fn = void (*)(
|
289 |
+
const Tensor& /*LU*/,
|
290 |
+
const Tensor& /*pivots*/,
|
291 |
+
const Tensor& /*B*/,
|
292 |
+
TransposeType /*trans*/);
|
293 |
+
DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub);
|
294 |
+
|
295 |
+
using ldl_factor_fn = void (*)(
|
296 |
+
const Tensor& /*LD*/,
|
297 |
+
const Tensor& /*pivots*/,
|
298 |
+
const Tensor& /*info*/,
|
299 |
+
bool /*upper*/,
|
300 |
+
bool /*hermitian*/);
|
301 |
+
DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub);
|
302 |
+
|
303 |
+
using svd_fn = void (*)(
|
304 |
+
const Tensor& /*A*/,
|
305 |
+
const bool /*full_matrices*/,
|
306 |
+
const bool /*compute_uv*/,
|
307 |
+
const c10::optional<c10::string_view>& /*driver*/,
|
308 |
+
const Tensor& /*U*/,
|
309 |
+
const Tensor& /*S*/,
|
310 |
+
const Tensor& /*Vh*/,
|
311 |
+
const Tensor& /*info*/);
|
312 |
+
DECLARE_DISPATCH(svd_fn, svd_stub);
|
313 |
+
|
314 |
+
using ldl_solve_fn = void (*)(
|
315 |
+
const Tensor& /*LD*/,
|
316 |
+
const Tensor& /*pivots*/,
|
317 |
+
const Tensor& /*result*/,
|
318 |
+
bool /*upper*/,
|
319 |
+
bool /*hermitian*/);
|
320 |
+
DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub);
|
321 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h
ADDED
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/TensorBase.h>
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
#include <c10/core/Scalar.h>
|
6 |
+
#include <c10/util/TypeSafeSignMath.h>
|
7 |
+
#if defined(__CUDA_ARCH__)
|
8 |
+
#include <c10/cuda/CUDAMathCompat.h>
|
9 |
+
#define compat_copysign c10::cuda::compat::copysign
|
10 |
+
#elif defined(__HIPCC__)
|
11 |
+
#include <c10/hip/HIPMathCompat.h>
|
12 |
+
#define compat_copysign c10::hip::compat::copysign
|
13 |
+
#else
|
14 |
+
#include <c10/util/copysign.h>
|
15 |
+
#define compat_copysign c10::copysign
|
16 |
+
#endif
|
17 |
+
|
18 |
+
|
19 |
+
namespace at {
|
20 |
+
struct TensorIterator;
|
21 |
+
struct TensorIteratorBase;
|
22 |
+
}
|
23 |
+
|
24 |
+
namespace at::native {
|
25 |
+
|
26 |
+
inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
|
27 |
+
TORCH_CHECK(! alpha.isBoolean() || dtype == ScalarType::Bool,
|
28 |
+
"Boolean alpha only supported for Boolean results.");
|
29 |
+
TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype)
|
30 |
+
|| alpha.isIntegral(true),
|
31 |
+
"For integral input tensors, argument alpha must not be a floating point number.");
|
32 |
+
TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(),
|
33 |
+
"For non-complex input tensors, argument alpha must not be a complex number.")
|
34 |
+
}
|
35 |
+
|
36 |
+
// Basic checking for all sub functions.
|
37 |
+
inline void sub_check(const TensorBase& self, const TensorBase& other) {
|
38 |
+
TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool,
|
39 |
+
"Subtraction, the `-` operator, with two bool tensors is not supported. "
|
40 |
+
"Use the `^` or `logical_xor()` operator instead.")
|
41 |
+
TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool,
|
42 |
+
"Subtraction, the `-` operator, with a bool tensor is not supported. "
|
43 |
+
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
|
44 |
+
}
|
45 |
+
|
46 |
+
inline void sub_check(const TensorBase& self, const Scalar& scalar) {
|
47 |
+
TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(),
|
48 |
+
"Subtraction, the `-` operator, with two bool tensors is not supported. "
|
49 |
+
"Use the `^` or `logical_xor()` operator instead.")
|
50 |
+
TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(),
|
51 |
+
"Subtraction, the `-` operator, with a bool tensor is not supported. "
|
52 |
+
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
|
53 |
+
}
|
54 |
+
|
55 |
+
using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
|
56 |
+
using structured_binary_fn_double = void(*)(TensorIteratorBase&, double);
|
57 |
+
using structured_binary_fn = void(*)(TensorIteratorBase&);
|
58 |
+
|
59 |
+
using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
|
60 |
+
using binary_fn_double = void(*)(TensorIterator&, double);
|
61 |
+
using binary_fn = void(*)(TensorIterator&);
|
62 |
+
using binary_clamp_fn_alpha =
|
63 |
+
void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val);
|
64 |
+
|
65 |
+
// NB: codegenned
|
66 |
+
DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
|
67 |
+
|
68 |
+
DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub);
|
69 |
+
DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub);
|
70 |
+
DECLARE_DISPATCH(structured_binary_fn, mul_stub);
|
71 |
+
DECLARE_DISPATCH(structured_binary_fn, div_true_stub);
|
72 |
+
DECLARE_DISPATCH(structured_binary_fn, div_floor_stub);
|
73 |
+
DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub);
|
74 |
+
DECLARE_DISPATCH(structured_binary_fn, atan2_stub);
|
75 |
+
DECLARE_DISPATCH(structured_binary_fn, remainder_stub);
|
76 |
+
DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub);
|
77 |
+
DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub);
|
78 |
+
DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub);
|
79 |
+
DECLARE_DISPATCH(structured_binary_fn, lshift_stub);
|
80 |
+
DECLARE_DISPATCH(structured_binary_fn, rshift_stub);
|
81 |
+
DECLARE_DISPATCH(binary_fn, logical_xor_stub);
|
82 |
+
DECLARE_DISPATCH(binary_fn, logical_and_stub);
|
83 |
+
DECLARE_DISPATCH(binary_fn, logical_or_stub);
|
84 |
+
DECLARE_DISPATCH(structured_binary_fn, lt_stub);
|
85 |
+
DECLARE_DISPATCH(structured_binary_fn, le_stub);
|
86 |
+
DECLARE_DISPATCH(structured_binary_fn, gt_stub);
|
87 |
+
DECLARE_DISPATCH(structured_binary_fn, ge_stub);
|
88 |
+
DECLARE_DISPATCH(structured_binary_fn, eq_stub);
|
89 |
+
DECLARE_DISPATCH(structured_binary_fn, ne_stub);
|
90 |
+
DECLARE_DISPATCH(binary_fn, max_elementwise_stub);
|
91 |
+
DECLARE_DISPATCH(binary_fn, min_elementwise_stub);
|
92 |
+
DECLARE_DISPATCH(structured_binary_fn, maximum_stub);
|
93 |
+
DECLARE_DISPATCH(structured_binary_fn, minimum_stub);
|
94 |
+
DECLARE_DISPATCH(structured_binary_fn, fmax_stub);
|
95 |
+
DECLARE_DISPATCH(structured_binary_fn, fmin_stub);
|
96 |
+
DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub);
|
97 |
+
DECLARE_DISPATCH(binary_fn_double, huber_stub);
|
98 |
+
DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub);
|
99 |
+
DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub);
|
100 |
+
DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub);
|
101 |
+
DECLARE_DISPATCH(structured_binary_fn, mse_stub);
|
102 |
+
DECLARE_DISPATCH(structured_binary_fn, fmod_stub);
|
103 |
+
DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub);
|
104 |
+
DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub);
|
105 |
+
DECLARE_DISPATCH(structured_binary_fn, gcd_stub);
|
106 |
+
DECLARE_DISPATCH(structured_binary_fn, lcm_stub);
|
107 |
+
DECLARE_DISPATCH(structured_binary_fn, hypot_stub);
|
108 |
+
DECLARE_DISPATCH(structured_binary_fn, igamma_stub);
|
109 |
+
DECLARE_DISPATCH(structured_binary_fn, igammac_stub);
|
110 |
+
DECLARE_DISPATCH(structured_binary_fn, nextafter_stub);
|
111 |
+
DECLARE_DISPATCH(structured_binary_fn, heaviside_stub);
|
112 |
+
DECLARE_DISPATCH(structured_binary_fn, copysign_stub);
|
113 |
+
DECLARE_DISPATCH(structured_binary_fn, xlogy_stub);
|
114 |
+
DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub);
|
115 |
+
DECLARE_DISPATCH(structured_binary_fn, zeta_stub);
|
116 |
+
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub);
|
117 |
+
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub);
|
118 |
+
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub);
|
119 |
+
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub);
|
120 |
+
DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub);
|
121 |
+
DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub);
|
122 |
+
DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub);
|
123 |
+
DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub);
|
124 |
+
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub);
|
125 |
+
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub);
|
126 |
+
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub);
|
127 |
+
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub);
|
128 |
+
|
129 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/native/TypeProperties.h>
|
5 |
+
#include <ATen/ScalarOps.h>
|
6 |
+
|
7 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
8 |
+
#include <ATen/NativeFunctions.h>
|
9 |
+
#else
|
10 |
+
#include <ATen/ops/result_type.h>
|
11 |
+
#endif
|
12 |
+
|
13 |
+
namespace at::native {
|
14 |
+
|
15 |
+
// original values given by raw_*. If an original value is not contiguous, will make a contiguous copy to
|
16 |
+
// the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensor do not
|
17 |
+
// match, will change them to be a common super type so comparisons are done between the same types.
|
18 |
+
// For any trimmed_* tensor, if its outgoing value matches what it was incoming (typically null), then the
|
19 |
+
// corresponding raw_* version should be used since it was already contiguous of the right type.
|
20 |
+
inline void searchsorted_maybe_trim_input_tensors(
|
21 |
+
Tensor& trimmed_input,
|
22 |
+
Tensor& trimmed_boundaries,
|
23 |
+
Tensor& trimmed_sorter,
|
24 |
+
const Tensor& raw_input,
|
25 |
+
const Tensor& raw_boundaries,
|
26 |
+
const Tensor& raw_sorter) {
|
27 |
+
bool in_is_contiguous = raw_input.is_contiguous();
|
28 |
+
bool bd_is_contiguous = raw_boundaries.is_contiguous();
|
29 |
+
bool sort_is_contiguous = raw_sorter.is_contiguous();
|
30 |
+
|
31 |
+
if (!in_is_contiguous) {
|
32 |
+
TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the performance due "
|
33 |
+
"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous input value "
|
34 |
+
"tensor if possible. This message will only appear once per program.");
|
35 |
+
trimmed_input = raw_input.contiguous();
|
36 |
+
}
|
37 |
+
if (!bd_is_contiguous) {
|
38 |
+
TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the performance due "
|
39 |
+
"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous boundary "
|
40 |
+
"tensor if possible. This message will only appear once per program.");
|
41 |
+
trimmed_boundaries = raw_boundaries.contiguous();
|
42 |
+
}
|
43 |
+
if (!sort_is_contiguous) {
|
44 |
+
TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the performance due "
|
45 |
+
"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sorter "
|
46 |
+
"tensor if possible. This message will only appear once per program.");
|
47 |
+
trimmed_sorter = raw_sorter.contiguous();
|
48 |
+
}
|
49 |
+
if (raw_input.dtype() != raw_boundaries.dtype()) {
|
50 |
+
at::native::ResultTypeState state = {};
|
51 |
+
state = at::native::update_result_type_state(raw_boundaries, state);
|
52 |
+
state = at::native::update_result_type_state(raw_input, state);
|
53 |
+
ScalarType common_stype = at::native::result_type(state);
|
54 |
+
|
55 |
+
TORCH_INTERNAL_ASSERT(common_stype != ScalarType::Undefined);
|
56 |
+
if (common_stype != raw_input.scalar_type()) {
|
57 |
+
trimmed_input = in_is_contiguous ? raw_input.to(common_stype) : trimmed_input.to(common_stype);
|
58 |
+
}
|
59 |
+
if (common_stype != raw_boundaries.scalar_type()) {
|
60 |
+
trimmed_boundaries = bd_is_contiguous ? raw_boundaries.to(common_stype) : trimmed_boundaries.to(common_stype);
|
61 |
+
}
|
62 |
+
}
|
63 |
+
}
|
64 |
+
|
65 |
+
/* unused but needed for internal jagged tensor class */
|
66 |
+
inline void searchsorted_maybe_trim_input_tensors(
|
67 |
+
Tensor& trimmed_input,
|
68 |
+
Tensor& trimmed_boundaries,
|
69 |
+
const Tensor& raw_input,
|
70 |
+
const Tensor& raw_boundaries) {
|
71 |
+
Tensor trimmed_sorter;
|
72 |
+
Tensor raw_sorter;
|
73 |
+
return searchsorted_maybe_trim_input_tensors(
|
74 |
+
trimmed_input,
|
75 |
+
trimmed_boundaries,
|
76 |
+
trimmed_sorter,
|
77 |
+
raw_input,
|
78 |
+
raw_boundaries,
|
79 |
+
raw_sorter);
|
80 |
+
}
|
81 |
+
|
82 |
+
inline bool searchsorted_dims_matched_before_last_dim(const Tensor& boundaries, const Tensor& input) {
|
83 |
+
if (boundaries.dim() != input.dim()) {
|
84 |
+
return false;
|
85 |
+
}
|
86 |
+
const auto& dims_bd = boundaries.sizes();
|
87 |
+
const auto& dims_in = input.sizes();
|
88 |
+
for (int64_t dim = 0; dim + 1 < boundaries.dim(); ++dim) {
|
89 |
+
if (dims_bd[dim] != dims_in[dim]) {
|
90 |
+
return false;
|
91 |
+
}
|
92 |
+
}
|
93 |
+
return true;
|
94 |
+
}
|
95 |
+
|
96 |
+
inline Tensor searchsorted_scalar_tensor(const Scalar& scalar, const c10::Device& device) {
|
97 |
+
auto tensor = c10::scalar_to_tensor(scalar, device);
|
98 |
+
// This is to adopt the scalar promotion rules defined in native/TypeProperties.h
|
99 |
+
// So we have the same type promotion rules as binary operations.
|
100 |
+
tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
|
101 |
+
return tensor;
|
102 |
+
}
|
103 |
+
|
104 |
+
inline void searchsorted_pre_check(
|
105 |
+
const Tensor& boundaries,
|
106 |
+
const Tensor& input,
|
107 |
+
const Tensor& output,
|
108 |
+
const bool out_int32,
|
109 |
+
const bool right,
|
110 |
+
const c10::optional<c10::string_view> side_opt,
|
111 |
+
const Tensor& sorter) {
|
112 |
+
if (side_opt) {
|
113 |
+
const c10::string_view side = *side_opt;
|
114 |
+
TORCH_CHECK(side == "left" || side == "right", "torch.searchsorted(): side can only be 'left' or 'right' but ",
|
115 |
+
"got ", side);
|
116 |
+
|
117 |
+
// assume the user has not explicitly set (right=False, side="right")
|
118 |
+
TORCH_CHECK(!right || side == "right", "torch.searchsorted(): side and right can't be set to opposites, got side "
|
119 |
+
"of ", side, " while right was True");
|
120 |
+
}
|
121 |
+
|
122 |
+
TORCH_CHECK(boundaries.device() == input.device(), "torch.searchsorted(): boundaries and input value tensors ",
|
123 |
+
"should have same device type, but got boundaries tensor device type ", boundaries.device(), " and input value ",
|
124 |
+
"tensor device type ", input.device());
|
125 |
+
|
126 |
+
if (sorter.defined()) {
|
127 |
+
TORCH_CHECK(sorter.device() == boundaries.device(), "torch.searchsorted(): sorter and boundary tensors should ",
|
128 |
+
"have same device type, but got sorter tensor device type ", sorter.device(), " and input value tensor ",
|
129 |
+
"device type ", boundaries.device());
|
130 |
+
|
131 |
+
TORCH_CHECK(sorter.sizes() == boundaries.sizes(), "torch.searchsorted(): boundary and sorter must have the same "
|
132 |
+
"size, but got boundary tensor ", boundaries.sizes(), "and got sorter tensor ", sorter.sizes());
|
133 |
+
|
134 |
+
TORCH_CHECK(sorter.scalar_type() == ScalarType::Long, "torch.searchsorted(): sorter must be a tensor of long ",
|
135 |
+
"dtype but got dtype ", sorter.scalar_type());
|
136 |
+
|
137 |
+
if (sorter.numel() > 0) {
|
138 |
+
auto minmax = sorter.aminmax();
|
139 |
+
int64_t vmin = std::get<0>(minmax).item().toLong();
|
140 |
+
int64_t vmax = std::get<1>(minmax).item().toLong();
|
141 |
+
TORCH_CHECK(vmin >= 0 && vmax < sorter.sizes().back(), "torch.searchsorted(): sorter index out of range");
|
142 |
+
}
|
143 |
+
}
|
144 |
+
|
145 |
+
TORCH_CHECK(input.dim() > 0 || (input.dim() == 0 && input.numel() == 1 && boundaries.dim() == 1),
|
146 |
+
"torch.searchsorted(): input value can be a scalar only when boundaries tensor dimension is 1, but we got ",
|
147 |
+
"boundaries tensor dim(", boundaries.dim(), ") and input value's dim(", input.dim(), ") numel(",
|
148 |
+
input.numel(), ")");
|
149 |
+
|
150 |
+
TORCH_CHECK(boundaries.dim() != 0, "torch.searchsorted(): boundaries tensor should have positive dimension, but ",
|
151 |
+
"got 0 dimension");
|
152 |
+
|
153 |
+
TORCH_CHECK(boundaries.dim() == 1 || searchsorted_dims_matched_before_last_dim(boundaries, input),
|
154 |
+
"torch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor ",
|
155 |
+
"and input value tensor must match, but we got boundaries tensor ", boundaries.sizes(), " and input value tensor ",
|
156 |
+
input.sizes());
|
157 |
+
|
158 |
+
ScalarType output_dtype = output.scalar_type();
|
159 |
+
TORCH_CHECK(
|
160 |
+
(output_dtype == ScalarType::Long && !out_int32) ||
|
161 |
+
(output_dtype == ScalarType::Int && out_int32),
|
162 |
+
"torch.searchsorted(): output tensor's dtype is wrong, it can only be Int(int32) or Long(int64) depending on ",
|
163 |
+
"whether out_int32 flag is True, but we got output tensor's dtype ", output_dtype,
|
164 |
+
" and out_int32 flag is ", (out_int32 ? "True" : "False"));
|
165 |
+
|
166 |
+
if (out_int32) {
|
167 |
+
TORCH_CHECK(boundaries.sizes().back() < INT_MAX,
|
168 |
+
"torch.searchsorted(): the size of boundaries' last dimension should be less than ", INT_MAX, ", but we got ",
|
169 |
+
boundaries.sizes().back());
|
170 |
+
}
|
171 |
+
}
|
172 |
+
|
173 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CPUBlas.h
ADDED
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/OpMathType.h>
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
#include <ATen/native/TransposeType.h>
|
6 |
+
#include <c10/util/complex.h>
|
7 |
+
#include <c10/core/ScalarType.h>
|
8 |
+
#include <c10/core/Scalar.h>
|
9 |
+
|
10 |
+
namespace at::native::cpublas {
|
11 |
+
|
12 |
+
namespace internal {
|
13 |
+
void normalize_last_dims(
|
14 |
+
TransposeType transa, TransposeType transb,
|
15 |
+
int64_t m, int64_t n, int64_t k,
|
16 |
+
int64_t *lda, int64_t *ldb, int64_t *ldc);
|
17 |
+
} // namespace internal
|
18 |
+
|
19 |
+
using gemm_fn = void(*)(
|
20 |
+
at::ScalarType type,
|
21 |
+
TransposeType transa, TransposeType transb,
|
22 |
+
int64_t m, int64_t n, int64_t k,
|
23 |
+
const Scalar& alpha,
|
24 |
+
const void *a, int64_t lda,
|
25 |
+
const void *b, int64_t ldb,
|
26 |
+
const Scalar& beta,
|
27 |
+
void *c, int64_t ldc);
|
28 |
+
|
29 |
+
DECLARE_DISPATCH(gemm_fn, gemm_stub);
|
30 |
+
|
31 |
+
template <typename scalar_t>
|
32 |
+
void gemm(
|
33 |
+
TransposeType transa, TransposeType transb,
|
34 |
+
int64_t m, int64_t n, int64_t k,
|
35 |
+
at::opmath_type<scalar_t> alpha,
|
36 |
+
const scalar_t *a, int64_t lda,
|
37 |
+
const scalar_t *b, int64_t ldb,
|
38 |
+
at::opmath_type<scalar_t> beta,
|
39 |
+
scalar_t *c, int64_t ldc) {
|
40 |
+
internal::normalize_last_dims(transa, transb, m, n, k, &lda, &ldb, &ldc);
|
41 |
+
gemm_stub(
|
42 |
+
kCPU, c10::CppTypeToScalarType<scalar_t>::value,
|
43 |
+
transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
|
44 |
+
}
|
45 |
+
|
46 |
+
void gemm(
|
47 |
+
TransposeType transa, TransposeType transb,
|
48 |
+
int64_t m, int64_t n, int64_t k,
|
49 |
+
double alpha,
|
50 |
+
const double *a, int64_t lda,
|
51 |
+
const double *b, int64_t ldb,
|
52 |
+
double beta,
|
53 |
+
double *c, int64_t ldc);
|
54 |
+
|
55 |
+
void gemm(
|
56 |
+
TransposeType transa, TransposeType transb,
|
57 |
+
int64_t m, int64_t n, int64_t k,
|
58 |
+
float alpha,
|
59 |
+
const float *a, int64_t lda,
|
60 |
+
const float *b, int64_t ldb,
|
61 |
+
float beta,
|
62 |
+
float *c, int64_t ldc);
|
63 |
+
|
64 |
+
void gemm(
|
65 |
+
TransposeType transa, TransposeType transb,
|
66 |
+
int64_t m, int64_t n, int64_t k,
|
67 |
+
float alpha,
|
68 |
+
const at::BFloat16 *a, int64_t lda,
|
69 |
+
const at::BFloat16 *b, int64_t ldb,
|
70 |
+
float beta,
|
71 |
+
at::BFloat16 *c, int64_t ldc);
|
72 |
+
|
73 |
+
void gemm(
|
74 |
+
TransposeType transa, TransposeType transb,
|
75 |
+
int64_t m, int64_t n, int64_t k,
|
76 |
+
const float alpha,
|
77 |
+
const at::BFloat16 *a, int64_t lda,
|
78 |
+
const at::BFloat16 *b, int64_t ldb,
|
79 |
+
const float beta,
|
80 |
+
float *c, int64_t ldc);
|
81 |
+
|
82 |
+
void gemm(
|
83 |
+
TransposeType transa, TransposeType transb,
|
84 |
+
int64_t m, int64_t n, int64_t k,
|
85 |
+
float alpha,
|
86 |
+
const at::Half *a, int64_t lda,
|
87 |
+
const at::Half *b, int64_t ldb,
|
88 |
+
float beta,
|
89 |
+
at::Half *c, int64_t ldc);
|
90 |
+
|
91 |
+
void gemm(
|
92 |
+
TransposeType transa, TransposeType transb,
|
93 |
+
int64_t m, int64_t n, int64_t k,
|
94 |
+
c10::complex<double> alpha,
|
95 |
+
const c10::complex<double> *a, int64_t lda,
|
96 |
+
const c10::complex<double> *b, int64_t ldb,
|
97 |
+
c10::complex<double> beta,
|
98 |
+
c10::complex<double> *c, int64_t ldc);
|
99 |
+
|
100 |
+
void gemm(
|
101 |
+
TransposeType transa, TransposeType transb,
|
102 |
+
int64_t m, int64_t n, int64_t k,
|
103 |
+
c10::complex<float> alpha,
|
104 |
+
const c10::complex<float> *a, int64_t lda,
|
105 |
+
const c10::complex<float> *b, int64_t ldb,
|
106 |
+
c10::complex<float> beta,
|
107 |
+
c10::complex<float> *c, int64_t ldc);
|
108 |
+
|
109 |
+
void gemm(
|
110 |
+
TransposeType transa, TransposeType transb,
|
111 |
+
int64_t m, int64_t n, int64_t k,
|
112 |
+
int64_t alpha,
|
113 |
+
const int64_t *a, int64_t lda,
|
114 |
+
const int64_t *b, int64_t ldb,
|
115 |
+
int64_t beta,
|
116 |
+
int64_t *c, int64_t ldc);
|
117 |
+
|
118 |
+
template <typename scalar_t>
|
119 |
+
void gemm_batched(
|
120 |
+
TransposeType transa, TransposeType transb,
|
121 |
+
int64_t batch_size, int64_t m, int64_t n, int64_t k,
|
122 |
+
scalar_t alpha,
|
123 |
+
const scalar_t * const *a, int64_t lda,
|
124 |
+
const scalar_t * const *b, int64_t ldb,
|
125 |
+
const scalar_t beta,
|
126 |
+
scalar_t * const *c, int64_t ldc);
|
127 |
+
|
128 |
+
template <typename scalar_t>
|
129 |
+
void gemm_batched_with_stride(
|
130 |
+
TransposeType transa, TransposeType transb,
|
131 |
+
int64_t batch_size, int64_t m, int64_t n, int64_t k,
|
132 |
+
scalar_t alpha,
|
133 |
+
const scalar_t *a, int64_t lda, int64_t batch_stride_a,
|
134 |
+
const scalar_t *b, int64_t ldb, int64_t batch_stride_b,
|
135 |
+
scalar_t beta,
|
136 |
+
scalar_t *c, int64_t ldc, int64_t batch_stride_c);
|
137 |
+
|
138 |
+
using axpy_fn = void(*)(at::ScalarType type, int64_t n, const Scalar& a, const void *x, int64_t incx, void *y, int64_t incy);
|
139 |
+
|
140 |
+
DECLARE_DISPATCH(axpy_fn, axpy_stub);
|
141 |
+
|
142 |
+
template<typename scalar_t>
|
143 |
+
void axpy(int64_t n, scalar_t a, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy){
|
144 |
+
if(n == 1)
|
145 |
+
{
|
146 |
+
incx = 1;
|
147 |
+
incy = 1;
|
148 |
+
}
|
149 |
+
axpy_stub(
|
150 |
+
kCPU, c10::CppTypeToScalarType<scalar_t>::value,
|
151 |
+
n, a, x, incx, y, incy);
|
152 |
+
}
|
153 |
+
|
154 |
+
void axpy(int64_t n, double a, const double *x, int64_t incx, double *y, int64_t incy);
|
155 |
+
void axpy(int64_t n, float a, const float *x, int64_t incx, float *y, int64_t incy);
|
156 |
+
void axpy(int64_t n, c10::complex<double> a, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
|
157 |
+
void axpy(int64_t n, c10::complex<float> a, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
|
158 |
+
|
159 |
+
using copy_fn = void(*)(at::ScalarType type, int64_t n, const void *x, int64_t incx, void *y, int64_t incy);
|
160 |
+
|
161 |
+
DECLARE_DISPATCH(copy_fn, copy_stub);
|
162 |
+
|
163 |
+
template<typename scalar_t>
|
164 |
+
void copy(int64_t n, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) {
|
165 |
+
if(n == 1)
|
166 |
+
{
|
167 |
+
incx = 1;
|
168 |
+
incy = 1;
|
169 |
+
}
|
170 |
+
copy_stub(
|
171 |
+
kCPU, c10::CppTypeToScalarType<scalar_t>::value,
|
172 |
+
n, x, incx, y, incy);
|
173 |
+
}
|
174 |
+
|
175 |
+
void copy(int64_t n, const double *x, int64_t incx, double *y, int64_t incy);
|
176 |
+
void copy(int64_t n, const float *x, int64_t incx, float *y, int64_t incy);
|
177 |
+
void copy(int64_t n, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
|
178 |
+
void copy(int64_t n, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
|
179 |
+
|
180 |
+
} // namespace at::native::cpublas
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CPUFallback.h
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/ivalue.h>
|
4 |
+
#include <ATen/core/stack.h>
|
5 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
6 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
7 |
+
#include <c10/util/Metaprogramming.h>
|
8 |
+
#include <torch/library.h>
|
9 |
+
|
10 |
+
namespace at::native {
|
11 |
+
|
12 |
+
// This function implements a boxed fallback to CPU.
|
13 |
+
// External backends can add their own custom logging on top if it to customize their own CPU fallbacks.
|
14 |
+
TORCH_API void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool error_on_views = false);
|
15 |
+
|
16 |
+
// This is a helper function that backends can use to directly call their boxed CPU fallback
|
17 |
+
// TODO: update and add a usage example after https://github.com/pytorch/pytorch/pull/58092 lands.
|
18 |
+
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
|
19 |
+
struct _call_fallback_fn final {};
|
20 |
+
|
21 |
+
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
|
22 |
+
struct _call_fallback_fn<fallback_fn, Op, symint, ReturnType(ParameterTypes...)> final {
|
23 |
+
static ReturnType call(typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) {
|
24 |
+
auto op = c10::Dispatcher::singleton()
|
25 |
+
// TODO: figure out how to make compiler happy without dynamic casts
|
26 |
+
.findSchemaOrThrow((const char*) Op::name, (const char*) Op::overload_name)
|
27 |
+
//.findSchemaOrThrow("a", "b")
|
28 |
+
.typed<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>();
|
29 |
+
return c10::impl::BoxedKernelWrapper<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>::call(
|
30 |
+
c10::BoxedKernel::makeFromFunction<fallback_fn>(),
|
31 |
+
op,
|
32 |
+
c10::DispatchKeySet(), // we know that the cpu_fallback doesn't use the dispatch keyset.
|
33 |
+
// TODO: get std::forward<> to work
|
34 |
+
args...
|
35 |
+
);
|
36 |
+
}
|
37 |
+
};
|
38 |
+
|
39 |
+
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
|
40 |
+
using call_fallback_fn_symint = _call_fallback_fn<fallback_fn, Op, true, typename Op::schema>;
|
41 |
+
|
42 |
+
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
|
43 |
+
using call_fallback_fn = _call_fallback_fn<fallback_fn, Op, false, typename Op::schema>;
|
44 |
+
|
45 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessor.h
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/CompositeRandomAccessorCommon.h>
|
4 |
+
|
5 |
+
namespace at::native {
|
6 |
+
|
7 |
+
struct TupleInfoCPU {
|
8 |
+
template <typename ...Types>
|
9 |
+
using tuple = std::tuple<Types...>;
|
10 |
+
|
11 |
+
template <typename ...Types>
|
12 |
+
static constexpr auto tie(Types&... args) noexcept {
|
13 |
+
return std::tie(args...);
|
14 |
+
}
|
15 |
+
};
|
16 |
+
|
17 |
+
template <typename KeyAccessor, typename ValueAccessor>
|
18 |
+
using CompositeRandomAccessorCPU =
|
19 |
+
CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
|
20 |
+
|
21 |
+
template <typename Values, typename References>
|
22 |
+
void swap(
|
23 |
+
references_holder<Values, References> rh1,
|
24 |
+
references_holder<Values, References> rh2
|
25 |
+
) {
|
26 |
+
return std::swap(rh1.data(), rh2.data());
|
27 |
+
}
|
28 |
+
|
29 |
+
// std::get analogue for references_holder: returns the N-th element of the
// proxy's underlying data, preserving exactly the reference type std::get
// would yield (hence the decltype trailing return).
template <int N, typename Values, typename References>
auto get(references_holder<Values, References> rh) -> decltype(std::get<N>(rh.data())) {
  return std::get<N>(rh.data());
}
|
33 |
+
|
34 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ConvolutionMM3d.h
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/Tensor.h>
|
2 |
+
|
3 |
+
namespace at::native {

// CPU backward pass for slow_conv3d: given grad_output and the forward
// inputs, returns the tuple (grad_input, grad_weight, grad_bias), with
// output_mask[i] selecting which of the three gradients to compute.
// NOTE(review): the (input, weight, bias) ordering of output_mask is the
// conventional ATen layout — confirm against the definition in
// ConvolutionMM3d.cpp.
std::tuple<Tensor, Tensor, Tensor> slow_conv3d_backward_cpu(
    const Tensor& grad_output,
    const Tensor& self,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    std::array<bool, 3> output_mask);

} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/DispatchStub.h>
|
4 |
+
|
5 |
+
namespace at {

// Forward declarations only — this header must stay lightweight.
class Tensor;
struct TensorIterator;
class TensorBase;

namespace native {

// Signature of the per-device copy kernel: copies across the operands of
// `iter`. NOTE(review): `non_blocking` presumably mirrors Tensor.copy_'s
// non_blocking flag (asynchronous device copies) — confirm at the kernel
// definitions.
using copy_fn = void (*)(TensorIterator&, bool non_blocking);

DECLARE_DISPATCH(copy_fn, copy_stub);

// Copies `src` into `dst` even when a tensor has internal (self-)overlap,
// which the regular copy path would reject or mishandle.
TORCH_API void copy_ignoring_overlaps(const TensorBase &dst, const TensorBase &src);

} // namespace native
} // namespace at
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/DispatchStub.h>
|
4 |
+
|
5 |
+
namespace at {
class Tensor;

namespace native {

// Kernel signature for the cross-product stub. NOTE(review): argument roles
// (result, input a, input b, dimension d along which the cross product is
// taken) are inferred from the stub's name — confirm at the kernel
// registration sites.
using cross_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const int64_t d);

DECLARE_DISPATCH(cross_fn, cross_stub);

}} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h
ADDED
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <algorithm>
|
4 |
+
#include <vector>
|
5 |
+
|
6 |
+
#include <ATen/div_rtn.h>
|
7 |
+
#include <ATen/core/Tensor.h>
|
8 |
+
#include <c10/util/irange.h>
|
9 |
+
|
10 |
+
#define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \
|
11 |
+
TORCH_CHECK( \
|
12 |
+
T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \
|
13 |
+
"Need " #T " of dimension ", \
|
14 |
+
DIM, \
|
15 |
+
" and " #T ".size[", \
|
16 |
+
DIM_SIZE, \
|
17 |
+
"] == ", \
|
18 |
+
SIZE, \
|
19 |
+
" but got input to be of shape ", \
|
20 |
+
T.sizes())
|
21 |
+
|
22 |
+
namespace at::native::internal {
|
23 |
+
namespace {
|
24 |
+
inline bool all_positive(IntArrayRef& arr) {
|
25 |
+
return std::all_of(
|
26 |
+
arr.begin(), arr.end(), [](int64_t item) { return item > 0; });
|
27 |
+
}
|
28 |
+
|
29 |
+
// Returns true iff every element of `arr` is >= 0 (vacuously true for an
// empty vector, matching std::all_of semantics).
// Fix: take the vector by const reference — the function only reads it, and
// the former non-const `std::vector<int64_t>&` rejected const lvalues and
// temporaries. Existing callers (which pass non-const lvalues) still work.
inline bool all_nonnegative(const std::vector<int64_t>& arr) {
  return std::all_of(
      arr.begin(), arr.end(), [](int64_t item) { return item >= 0; });
}
|
33 |
+
|
34 |
+
} // namespace
|
35 |
+
|
36 |
+
// calculate the rear part of output tensor sizes
|
37 |
+
template <int64_t dim>
|
38 |
+
std::vector<int64_t> get_output_size(
|
39 |
+
const Tensor& input,
|
40 |
+
IntArrayRef kernel_size,
|
41 |
+
IntArrayRef stride_size,
|
42 |
+
IntArrayRef pad_size,
|
43 |
+
IntArrayRef dilation_size) {
|
44 |
+
std::vector<int64_t> sizes;
|
45 |
+
for (const auto index : c10::irange(dim)) {
|
46 |
+
sizes.push_back(
|
47 |
+
div_rtn<int64_t>(
|
48 |
+
input.size(index + input.dim() - dim) + 2 * pad_size[index] -
|
49 |
+
(dilation_size[index] * (kernel_size[index] - 1) + 1),
|
50 |
+
stride_size[index]) +
|
51 |
+
1);
|
52 |
+
}
|
53 |
+
return sizes;
|
54 |
+
}
|
55 |
+
|
56 |
+
// calculate the sizes of output tensor
|
57 |
+
template <int64_t dim>
|
58 |
+
std::vector<int64_t> get_output_size(
|
59 |
+
const Tensor& input,
|
60 |
+
const Tensor& weight,
|
61 |
+
IntArrayRef kernel_size,
|
62 |
+
IntArrayRef stride_size,
|
63 |
+
IntArrayRef pad_size,
|
64 |
+
IntArrayRef dilation_size) {
|
65 |
+
auto output_size = get_output_size<dim>(
|
66 |
+
input, kernel_size, stride_size, pad_size, dilation_size);
|
67 |
+
output_size.insert(output_size.begin(), weight.size(0));
|
68 |
+
if (input.dim() == dim + 2) {
|
69 |
+
output_size.insert(output_size.begin(), input.size(0));
|
70 |
+
}
|
71 |
+
return output_size;
|
72 |
+
}
|
73 |
+
/*
|
74 |
+
slow_conv_dilated_shape_check - check user-input to dilated convolution
|
75 |
+
forward and backward functions.
|
76 |
+
*/
|
77 |
+
// Validates user-supplied arguments to the dilated-convolution forward and
// backward functions; every failure raises via TORCH_CHECK. `grad_output`
// may be undefined (forward pass); `bias` may be undefined (no-bias conv).
template <int64_t dim>
void slow_conv_dilated_shape_check(
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias,
    const Tensor& grad_output,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  /*
    When the following tensors are defined:

    bias, grad_weight, grad_output

    then these are assumed to be contiguous without checking
    because these tensors are made contiguous by calling the
    .contiguous() method or by resizing of zero-sized tensors in
    forward/backward functions.

    When grad_weight is defined then it is assumed without
    checking to have the same shape as weight, see backward
    functions.
   */
  // Check size arguments: each per-dimension list must have exactly `dim`
  // entries.
  TORCH_CHECK(
      kernel_size.size() == dim,
      "kernel sizes length should be ",
      dim,
      ", but got ",
      kernel_size.size());
  TORCH_CHECK(
      stride_size.size() == dim,
      "strides length should be ",
      dim,
      ", but got ",
      stride_size.size());
  TORCH_CHECK(
      dilation_size.size() == dim,
      "dilations length should be ",
      dim,
      ", but got ",
      dilation_size.size());
  TORCH_CHECK(
      pad_size.size() == dim,
      "pads length should be ",
      dim,
      ", but got ",
      pad_size.size());

  // Kernel, stride and dilation must be strictly positive; padding may be 0
  // (it is only constrained indirectly through the output-size check below).
  TORCH_CHECK(
      all_positive(kernel_size),
      "kernel size should be greater than zero, but got ",
      kernel_size);
  TORCH_CHECK(
      all_positive(stride_size),
      "stride should be greater than zero, but got ",
      stride_size);
  TORCH_CHECK(
      all_positive(dilation_size),
      "dilation should be greater than zero, but got ",
      dilation_size);

  // check input
  TORCH_CHECK(input.defined(), "input must be defined");
  // Batched input has dim + 2 dimensions (batch, channels, spatial...);
  // unbatched has dim + 1 (channels, spatial...).
  bool is_batch = input.dim() == dim + 2;
  int64_t n = (is_batch ? 2 : 1);
  int64_t ndim = n + dim;
  if (!is_batch) {
    // input dim has to be dim + 1 if not batched
    TORCH_CHECK(
        input.dim() == dim + 1,
        "input must be 4D or 5D tensor but got ",
        input.dim(),
        "D tensor");
  }

  // check output sizes: every computed spatial output extent must be
  // non-negative, otherwise the kernel does not fit in the padded input.
  auto output_size = get_output_size<dim>(
      input, kernel_size, stride_size, pad_size, dilation_size);

  TORCH_CHECK(
      all_nonnegative(output_size),
      "calculated output size ",
      output_size,
      " is too small (all sizes must be non-negative)");

  // check weight: (out_channels, in_channels, *kernel_size)
  TORCH_CHECK(weight.defined(), "weight must be defined");
  TORCH_CHECK(
      weight.dim() == dim + 2,
      "weight must be ",
      dim + 2,
      "D tensor but got ",
      weight.dim(),
      "D tensor dim=",
      dim);
  TORCH_CHECK(
      weight.sizes().slice(2) == kernel_size,
      "weight[2:] shape ",
      weight.sizes().slice(2),
      " must be equal to kernel_size ",
      kernel_size);

  // Input channel count (dim 1 if batched, dim 0 otherwise) must match
  // weight's in_channels.
  TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1));

  // check bias when present: 1D, one entry per output channel.
  if (bias.defined()) {
    TORCH_CHECK(
        bias.dim() == 1,
        "bias must be 1D tensor but got ",
        bias.dim(),
        "D tensor");
    TORCH_CHECK_DIM_SIZE(bias, 1, 0, weight.size(0));
  }

  // check grad_output when present: must match the expected output shape
  // (batch, out_channels, *output_size) with batch present iff the input is
  // batched.
  if (grad_output.defined()) {
    TORCH_CHECK(
        grad_output.dim() == ndim,
        "grad_output must be ",
        ndim,
        "D tensor but got ",
        grad_output.dim(),
        "D tensor");
    if (is_batch) {
      TORCH_CHECK(
          grad_output.size(0) == input.size(0),
          "grad_output.size(0)=",
          grad_output.size(0),
          " must be input.size(0)=",
          input.size(0));
    }
    TORCH_CHECK(
        grad_output.size(n - 1) == weight.size(0),
        "grad_output.size(",
        n - 1,
        ")=",
        grad_output.size(n - 1),
        " must be weight.size(0)=",
        weight.size(0));
    TORCH_CHECK(
        grad_output.sizes().slice(n) == output_size,
        "grad_output[",
        n,
        ":] shape",
        grad_output.sizes().slice(n),
        " must be equal to output size ",
        output_size);
  }
}
|
228 |
+
|
229 |
+
} // namespace at::native::internal
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h
ADDED
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/core/DeviceType.h>
|
4 |
+
#include <c10/macros/Macros.h>
|
5 |
+
|
6 |
+
#include <atomic>
|
7 |
+
#include <utility>
|
8 |
+
|
9 |
+
// Implements instruction set specific function dispatch.
|
10 |
+
//
|
11 |
+
// Kernels that may make use of specialized instruction sets (e.g. AVX2) are
|
12 |
+
// compiled multiple times with different compiler flags (e.g. -mavx2). A
|
13 |
+
// DispatchStub contains a table of function pointers for a kernel. At runtime,
|
14 |
+
// the fastest available kernel is chosen based on the features reported by
|
15 |
+
// cpuinfo.
|
16 |
+
//
|
17 |
+
// Example:
|
18 |
+
//
|
19 |
+
// In native/MyKernel.h:
|
20 |
+
// using fn_type = void(*)(const Tensor& x);
|
21 |
+
// DECLARE_DISPATCH(fn_type, stub);
|
22 |
+
//
|
23 |
+
// In native/MyKernel.cpp
|
24 |
+
// DEFINE_DISPATCH(stub);
|
25 |
+
//
|
26 |
+
// In native/cpu/MyKernel.cpp:
|
27 |
+
// namespace {
|
28 |
+
// // use anonymous namespace so that different cpu versions won't conflict
|
29 |
+
// void kernel(const Tensor& x) { ... }
|
30 |
+
// }
|
31 |
+
// REGISTER_DISPATCH(stub, &kernel);
|
32 |
+
//
|
33 |
+
// To call:
|
34 |
+
// stub(kCPU, tensor);
|
35 |
+
//
|
36 |
+
// TODO: CPU instruction set selection should be folded into whatever
|
37 |
+
// the main dispatch mechanism is.
|
38 |
+
|
39 |
+
// ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere
|
40 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
41 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wundefined-var-template")
|
42 |
+
|
43 |
+
namespace at::native {
|
44 |
+
|
45 |
+
// Instruction-set levels a CPU kernel may be compiled for. The numeric value
// doubles as the kernel's rank when choosing the fastest available
// implementation; only one vector-extension family exists per platform
// (VSX on PPC, ZVECTOR on s390x, AVX2/AVX512 otherwise), so the values
// overlap across the #if branches.
enum class CPUCapability {
  DEFAULT = 0,
#if defined(HAVE_VSX_CPU_DEFINITION)
  VSX = 1,
#elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
  ZVECTOR = 1,
#else
  AVX2 = 1,
  AVX512 = 2,
#endif
  NUM_OPTIONS
};

// Returns the capability of the CPU the process is running on, based on the
// features reported by cpuinfo (see the file-header comment above).
CPUCapability get_cpu_capability();
|
59 |
+
|
60 |
+
template <typename FnPtr, typename T>
|
61 |
+
struct DispatchStub;
|
62 |
+
|
63 |
+
/**
 * The sole purpose of this class is to outline methods that don't need to be
 * specialized or otherwise inlined and duplicated (by the compiler due to
 * template expansion), since it causes size bloat if there are a significant
 * number of specialization of the DispatchStub<> class.
 */
struct TORCH_API DispatchStubImpl {
  // Resolves (and for CPU, caches) the type-erased kernel pointer to invoke
  // for `device_type`. The CPU candidate kernels are passed in explicitly;
  // which parameters exist depends on the instruction sets enabled at build
  // time, so the signature is assembled with #ifdefs.
  void* get_call_ptr(
    c10::DeviceType device_type
    , void *DEFAULT
#ifdef HAVE_AVX512_CPU_DEFINITION
    , void *AVX512
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
    , void *AVX2
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
    , void *VSX
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
    , void *ZVECTOR
#endif
  );

  /**
   * The CPU Dispatch actual method is chosen in decreasing order of preference by
   * DispatchStubImpl::choose_cpu_impl() in case none is found by
   * DispatchStubImpl::get_call_ptr() in cpu_dispatch_ptr.
   */
  void* choose_cpu_impl(
    void *DEFAULT
#ifdef HAVE_AVX512_CPU_DEFINITION
    , void *AVX512
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
    , void *AVX2
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
    , void *VSX
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
    , void *ZVECTOR
#endif
  );

  // Cached kernel pointers, one slot per device type. The CPU slot is an
  // atomic because it is filled in lazily (see choose_cpu_impl above).
  // Fixing dispatch error in Windows debug builds.
  // See https://github.com/pytorch/pytorch/issues/22681 for more details.
#if defined(_MSC_VER) && defined(_DEBUG)
  std::atomic<void*> cpu_dispatch_ptr;
  void* cuda_dispatch_ptr;
  void* hip_dispatch_ptr;
  void* mps_dispatch_ptr;
  void* privateuse1_dispatch_ptr;
#else
  std::atomic<void*> cpu_dispatch_ptr{nullptr};
  void* cuda_dispatch_ptr = nullptr;
  void* hip_dispatch_ptr = nullptr;
  void* mps_dispatch_ptr = nullptr;
  void* privateuse1_dispatch_ptr = nullptr;
#endif
};
|
124 |
+
|
125 |
+
// Partial specialization of DispatchStub for function-pointer types. Holds
// one static kernel pointer per enabled CPU instruction set
// (DEFAULT/AVX512/AVX2/VSX/ZVECTOR) — filled in by the REGISTER_*_DISPATCH
// macros below — and delegates selection/caching to the non-template
// DispatchStubImpl to avoid per-specialization code bloat.
template <typename rT, typename T, typename... Args>
struct DispatchStub<rT (*)(Args...), T> {
  using FnPtr = rT (*) (Args...);

  DispatchStub() = default;
  DispatchStub(const DispatchStub&) = delete;
  DispatchStub& operator=(const DispatchStub&) = delete;

private:
  // Looks up the kernel for `device_type`, forwarding the build-enabled CPU
  // candidates (type-erased to void*) to the shared impl.
  FnPtr get_call_ptr(c10::DeviceType device_type) {
    return reinterpret_cast<FnPtr>(
      impl.get_call_ptr(device_type
      , reinterpret_cast<void*>(DEFAULT)
#ifdef HAVE_AVX512_CPU_DEFINITION
      , reinterpret_cast<void*>(AVX512)
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
      , reinterpret_cast<void*>(AVX2)
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
      , reinterpret_cast<void*>(VSX)
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
      , reinterpret_cast<void*>(ZVECTOR)
#endif
      )
    );
  }

public:
  // Invokes the kernel registered for `device_type`, perfectly forwarding
  // the arguments. This is how call sites use a stub: stub(kCPU, args...).
  template <typename... ArgTypes>
  rT operator()(c10::DeviceType device_type, ArgTypes&&... args) {
    FnPtr call_ptr = get_call_ptr(device_type);
    return (*call_ptr)(std::forward<ArgTypes>(args)...);
  }

  // Registration hooks used by the Register*Dispatch helpers below.
  void set_cuda_dispatch_ptr(FnPtr fn_ptr) {
    impl.cuda_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
  }

  void set_hip_dispatch_ptr(FnPtr fn_ptr) {
    impl.hip_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
  }

  void set_mps_dispatch_ptr(FnPtr fn_ptr) {
    impl.mps_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
  }

  void set_privateuse1_dispatch_ptr(FnPtr fn_ptr) {
    impl.privateuse1_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
  }

  // Per-arch CPU kernel pointers; each is defined (once per stub) by a
  // REGISTER_ARCH_DISPATCH expansion in the arch-specific TU.
  static TORCH_API FnPtr DEFAULT;
#ifdef HAVE_AVX512_CPU_DEFINITION
  static TORCH_API FnPtr AVX512;
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
  static TORCH_API FnPtr AVX2;
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
  static TORCH_API FnPtr VSX;
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
  static TORCH_API FnPtr ZVECTOR;
#endif
private:
  DispatchStubImpl impl;
};
|
193 |
+
|
194 |
+
namespace {
// Static-registration helpers: the REGISTER_*_DISPATCH macros below define a
// namespace-scope object of one of these types, whose constructor records
// `value` as the kernel for the corresponding device on `stub`.
template <typename DispatchStub>
struct RegisterCUDADispatch {
  RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
    stub.set_cuda_dispatch_ptr(value);
  }
};

template <typename DispatchStub>
struct RegisterMPSDispatch {
  RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
    stub.set_mps_dispatch_ptr(value);
  }
};

template <typename DispatchStub>
struct RegisterHIPDispatch {
  RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
    // TODO: make this point at hip_dispatch_ptr
    stub.set_cuda_dispatch_ptr(value);
  }
};

template <typename DispatchStub>
struct RegisterPRIVATEUSE1Dispatch {
  RegisterPRIVATEUSE1Dispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
    stub.set_privateuse1_dispatch_ptr(value);
  }
};

} // anonymous namespace
|
225 |
+
// Compiler will complain if you put things like std::tuple<Tensor, Tensor> in
|
226 |
+
// the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g.,
|
227 |
+
// adding parentheses and using helper struct to get rid of the parentheses, do
|
228 |
+
// not work with MSVC. So do a `using`-declaration if you need to pass in such
|
229 |
+
// `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h.
|
230 |
+
#define DECLARE_DISPATCH(fn, name) \
|
231 |
+
struct name : DispatchStub<fn, name> { \
|
232 |
+
name() = default; \
|
233 |
+
name(const name&) = delete; \
|
234 |
+
name& operator=(const name&) = delete; \
|
235 |
+
}; \
|
236 |
+
extern TORCH_API struct name name
|
237 |
+
|
238 |
+
#define DEFINE_DISPATCH(name) struct name name
|
239 |
+
|
240 |
+
#define REGISTER_ARCH_DISPATCH(name, arch, fn) \
|
241 |
+
template <> name::FnPtr TORCH_API DispatchStub<name::FnPtr, struct name>::arch = fn;
|
242 |
+
|
243 |
+
#ifdef HAVE_AVX512_CPU_DEFINITION
|
244 |
+
#define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn)
|
245 |
+
#else
|
246 |
+
#define REGISTER_AVX512_DISPATCH(name, fn)
|
247 |
+
#endif
|
248 |
+
|
249 |
+
#ifdef HAVE_AVX2_CPU_DEFINITION
|
250 |
+
#define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn)
|
251 |
+
#else
|
252 |
+
#define REGISTER_AVX2_DISPATCH(name, fn)
|
253 |
+
#endif
|
254 |
+
|
255 |
+
#ifdef HAVE_VSX_CPU_DEFINITION
|
256 |
+
#define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn)
|
257 |
+
#else
|
258 |
+
#define REGISTER_VSX_DISPATCH(name, fn)
|
259 |
+
#endif
|
260 |
+
|
261 |
+
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
|
262 |
+
#define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn)
|
263 |
+
#else
|
264 |
+
#define REGISTER_ZVECTOR_DISPATCH(name, fn)
|
265 |
+
#endif
|
266 |
+
|
267 |
+
// Macro to register the same kernel for all CPU arch types. This is useful
|
268 |
+
// if a kernel does not benefit from being recompiled across different arch types.
|
269 |
+
#define REGISTER_ALL_CPU_DISPATCH(name, fn) \
|
270 |
+
REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \
|
271 |
+
REGISTER_AVX512_DISPATCH(name, fn) \
|
272 |
+
REGISTER_AVX2_DISPATCH(name, fn) \
|
273 |
+
REGISTER_VSX_DISPATCH(name, fn) \
|
274 |
+
REGISTER_ZVECTOR_DISPATCH(name, fn)
|
275 |
+
|
276 |
+
#define REGISTER_NO_CPU_DISPATCH(name) \
|
277 |
+
REGISTER_ALL_CPU_DISPATCH(name, nullptr)
|
278 |
+
|
279 |
+
#define REGISTER_CUDA_DISPATCH(name, fn) \
|
280 |
+
static RegisterCUDADispatch<struct name> name ## __register(name, fn);
|
281 |
+
|
282 |
+
#define REGISTER_HIP_DISPATCH(name, fn) \
|
283 |
+
static RegisterHIPDispatch<struct name> name ## __register(name, fn);
|
284 |
+
|
285 |
+
#define REGISTER_MPS_DISPATCH(name, fn) \
|
286 |
+
static RegisterMPSDispatch<struct name> name ## __register(name, fn);
|
287 |
+
|
288 |
+
#define REGISTER_PRIVATEUSE1_DISPATCH(name, fn) \
|
289 |
+
static RegisterPRIVATEUSE1Dispatch<struct name> name ## __register(name, fn);
|
290 |
+
|
291 |
+
// NB: This macro must be used in an actual 'cu' file; if you try using
|
292 |
+
// it from a 'cpp' file it will not work!
|
293 |
+
#if defined(__CUDACC__)
|
294 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
|
295 |
+
#elif defined(__HIPCC__)
|
296 |
+
// TODO: cut this over to HIP dispatch once we stop pretending that CUDA
|
297 |
+
// is HIP in the PyTorch HIPify build.
|
298 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
|
299 |
+
// #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn)
|
300 |
+
#elif defined(__OBJC__) && defined(USE_MPS)
|
301 |
+
// NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel
|
302 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn)
|
303 |
+
#elif defined(CPU_CAPABILITY)
|
304 |
+
// REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches.
|
305 |
+
// ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others.
|
306 |
+
#ifdef CPU_CAPABILITY_AVX512
|
307 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, nullptr)
|
308 |
+
#else
|
309 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
|
310 |
+
#endif
|
311 |
+
#define ALSO_REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
|
312 |
+
#endif
|
313 |
+
} // namespace at::native
|
314 |
+
|
315 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Distance.h
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/DispatchStub.h>
|
4 |
+
|
5 |
+
namespace at {
class Tensor;

namespace native {

// Kernel signatures for the distance stubs. pdist computes pairwise
// distances within one tensor, cdist between two tensors, both with
// p-norm parameter `p`; the *_backward variants take additional grad /
// forward-output tensors.
// NOTE(review): exact argument roles are inferred from the stub names —
// confirm at the kernel definitions before relying on them.
using pdist_forward_fn = void(*)(Tensor&, const Tensor&, const double p);
using pdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);
using cdist_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p);
using cdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);

DECLARE_DISPATCH(pdist_forward_fn, pdist_forward_stub);
DECLARE_DISPATCH(pdist_backward_fn, pdist_backward_stub);
DECLARE_DISPATCH(cdist_fn, cdist_stub);
DECLARE_DISPATCH(cdist_backward_fn, cdist_backward_stub);

}} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h
ADDED
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/Dispatch.h>
|
5 |
+
#include <ATen/Generator.h>
|
6 |
+
#include <ATen/ExpandUtils.h>
|
7 |
+
#include <ATen/Tensor.h>
|
8 |
+
#include <ATen/MemoryOverlap.h>
|
9 |
+
#include <ATen/NamedTensorUtils.h>
|
10 |
+
#include <ATen/native/Resize.h>
|
11 |
+
#include <ATen/native/TensorIterator.h>
|
12 |
+
#include <c10/util/Optional.h>
|
13 |
+
#include <limits>
|
14 |
+
#include <cmath>
|
15 |
+
|
16 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
17 |
+
#include <ATen/Functions.h>
|
18 |
+
#else
|
19 |
+
#include <ATen/ops/empty_like.h>
|
20 |
+
#include <ATen/ops/empty.h>
|
21 |
+
#include <ATen/ops/full.h>
|
22 |
+
#include <ATen/ops/view_as_real.h>
|
23 |
+
#endif
|
24 |
+
|
25 |
+
namespace at::native::templates {
|
26 |
+
|
27 |
+
// ==================================================== Random ========================================================
|
28 |
+
|
29 |
+
// The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`.
|
30 |
+
// The current implementation of `random_` uses uint64_t arithmetics and casts the result to the target dtype(scalar_t).
|
31 |
+
// This casting can result in generating numbers that happen to be greater or equal to `to` value. For instance:
|
32 |
+
//
|
33 |
+
// auto actual = torch::empty({3, 3}, torch::half);
|
34 |
+
// actual.random_(0, 65504);
|
35 |
+
//
|
36 |
+
// If random's uint64_t arithmetics produces 65503 as a random value after casting to torch::half it becomes 65504
|
37 |
+
// and violates the requirement that random value must be less than `to`. To resolve this issue `update_from` and `update_to`
|
38 |
+
// moves `from` to the right and `to` to the left to the next closest value that won't go outside [from, to) after casting to
|
39 |
+
// the target dtype. For `to` = 65504 it moves left for (1 << (log2(to) - 11 + 1)) = 32 and becomes 65472, which is previous
|
40 |
+
// available number for torch::half dtype.
|
41 |
+
// See the block comment above: returns the closest value >= `from` that
// survives the uint64 -> scalar_t cast performed by random_ without falling
// below the requested lower bound.
template<typename scalar_t>
int64_t update_from(int64_t from) {
  static_assert(
    std::is_floating_point<scalar_t>::value ||
    std::is_same<scalar_t, at::Half>::value ||
    std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
  // Round-trip from+1 through scalar_t; if precision loss pulled the result
  // below `from`, the bound is not exactly representable at this magnitude.
  const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
  if (from_plus_1 < from) {
    // n = floor(log2(|from + 1|)); 2^(n - digits + 1) is the spacing of
    // scalar_t values at this magnitude, so stepping up by one such unit
    // lands on the next representable value.
    int64_t from_ = std::abs(from + 1);
    int n = 0;
    while (from_ >>= 1) ++n;
    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
    from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
  }
  return from;
}
|
57 |
+
|
58 |
+
// Mirror of update_from: returns the closest value <= `to` such that `to - 1`
// survives the cast to scalar_t without landing on or above the exclusive
// upper bound (see the worked torch::half example in the block comment above).
template<typename scalar_t>
int64_t update_to(int64_t to) {
  static_assert(
    std::is_floating_point<scalar_t>::value ||
    std::is_same<scalar_t, at::Half>::value ||
    std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
  // Round-trip to-1 through scalar_t; if rounding pushed it up to >= `to`,
  // the bound is not exactly representable at this magnitude.
  const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
  if (to_minus_1 >= to) {
    // n = floor(log2(|to - 1|)); step down by 2^(n - digits + 1), the spacing
    // of scalar_t values at this magnitude, to the previous representable
    // value.
    int64_t to_ = std::abs(to - 1);
    int n = 0;
    while (to_ >>= 1) ++n;
    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
    to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
  }
  return to;
}
|
74 |
+
|
75 |
+
// Return earlier for not invoking kernel.
|
76 |
+
// See https://github.com/pytorch/pytorch/issues/103418 for more details
|
77 |
+
#define CHECK_EMPTY_AND_RETURN(tensor) \
|
78 |
+
if (tensor.numel() == 0) { \
|
79 |
+
return tensor; \
|
80 |
+
}
|
81 |
+
|
82 |
+
// Fills `self` in place using the random kernel functor instantiated for RNG,
// and returns `self`. Empty tensors are returned untouched so no kernel is
// launched (see CHECK_EMPTY_AND_RETURN above and pytorch issue #103418).
template<template<typename> class random_kernel, typename RNG>
at::Tensor& random_impl(at::Tensor& self, c10::optional<Generator> generator) {
  CHECK_EMPTY_AND_RETURN(self);
  // Nullary iterator: `self` is the sole (output) operand the kernel writes.
  auto iter = at::TensorIterator::borrowing_nullary_op(self);
  random_kernel<RNG>()(iter, generator);
  return self;
}
|
89 |
+
|
90 |
+
// Hard error when `var` falls outside the representable [min, max] of `dtype`.
#define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \
  TORCH_CHECK(var >= min && var <= max, name , " is out of bounds for ", dtype); \

// Warn (will become an error) when `var` lies outside [-(2^digits), 2^digits],
// the range where every integer is exactly representable by the FP dtype.
#define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \
  if (var < -(1LL << digits) || var > (1LL << digits)) { \
    TORCH_WARN(name , " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \
      "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. ", \
      "This warning will become an error in version 1.7 release, please fix the code in advance"); \
  }
|
99 |
+
|
100 |
+
// Validates that the inclusive range [from, to_inc] fits within the value
// range of `dtype`. For floating-point dtypes also warns when the bounds lie
// beyond 2^digits, past which integers are no longer exactly representable.
static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
  const auto scalar_type = typeMetaToScalarType(dtype);
  if (isFloatingType(scalar_type)) {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
      const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
      const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
      CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);

      // digits == mantissa bit count; beyond 2^digits the discrete uniform
      // distribution would not be exactly representable, so only warn.
      constexpr auto digits = std::numeric_limits<scalar_t>::digits;
      WARN_OUT_OF_BOUNDS(from, "from", digits, dtype);
      WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype);
    });
  } else if (isIntegralType(scalar_type, /*includeBool=*/true)) {
    AT_DISPATCH_INTEGRAL_TYPES_AND(at::ScalarType::Bool, scalar_type, "check_random_integral_bounds", [&]() {
      const auto min = static_cast<int64_t>(std::numeric_limits<scalar_t>::lowest());
      const auto max = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
      CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
    });
  } else {
    TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types");
  }
}
|
124 |
+
|
125 |
+
// Fills `self` with random integers in [from, to) when `to_opt` is set;
// otherwise in [from, dtype-max] (or the full 2^64 range when `from` is also
// int64 lowest). Bounds are snapped to representable values for FP dtypes,
// then validated against the dtype's range before the kernel is invoked.
template<template<typename> class random_from_to_kernel, typename RNG>
at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> generator) {
  uint64_t range = 0;
  auto iter = at::TensorIterator::borrowing_nullary_op(self);
  if (to_opt.has_value()) {
    // [from, to)
    int64_t to = *to_opt;
    TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
    if (isFloatingType(iter.dtype())) {
      // Snap both bounds to values exactly representable in the FP dtype,
      // then re-check ordering (snapping can collapse the interval).
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] {
        from = update_from<scalar_t>(from);
        to = update_to<scalar_t>(to);
        TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to);
      });
    }
    check_from_to_in_range(from, to - 1, self.dtype());
    CHECK_EMPTY_AND_RETURN(self);
    // Unsigned subtraction: well-defined even when from is negative.
    range = static_cast<uint64_t>(to) - static_cast<uint64_t>(from);
    random_from_to_kernel<RNG>()(iter, range, from, generator);
  } else if (from != std::numeric_limits<int64_t>::lowest()) {
    // [from, std::numeric_limits<int64_t>::max()]
    int64_t to_inc = 0;
    if (isFloatingType(iter.dtype())) {
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] {
        // Cap the inclusive upper bound at 2^digits, the largest integer the
        // FP dtype can represent exactly.
        constexpr int64_t scalar_t_max = static_cast<int64_t>(1) << std::numeric_limits<scalar_t>::digits;
        to_inc = scalar_t_max > std::numeric_limits<int64_t>::max() ? std::numeric_limits<int64_t>::max() : static_cast<int64_t>(scalar_t_max);
        from = update_from<scalar_t>(from);
        TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than or equal to 'to_inc' casted to dtype, but got from=", from, " > to_inc=", to_inc);
      });
    } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) {
      AT_DISPATCH_INTEGRAL_TYPES_AND(at::ScalarType::Bool, self.scalar_type(), "random_from_to_range_calc", [&] {
        if constexpr (std::is_same_v<scalar_t, bool>) {
          to_inc = static_cast<int64_t>(true);
        } else {
          to_inc = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
        }
      });
    } else {
      TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types");
    }
    check_from_to_in_range(from, to_inc, self.dtype());
    CHECK_EMPTY_AND_RETURN(self);
    // +1 because to_inc is inclusive here.
    range = static_cast<uint64_t>(to_inc) - static_cast<uint64_t>(from) + 1;
    random_from_to_kernel<RNG>()(iter, range, from, generator);
  } else {
    // [std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()]
    // range = 2^64 -- not representable in uint64_t, so the kernel overload
    // without an explicit range is used.
    CHECK_EMPTY_AND_RETURN(self);
    random_from_to_kernel<RNG>()(iter, generator);
  }
  return self;
}
|
177 |
+
|
178 |
+
// ==================================================== Normal ========================================================
|
179 |
+
|
180 |
+
// Validates a tensor-valued std argument: must be non-complex and have all
// elements >= 0 (skipped for empty/meta tensors, where min() cannot be taken).
#define CHECK_NORMAL_TENSOR_STD(std) \
  do { \
    TORCH_CHECK( \
      !std.is_complex(), \
      "normal expects standard deviation to be non-complex"); \
    TORCH_CHECK( \
      std.numel() == 0 || std.is_meta() || std.min().ge(0).item<bool>(), \
      "normal expects all elements of std >= 0.0"); \
  } while (0)

// Validates a scalar std argument (>= 0).
#define CHECK_NORMAL_STD(std) \
  TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);
|
192 |
+
|
193 |
+
template<template<typename> class normal_kernel, typename RNG>
|
194 |
+
Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
|
195 |
+
CHECK_NORMAL_STD(std);
|
196 |
+
CHECK_EMPTY_AND_RETURN(self);
|
197 |
+
|
198 |
+
if (self.is_complex()) {
|
199 |
+
auto float_tensor = at::view_as_real(self);
|
200 |
+
// variance for normal distribution of the real and imaginary values
|
201 |
+
// is half of the input variance
|
202 |
+
normal_kernel<RNG>()(float_tensor, mean, std/(std::sqrt(2)), gen);
|
203 |
+
} else {
|
204 |
+
normal_kernel<RNG>()(self, mean, std, gen);
|
205 |
+
}
|
206 |
+
return self;
|
207 |
+
}
|
208 |
+
|
209 |
+
template<template<typename> class normal_kernel, typename RNG>
|
210 |
+
Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional<Generator> gen) {
|
211 |
+
CHECK_NORMAL_STD(std);
|
212 |
+
auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
|
213 |
+
auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
|
214 |
+
at::native::resize_output(output, shape);
|
215 |
+
normal_impl_<normal_kernel, RNG>(output, 0, std, gen);
|
216 |
+
output.add_(mean);
|
217 |
+
return output;
|
218 |
+
}
|
219 |
+
|
220 |
+
template<template<typename> class normal_kernel, typename RNG>
|
221 |
+
Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
|
222 |
+
CHECK_NORMAL_TENSOR_STD(std);
|
223 |
+
auto mean_tensor = at::full({}, mean, output.options());
|
224 |
+
auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
|
225 |
+
at::native::resize_output(output, shape);
|
226 |
+
normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
|
227 |
+
// CUDA NB: addcmul_out copies the tensor to be added into the output.
|
228 |
+
// The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
|
229 |
+
// The third argument is not a constant reference and hence the samples in output are overwritten.
|
230 |
+
// Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
|
231 |
+
output.mul_(std).add_(mean_tensor);
|
232 |
+
return output;
|
233 |
+
}
|
234 |
+
|
235 |
+
template<template<typename> class normal_kernel, typename RNG>
|
236 |
+
Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
|
237 |
+
CHECK_NORMAL_TENSOR_STD(std);
|
238 |
+
auto shape = at::infer_size(mean.sizes(), std.sizes());
|
239 |
+
at::native::resize_output(output, shape);
|
240 |
+
normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
|
241 |
+
// CUDA NB: addcmul_out copies the tensor to be added into the output.
|
242 |
+
// The previous function here was addcmul_out(output, mean, output, std, 1);
|
243 |
+
// The third argument is not a constant reference and hence the samples in output are overwritten.
|
244 |
+
// Consequently, the computation performed is mean + mean * std instead of mean + output * std
|
245 |
+
output.mul_(std).add_(mean);
|
246 |
+
return output;
|
247 |
+
}
|
248 |
+
|
249 |
+
template<template<typename> class normal_kernel, typename RNG>
|
250 |
+
Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
|
251 |
+
CHECK_NORMAL_STD(std);
|
252 |
+
Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
|
253 |
+
normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
|
254 |
+
return ret;
|
255 |
+
}
|
256 |
+
|
257 |
+
template<template<typename> class normal_kernel, typename RNG>
|
258 |
+
Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
|
259 |
+
CHECK_NORMAL_TENSOR_STD(std);
|
260 |
+
Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
|
261 |
+
normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
|
262 |
+
return ret;
|
263 |
+
}
|
264 |
+
|
265 |
+
template<template<typename> class normal_kernel, typename RNG>
|
266 |
+
Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
|
267 |
+
CHECK_NORMAL_TENSOR_STD(std);
|
268 |
+
auto shape = at::infer_size(mean.sizes(), std.sizes());
|
269 |
+
Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
|
270 |
+
normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
|
271 |
+
return ret;
|
272 |
+
}
|
273 |
+
|
274 |
+
// ==================================================== Uniform =======================================================
|
275 |
+
|
276 |
+
// Fills `self` in place with Uniform[from, to) samples. Complex tensors are
// handled by recursing on the real view (both components sampled uniformly).
// Bounds are range-checked against the dtype, then clamped into it.
template<template<typename> class uniform_kernel, typename RNG>
at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
  if (self.is_complex()) {
    CHECK_EMPTY_AND_RETURN(self);
    auto float_tensor = at::view_as_real(self);
    uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
      const auto dtype = self.dtype();
      const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
      const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
      CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype);
      TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to);
      // The interval width itself must also fit in the dtype, otherwise the
      // kernel's (to - from) scale would overflow.
      TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
            "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()),
            ">::max(), but found to=", to, " and from=", from,
            " which result in to-from to exceed the limit");
      // Clamp bounds into the dtype's representable range (captured by
      // reference, so the outer from/to are updated).
      from = std::min(std::max(from, min), max);
      to = std::max(std::min(to, max), min);
    });
    CHECK_EMPTY_AND_RETURN(self);
    auto iter = at::TensorIterator::borrowing_nullary_op(self);
    uniform_kernel<RNG>()(iter, from, to, generator);
  }
  return self;
}
|
303 |
+
|
304 |
+
// ================================================== LogNormal =======================================================
|
305 |
+
|
306 |
+
template<template<typename> class log_normal_kernel, typename RNG>
|
307 |
+
at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
|
308 |
+
TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
|
309 |
+
CHECK_EMPTY_AND_RETURN(self);
|
310 |
+
auto iter = TensorIterator::borrowing_nullary_op(self);
|
311 |
+
log_normal_kernel<RNG>()(iter, mean, std, gen);
|
312 |
+
return self;
|
313 |
+
}
|
314 |
+
|
315 |
+
// =================================================== Geometric ======================================================
|
316 |
+
|
317 |
+
template<template<typename> class geometric_kernel, typename RNG>
|
318 |
+
Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
|
319 |
+
TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
|
320 |
+
CHECK_EMPTY_AND_RETURN(self);
|
321 |
+
auto iter = TensorIterator::borrowing_nullary_op(self);
|
322 |
+
geometric_kernel<RNG>()(iter, p, gen);
|
323 |
+
return self;
|
324 |
+
}
|
325 |
+
|
326 |
+
// ================================================== Exponential =====================================================
|
327 |
+
|
328 |
+
template<template<typename> class exponential_kernel, typename RNG>
|
329 |
+
Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
|
330 |
+
TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
|
331 |
+
CHECK_EMPTY_AND_RETURN(self);
|
332 |
+
auto iter = TensorIterator::borrowing_nullary_op(self);
|
333 |
+
exponential_kernel<RNG>()(iter, lambda, gen);
|
334 |
+
return self;
|
335 |
+
}
|
336 |
+
|
337 |
+
// ==================================================== Cauchy ========================================================
|
338 |
+
|
339 |
+
template<template<typename> class cauchy_kernel, typename RNG>
|
340 |
+
Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
|
341 |
+
// TODO: instead of variable name 'sigma', use 'gamma' or 'scale'
|
342 |
+
// the variance, squared sigma, is undefined for cauchy distribution
|
343 |
+
TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
|
344 |
+
TORCH_CHECK(at::isFloatingType(self.scalar_type()), "Cauchy distribution is a continuous probability distribution. dtype must be a floating point but you specified ", self.dtype());
|
345 |
+
CHECK_EMPTY_AND_RETURN(self);
|
346 |
+
auto iter = TensorIterator::borrowing_nullary_op(self);
|
347 |
+
cauchy_kernel<RNG>()(iter, median, sigma, gen);
|
348 |
+
return self;
|
349 |
+
}
|
350 |
+
|
351 |
+
// ==================================================== Bernoulli =====================================================
|
352 |
+
|
353 |
+
// Fills `self` in place with Bernoulli samples whose per-element probability
// is given by the tensor `p_`. Names are suppressed for the kernel call and
// internal overlap would make in-place writes ambiguous, hence the assert.
template<template<typename> class bernoulli_tensor_kernel, typename RNG>
Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
  CHECK_EMPTY_AND_RETURN(self);
  NoNamesGuard guard;
  at::assert_no_internal_overlap(self);
  bernoulli_tensor_kernel<RNG>()(self, p_, gen);
  return self;
}
|
361 |
+
|
362 |
+
// Fills `self` in place with Bernoulli(p) samples for a scalar probability
// p in the closed interval [0, 1].
template<template<typename> class bernoulli_scalar_kernel, typename RNG>
Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
  TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  CHECK_EMPTY_AND_RETURN(self);
  // In-place kernel writes require non-overlapping memory.
  at::assert_no_internal_overlap(self);
  bernoulli_scalar_kernel<RNG>()(self, p, gen);
  return self;
}
|
370 |
+
|
371 |
+
// Out-of-place Bernoulli: resizes `result` to self's shape, samples with
// per-element probabilities taken from `self`, and propagates names.
template<template<typename> class bernoulli_tensor_kernel, typename RNG>
Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
  // result.resize_as_(self) requires self to have same dtype as result, so we
  // use resize_ instead.
  // TODO: Fix resize_as_. See pytorch/pytorch#11665.
  result.resize_(self.sizes());
  bernoulli_impl_<bernoulli_tensor_kernel, RNG>(result, self, gen);
  namedinference::propagate_names(result, self);
  return result;
}
|
381 |
+
|
382 |
+
#undef CHECK_OUT_OF_BOUNDS
|
383 |
+
#undef WARN_OUT_OF_BOUNDS
|
384 |
+
|
385 |
+
} // namespace at::native::templates
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Distributions.h
ADDED
@@ -0,0 +1,518 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/Math.h>
|
4 |
+
#include <c10/macros/Macros.h>
|
5 |
+
#include <c10/util/MathConstants.h>
|
6 |
+
|
7 |
+
// ROCM hcc doesn't work well with using std:: in kernel functions
|
8 |
+
#if defined(__CUDA_ARCH__)
|
9 |
+
#include <c10/cuda/CUDAMathCompat.h>
|
10 |
+
#define compat_exp c10::cuda::compat::exp
|
11 |
+
#define compat_ceil c10::cuda::compat::ceil
|
12 |
+
#define compat_floor c10::cuda::compat::floor
|
13 |
+
#define compat_log c10::cuda::compat::log
|
14 |
+
#define compat_pow c10::cuda::compat::pow
|
15 |
+
#define compat_sqrt c10::cuda::compat::sqrt
|
16 |
+
#define compat_tan c10::cuda::compat::tan
|
17 |
+
#define compat_abs c10::cuda::compat::abs
|
18 |
+
#define compat_log1p c10::cuda::compat::log1p
|
19 |
+
#elif defined(__HIPCC__)
|
20 |
+
#include <c10/hip/HIPMathCompat.h>
|
21 |
+
#define compat_exp c10::hip::compat::exp
|
22 |
+
#define compat_ceil c10::hip::compat::ceil
|
23 |
+
#define compat_floor c10::hip::compat::floor
|
24 |
+
#define compat_log c10::hip::compat::log
|
25 |
+
#define compat_pow c10::hip::compat::pow
|
26 |
+
#define compat_sqrt c10::hip::compat::sqrt
|
27 |
+
#define compat_tan c10::hip::compat::tan
|
28 |
+
#define compat_abs c10::hip::compat::abs
|
29 |
+
#define compat_log1p c10::hip::compat::log1p
|
30 |
+
#else
|
31 |
+
#define compat_exp std::exp
|
32 |
+
#define compat_ceil std::ceil
|
33 |
+
#define compat_floor std::floor
|
34 |
+
#define compat_log std::log
|
35 |
+
#define compat_pow std::pow
|
36 |
+
#define compat_sqrt std::sqrt
|
37 |
+
#define compat_tan std::tan
|
38 |
+
#define compat_abs std::abs
|
39 |
+
#define compat_log1p std::log1p
|
40 |
+
#endif
|
41 |
+
|
42 |
+
namespace {
|
43 |
+
|
44 |
+
#if !defined(__CUDA_ARCH__) && !defined(__HIPCC__)
|
45 |
+
// we cannot use std::isnan directly due to some incompatibility of
|
46 |
+
// gcc constexpr'ing and nvcc
|
47 |
+
using std::isnan;
|
48 |
+
#endif
|
49 |
+
|
50 |
+
// Here sampler_t should be function type scalar_t(void). For gpu
|
51 |
+
// "sampler" is a device function, but since ROCM doesn't have
|
52 |
+
// equivalent to nvstd::function, we use a template type parameter to
|
53 |
+
// capture it.
|
54 |
+
// Thin wrapper around a sampling callable; sample() forwards to it. Used so
// host and device sampling lambdas share one interface in the samplers below.
template<typename scalar_t, typename sampler_t>
struct BaseSampler {
  // The wrapped callable, invoked once per sample() call.
  sampler_t sampler;
  C10_DEVICE BaseSampler(const sampler_t& sampler): sampler(sampler) {}
  C10_DEVICE scalar_t sample() {
    return sampler();
  }
};
|
62 |
+
|
63 |
+
// The function `sample_gamma` is
|
64 |
+
// is adapted from Numpy's distributions.c implementation.
|
65 |
+
// It is MIT licensed, so here is the copyright:
|
66 |
+
|
67 |
+
/* Copyright 2005 Robert Kern ([email protected])
|
68 |
+
*
|
69 |
+
* Permission is hereby granted, free of charge, to any person obtaining a
|
70 |
+
* copy of this software and associated documentation files (the
|
71 |
+
* "Software"), to deal in the Software without restriction, including
|
72 |
+
* without limitation the rights to use, copy, modify, merge, publish,
|
73 |
+
* distribute, sublicense, and/or sell copies of the Software, and to
|
74 |
+
* permit persons to whom the Software is furnished to do so, subject to
|
75 |
+
* the following conditions:
|
76 |
+
*
|
77 |
+
* The above copyright notice and this permission notice shall be included
|
78 |
+
* in all copies or substantial portions of the Software.
|
79 |
+
*
|
80 |
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
81 |
+
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
82 |
+
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
83 |
+
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
84 |
+
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
85 |
+
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
86 |
+
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
87 |
+
*/
|
88 |
+
|
89 |
+
// Draws one sample from Gamma(alpha) using the Marsaglia-Tsang (2000)
// acceptance-rejection method, with the alpha < 1 boost trick. Requires a
// standard-uniform and a standard-normal sampler.
template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t, typename normal_sampler_t>
C10_DEVICE scalar_t sample_gamma(scalar_t alpha, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform, BaseSampler<accscalar_t, normal_sampler_t>& standard_normal) {
  accscalar_t scale = 1.0f;

  // Boost alpha for higher acceptance probability.
  if (alpha < 1.0f) {
    if (alpha == 0.f) return 0.f;
    // Gamma(alpha) == Gamma(alpha+1) * U^(1/alpha); sample with the boosted
    // alpha and fold the correction into `scale`.
    scale *= compat_pow(1 - standard_uniform.sample(), 1.0f / alpha);
    alpha += 1.0f;
  }

  // This implements the acceptance-rejection method of Marsaglia and Tsang (2000)
  // doi:10.1145/358407.358414
  const accscalar_t d = alpha - 1.0f / 3.0f;
  const accscalar_t c = 1.0f / compat_sqrt(9.0f * d);
  for (;;) {
    accscalar_t x, y;
    do {
      x = standard_normal.sample();
      y = 1.0f + c * x;
    } while (y <= 0);
    const accscalar_t v = y * y * y;
    const accscalar_t u = 1 - standard_uniform.sample();
    const accscalar_t xx = x * x;
    // Cheap squeeze test first; fall back to the exact log test.
    if (u < 1.0f - 0.0331f * xx * xx)
      return static_cast<scalar_t>(scale * d * v);
    if (compat_log(u) < 0.5f * xx + d * (1.0f - v + compat_log(v)))
      return static_cast<scalar_t>(scale * d * v);
  }
}
|
119 |
+
|
120 |
+
/* the functions stirling_approx_tail, binomial_inversion, and btrs are adapted
|
121 |
+
* from TensorFlow's random_binomial_op.cc implementation. That code is under
|
122 |
+
* copyright: 2019 The TensorFlow Authors.
|
123 |
+
*
|
124 |
+
* It was released under the Apache License, Version 2.0 (the "License"), available at:
|
125 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
126 |
+
*/
|
127 |
+
|
128 |
+
// Tail correction term of Stirling's approximation to log(k!): tabulated for
// k <= 9, otherwise evaluated from the asymptotic series in 1/(k+1).
template<typename scalar_t>
C10_DEVICE scalar_t stirling_approx_tail(scalar_t k) {
  const static scalar_t kTailValues[] = {
    0.0810614667953272,
    0.0413406959554092,
    0.0276779256849983,
    0.02079067210376509,
    0.0166446911898211,
    0.0138761288230707,
    0.0118967099458917,
    0.0104112652619720,
    0.00925546218271273,
    0.00833056343336287
  };
  if (k <= 9) {
    return kTailValues[static_cast<size_t>(k)];
  }
  scalar_t kp1sq = (k + 1) * (k + 1);
  return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1);
}
|
148 |
+
|
149 |
+
|
150 |
+
// Draws one Binomial(count, prob) sample by inversion: accumulates geometric
// inter-arrival gaps until their sum exceeds `count`. Efficient when
// count * prob is small.
template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
C10_DEVICE scalar_t binomial_inversion(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
  accscalar_t U;
  accscalar_t geom_sum = 0;
  scalar_t num_geom = 0;

  // log(1 - prob), computed stably for small prob.
  accscalar_t logprob = compat_log1p(-prob);

  while (1) {
    U = standard_uniform.sample();
    // Geometric sample via inversion of its CDF.
    accscalar_t geom = compat_ceil(compat_log(U) / logprob);
    geom_sum += geom;
    if (geom_sum > count) {
      break;
    }
    num_geom = num_geom + 1;
  }
  return num_geom;
}
|
169 |
+
|
170 |
+
// Draws one Binomial(count, prob) sample with Hormann's BTRS
// (transformed-rejection with squeeze) algorithm. Efficient when
// count * prob is large; see the TensorFlow-derived notice above.
template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
C10_DEVICE scalar_t btrs(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
  scalar_t k;
  accscalar_t U, V, us;

  // This is spq in the paper.
  const accscalar_t stddev = compat_sqrt(count * prob * (1 - prob));

  // Other coefficients for Transformed Rejection sampling.
  const accscalar_t b = 1.15 + 2.53 * stddev;
  const accscalar_t a = -0.0873 + 0.0248 * b + 0.01 * prob;
  const accscalar_t c = count * prob + 0.5;
  const accscalar_t v_r = 0.92 - 4.2 / b;
  const accscalar_t r = prob / (1 - prob);

  const accscalar_t alpha = (2.83 + 5.1 / b) * stddev;
  const accscalar_t m = compat_floor((count + 1) * prob);

  while (1) {
    U = standard_uniform.sample() - 0.5;
    V = standard_uniform.sample();

    us = 0.5 - compat_abs(U);
    // Candidate via the transformation of (U, V).
    k = static_cast<scalar_t>(compat_floor((2 * a / us + b) * U + c));

    // Reject non-sensical answers.
    if (k < 0 || k > count) {
      continue;
    }
    // Region for which the box is tight, and we can return our calculated value.
    // This should happen 0.86 * v_r times. In the limit as n * p is large,
    // the acceptance rate converges to ~79% (and in the lower regime it is ~24%).
    if (us >= 0.07 && V <= v_r) {
      return k;
    }

    // This deviates from Hormann's BTRS algorithm, as there is a log missing.
    // For all (u, v) pairs outside of the bounding box, this calculates the
    // transformed-reject ratio.
    V = compat_log(V * alpha / (a / (us * us) + b));
    accscalar_t upperbound =
        ((m + 0.5) * compat_log((m + 1) / (r * (count - m + 1))) +
         (count + 1) * compat_log((count - m + 1) / (count - k + 1)) +
         (k + 0.5) * compat_log(r * (count - k + 1) / (k + 1)) +
         stirling_approx_tail<accscalar_t>(m) + stirling_approx_tail<accscalar_t>(count - m) -
         stirling_approx_tail<accscalar_t>(k) - stirling_approx_tail<accscalar_t>(count - k));

    if (V <= upperbound) {
      return k;
    }
  }
}
|
222 |
+
|
223 |
+
// Draws one Binomial(count, prob) sample, picking between BTRS (large
// count*prob) and inversion (small count*prob), mirroring the sample for
// 1 - prob when prob > 0.5. A NaN prob satisfies none of the comparisons and
// falls through to the final else, yielding NaN.
template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
C10_DEVICE scalar_t sample_binomial(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
  if (count <= 0.0 || prob <= 0.0) {
    return 0;
  } else if (prob >= 1.0) {
    return count;
  } else if (prob <= 0.5) {
    if (count * prob >= 10.0) {
      // btrs
      return btrs<scalar_t, accscalar_t, uniform_sampler_t>(count, prob, standard_uniform);
    } else {
      // binomial inversion
      return binomial_inversion<scalar_t, accscalar_t, uniform_sampler_t>(count, prob, standard_uniform);
    }
  } else if (prob > 0.5) {
    // Sample the complement with qprob = 1 - prob and reflect.
    scalar_t qprob = 1.0 - prob;
    if (count * qprob >= 10.0) {
      // btrs
      return count - btrs<scalar_t, accscalar_t, uniform_sampler_t>(count, qprob, standard_uniform);
    } else {
      // count - binomial inversion
      return count - binomial_inversion<scalar_t, accscalar_t, uniform_sampler_t>(count, qprob, standard_uniform);
    }
  } else {
    // prob is nan?
    return static_cast<scalar_t>(NAN);
  }
}
|
251 |
+
|
252 |
+
/*
|
253 |
+
* This function is derived from the implementation of the digamma function in the Cephes Math Library.
|
254 |
+
* See note [3-Clause BSD License for the Cephes Math Library] in ATen/native/Math.h.
|
255 |
+
*/
|
256 |
+
/*
 * This function is derived from the implementation of the digamma function in the Cephes Math Library.
 * See note [3-Clause BSD License for the Cephes Math Library] in ATen/native/Math.h.
 */
// Evaluates digamma(x): reflection formula for x < 0, recurrence to push
// x >= 10, then the asymptotic series.
template<typename scalar_t, typename accscalar_t>
C10_DEVICE static inline scalar_t digamma_one(scalar_t x) {
  // Precomputed digamma(10).
  constexpr accscalar_t PSI_10 = 2.25175258906672110764;
  if (x == 0) {
    return INFINITY;
  }
  accscalar_t additional_summand = 0;
  int x_is_integer = x == compat_floor(x);
  if (x < 0) {
    if (x_is_integer) {
      // Poles at non-positive integers.
      return INFINITY;
    }
    // Reflection formula: digamma(x) = digamma(1-x) - pi/tan(pi*x).
    // it is more standard to write this as recursion, but
    // nvcc does not like that
    additional_summand = -c10::pi<scalar_t> /
        compat_tan(c10::pi<scalar_t> * x);
    x = 1 - x;
  }

  // Push x to be >= 10 using digamma(x) = digamma(x+1) - 1/x.
  accscalar_t result = 0;
  while (x < 10) {
    result -= 1 / x;
    x += 1;
  }
  if (x == 10) {
    return result + PSI_10 + additional_summand;
  }

  // Compute asymptotic digamma
  static const accscalar_t A[] = {
    8.33333333333333333333E-2,
    -2.10927960927960927961E-2,
    7.57575757575757575758E-3,
    -4.16666666666666666667E-3,
    3.96825396825396825397E-3,
    -8.33333333333333333333E-3,
    8.33333333333333333333E-2,
  };

  accscalar_t y = 0;
  if (x < 1.0e17f) {
    // Polynomial correction term in 1/x^2; negligible for huge x.
    accscalar_t z = 1.0 / (x * x);
    y = z * polevl<accscalar_t>(z, A, 6);
  }
  return static_cast<scalar_t>(
      result + compat_log(x) - (0.5f / x) - y + additional_summand);
}
|
304 |
+
|
305 |
+
// Computes the reparameterized gradient -(d/dalpha cdf(x;alpha)) / pdf(x;alpha)
// for random number x drawn from a standard Gamma distribution Gamma(alpha).
//
// Three regimes are used (small x, large alpha, and a fitted rational
// approximation for everything else); the branch thresholds below select
// among them.
template <typename scalar_t, typename accscalar_t>
C10_HOST_DEVICE scalar_t standard_gamma_grad_one(scalar_t alpha_, scalar_t x_) {
  // Use a Taylor series expansion for small x.
  // Work in the (typically wider) accumulation type for stability.
  accscalar_t x = static_cast<accscalar_t>(x_);
  accscalar_t alpha = static_cast<accscalar_t>(alpha_);
  if (x < 0.8f) {
    accscalar_t numer = 1;
    accscalar_t denom = alpha;
    // series1 accumulates numer / (alpha + i) terms (the CDF series);
    // series2 accumulates numer / (alpha + i)^2 terms, used for the
    // alpha-derivative of the CDF below.
    auto series1 = numer / denom;
    auto series2 = numer / (denom * denom);
    for (int i = 1; i <= 5; ++i) {
      numer *= -x / static_cast<accscalar_t>(i);
      denom += 1;
      series1 += numer / denom;
      series2 += numer / (denom * denom);
    }
    const auto pow_x_alpha = compat_pow(x, alpha);
    const auto gamma_pdf = compat_pow(x, alpha - 1) * compat_exp(-x);
    const auto gamma_cdf = pow_x_alpha * series1;
    const auto gamma_cdf_alpha =
        (compat_log(x) - digamma_one<accscalar_t, accscalar_t>(alpha)) *
            gamma_cdf -
        pow_x_alpha * series2;
    const auto result = -gamma_cdf_alpha / gamma_pdf;
    // Map NaNs (e.g. from 0/0 at the boundary) to a zero gradient.
    return isnan(result) ? static_cast<scalar_t>( 0.f ) : static_cast<scalar_t>(result);
  }

  // Use a Rice saddle point expansion for large alpha.
  if (alpha > 8.0f) {
    if (0.9f * alpha <= x && x <= 1.1f * alpha) {
      // Near x == alpha the general expansion below has a singularity at
      // x == alpha; use a dedicated polynomial approximation instead.
      const auto numer_1 = 1 + 24 * alpha * (1 + 12 * alpha);
      const auto numer_2 = 1440 * (alpha * alpha) + 6 * x * (53 - 120 * x)
          - 65 * x * x / alpha + alpha * (107 + 3600 * x);
      const auto denom = 1244160 * (alpha * alpha) * (alpha * alpha);
      return static_cast<scalar_t>(numer_1 * numer_2 / denom);
    }
    const auto denom = compat_sqrt(8 * alpha);
    const auto term2 = denom / (alpha - x);
    const auto term3 = compat_pow(
        x - alpha - alpha * compat_log(x / alpha),
        static_cast<accscalar_t>(-1.5));
    // term3 changes sign across x == alpha.
    const auto term23 = (x < alpha) ? term2 - term3 : term2 + term3;
    const auto term1 = compat_log(x / alpha) * term23 -
        compat_sqrt(2 / alpha) * (alpha + x) / ((alpha - x) * (alpha - x));
    // Leading terms of the Stirling series correction.
    const auto stirling = 1 + 1 / (12 * alpha) * (1 + 1 / (24 * alpha));
    const auto numer = x * term1;
    return static_cast<scalar_t>(-stirling * numer / denom);
  }

  // Use a bivariate rational approximation to the reparameterized gradient.
  // u, v are log-space coordinates; the gradient is exp(p(u,v) / q(u,v)) for
  // the fitted coefficient table coef_uv.
  const auto u = compat_log(x / alpha);
  const auto v = compat_log(alpha);
  static const accscalar_t coef_uv[3][8] = {
    {0.16009398, -0.094634809, 0.025146376, -0.0030648343,
     1, 0.32668115, 0.10406089, 0.0014179084},
    {0.53487893, 0.1298071, 0.065735949, -0.0015649758,
     0.16639465, 0.020070113, -0.0035938915, -0.00058392623},
    {0.040121004, -0.0065914022, -0.0026286047, -0.0013441777,
     0.017050642, -0.0021309326, 0.00085092367, -1.5247877e-07},
  };
  // Collapse the u-dimension first (quadratic in u per coefficient).
  accscalar_t coef_v[8];
  for (int i = 0; i < 8; ++ i) {
    coef_v[i] = coef_uv[0][i] + u * (coef_uv[1][i] + u * coef_uv[2][i]);
  }
  // Cubic numerator / denominator in v, evaluated by Horner's scheme.
  const auto p = coef_v[0] + v * (coef_v[1] + v * (coef_v[2] + v * coef_v[3]));
  const auto q = coef_v[4] + v * (coef_v[5] + v * (coef_v[6] + v * coef_v[7]));
  return static_cast<scalar_t>(compat_exp(p / q));
}
|
375 |
+
|
376 |
+
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
|
377 |
+
// Assumes x is close to zero and uses a Taylor expansion.
|
378 |
+
template <typename scalar_t, typename accscalar_t>
|
379 |
+
C10_DEVICE static inline scalar_t _beta_grad_alpha_small(scalar_t x, scalar_t alpha, scalar_t beta) {
|
380 |
+
const scalar_t factor = digamma_one<scalar_t, accscalar_t>(alpha)
|
381 |
+
- digamma_one<scalar_t, accscalar_t>(alpha + beta) - compat_log(x);
|
382 |
+
scalar_t numer = 1;
|
383 |
+
scalar_t series = numer / alpha * (factor + 1 / alpha);
|
384 |
+
for (int i = 1; i <= 10; ++i) {
|
385 |
+
scalar_t casted_i = static_cast<scalar_t>(i);
|
386 |
+
numer *= (casted_i - beta) * x / casted_i;
|
387 |
+
const scalar_t denom = alpha + casted_i;
|
388 |
+
series += numer / denom * (factor + 1 / denom);
|
389 |
+
}
|
390 |
+
const scalar_t result = x * compat_pow(1 - x, -beta) * series;
|
391 |
+
return isnan(result) ? static_cast<scalar_t>( 0.f ) : result;
|
392 |
+
}
|
393 |
+
|
394 |
+
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta.
|
395 |
+
// Assumes x is close to zero and uses a Taylor expansion.
|
396 |
+
template <typename scalar_t, typename accscalar_t>
|
397 |
+
C10_DEVICE static inline scalar_t _beta_grad_beta_small(scalar_t x, scalar_t alpha, scalar_t beta) {
|
398 |
+
const scalar_t factor = digamma_one<scalar_t, accscalar_t>(alpha + beta) - digamma_one<scalar_t, accscalar_t>(beta);
|
399 |
+
scalar_t numer = 1, betas = 1, dbetas = 0, series = factor / alpha;
|
400 |
+
for (int i = 1; i <= 8; ++i) {
|
401 |
+
scalar_t casted_i = static_cast<scalar_t>(i);
|
402 |
+
numer *= -x / casted_i;
|
403 |
+
dbetas = dbetas * (beta - casted_i) + betas;
|
404 |
+
betas = betas * (beta - casted_i);
|
405 |
+
series += numer / (alpha + casted_i) * (dbetas + factor * betas);
|
406 |
+
}
|
407 |
+
const scalar_t result = -compat_pow(1 - x, 1 - beta) * series;
|
408 |
+
return isnan(result) ? static_cast<scalar_t>( 0.f ) : result;
|
409 |
+
}
|
410 |
+
|
411 |
+
// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
// Assumes alpha and beta are both large and uses a Rice saddle point expansion.
// To ensure numerical stability, this computation is performed at higher precision.
template<typename scalar_t, typename accscalar_t>
C10_DEVICE static inline scalar_t _beta_grad_alpha_mid(accscalar_t x, accscalar_t alpha, accscalar_t beta) {
  const accscalar_t total = alpha + beta;
  // Mean and standard deviation of Beta(alpha, beta).
  const accscalar_t mean = alpha / total;
  const accscalar_t std = compat_sqrt(alpha * beta / (total + 1)) / total;
  if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) {
    // Avoid the singularity at x = mean.
    // Nested Horner-style polynomial in alpha with beta/x coefficients.
    const accscalar_t poly = 47 * x * (beta * beta) * (beta * beta) + alpha * (
                           (43 + 20 * (16 + 27 * beta) * x) * (beta * beta) * beta + alpha * (
                           3 * (59 + 180 * beta - 90 * x) * (beta * beta) + alpha * (
                           (453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * (
                           8 * (1 - x) * (135 * beta - 11)))));
    const accscalar_t prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total);
    const accscalar_t prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total);
    return prefactor_num / (1 - x) * poly / prefactor_den;
  }
  const accscalar_t prefactor = -x / compat_sqrt(2 * alpha * beta / total);
  // Ratio of Stirling series corrections for alpha, beta and total.
  const accscalar_t stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha * alpha))
                             * (1 + 1 / (12 * beta) + 1 / (288 * beta * beta))
                             / (1 + 1 / (12 * total) + 1 / (288 * total * total));
  const accscalar_t term1_num = 2 * (alpha * alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta * beta);
  const accscalar_t axbx = alpha * (x - 1) + beta * x;
  const accscalar_t term1_den = compat_sqrt(2 * alpha / beta) * compat_pow(total, static_cast<accscalar_t>(1.5f)) * axbx * axbx;
  const accscalar_t term1 = term1_num / term1_den;
  const accscalar_t term2 = 0.5f * compat_log(alpha / (total * x));
  const accscalar_t term3_num = compat_sqrt(8 * alpha * beta / total);
  const accscalar_t term3_den = beta * x + alpha * (x - 1);
  const accscalar_t term3 = term3_num / term3_den;
  const accscalar_t term4_base = beta * compat_log(beta / (total * (1 - x))) +
                                 alpha * compat_log(alpha / (total * x));
  const accscalar_t term4 = compat_pow(term4_base, static_cast<accscalar_t>(-1.5f));
  // term4 switches sign across x == mean.
  const accscalar_t term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4));
  return static_cast<scalar_t>(stirling * prefactor * term1234);
}
|
448 |
+
|
449 |
+
// Computes a scaled reparameterized gradient
// -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x)
// for random number x drawn from a Beta distribution Beta(alpha,beta).
// This function inputs total=alpha+beta to make it easy to implement
// Dirichlet reparameterized gradients in terms of Betas.
template<typename scalar_t, typename accscalar_t>
C10_HOST_DEVICE static inline scalar_t dirichlet_grad_one(scalar_t x, scalar_t alpha, scalar_t total) {
  // Higher-precision copies of the inputs for the mid-range branch and the
  // rational correction below.
  accscalar_t x_ = static_cast<accscalar_t>(x);
  accscalar_t alpha_ = static_cast<accscalar_t>(alpha);
  accscalar_t total_ = static_cast<accscalar_t>(total);

  const scalar_t beta = total - alpha;
  const accscalar_t beta_ = total_ - alpha_;
  // Regime-selection statistic: small near either endpoint of [0, 1].
  const scalar_t boundary = total * x * (1 - x);

  // Use an asymptotic approximation for x close to 0.
  if (x <= 0.5f && boundary < 2.5f) {
    return _beta_grad_alpha_small<scalar_t, accscalar_t>(x, alpha, beta);
  }

  // Use an asymptotic approximation for x close to 1.
  // Derived from the x -> 1-x, alpha <-> beta symmetry of the Beta density.
  if (x >= 0.5f && boundary < 0.75f) {
    return -_beta_grad_beta_small<scalar_t, accscalar_t>(1 - x, beta, alpha);
  }

  // Use an asymptotic approximation when alpha and (total - alpha) are both large.
  if (alpha > 6 && beta > 6) {
    return _beta_grad_alpha_mid<scalar_t, accscalar_t>(x_, alpha_, beta_);
  }

  // Use a rational correction to an analytic approximation.
  // c[0] holds numerator coefficients, c[1] denominator coefficients of a
  // fitted trivariate rational function in (u, a, b) below.
  static const accscalar_t c[2][3][3][4] = {
    {{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863},
      {0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033},
      {-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}},
     {{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814},
      {-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057},
      {0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}},
     {{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565},
      {0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181},
      {0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}},
    {{{1, -0.02924021934, -0.04438342661, 0.007285809825},
      {0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521},
      {-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}},
     {{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273},
      {0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956},
      {-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}},
     {{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05},
      {0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05},
      {-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}},
  };
  // Log-space coordinates of the fit.
  const accscalar_t u = compat_log(x_);
  const accscalar_t a = compat_log(alpha_) - u;
  const accscalar_t b = compat_log(total_) - a;
  const accscalar_t pow_u[3] = {1, u, u * u};
  const accscalar_t pow_a[3] = {1, a, a * a};
  // Evaluate numerator p and denominator q; the innermost cubic in b uses
  // Horner's scheme.
  accscalar_t p = 0.0;
  accscalar_t q = 0.0;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      const accscalar_t ua = pow_u[i] * pow_a[j];
      p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3])));
      q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3])));
    }
  }
  // Analytic approximation, corrected by the rational factor p/q.
  const accscalar_t approx = x_ * (digamma_one<scalar_t, accscalar_t>(total_) - digamma_one<scalar_t, accscalar_t>(alpha_)) / beta_;
  return static_cast<scalar_t>(p / q * approx);
}
|
517 |
+
|
518 |
+
} // namespace
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/Tensor.h>
|
2 |
+
#include <ATen/Config.h>
|
3 |
+
#include <cstdint>
|
4 |
+
|
5 |
+
#ifdef USE_FBGEMM
|
6 |
+
#include <fbgemm/FbgemmEmbedding.h>
|
7 |
+
#endif
|
8 |
+
|
9 |
+
namespace at::native {
|
10 |
+
|
11 |
+
// Validates embedding_bag inputs (weight/indices/offsets and the optional
// per-sample weights) for the given mode. Implementation lives in
// EmbeddingBag.cpp; presumably raises on inconsistent arguments — confirm
// against the .cpp file.
void check_arguments(
    const Tensor& weight,
    const Tensor& indices,
    const Tensor& offsets,
    const int64_t mode,
    const c10::optional<Tensor>& per_sample_weights,
    bool include_last_offset);

// Fills `bag_size_out` with the number of indices per bag, derived from
// consecutive offsets. Behavior differences for mode/requires_grad are
// implementation-defined here — see EmbeddingBag.cpp.
void make_bag_size_out(
    Tensor& bag_size_out,
    const Tensor& offsets,
    const Tensor& indices,
    const int64_t mode,
    const bool include_last_offset,
    const bool requires_grad);

// Fills `max_indices_out`, the argmax bookkeeping tensor used by the
// "max" reduction mode.
void make_max_indices_out(
    Tensor& max_indices_out,
    const Tensor& weight,
    const Tensor& indices,
    const Tensor& offsets,
    const Tensor& bag_size,
    const int64_t mode,
    bool include_last_offset);

// Fills `offset2bag`, the mapping from each index position to its bag id,
// and may touch `output` as part of the computation (see EmbeddingBag.cpp).
// padding_idx = -1 means "no padding index".
void make_offset2bag_out(
    Tensor& offset2bag,
    Tensor& output,
    const Tensor& weight,
    const Tensor& indices,
    const Tensor& offsets,
    const int64_t mode,
    const c10::optional<Tensor>& per_sample_weights,
    const int64_t padding_idx = -1);
|
45 |
+
|
46 |
+
#ifdef USE_FBGEMM
|
47 |
+
|
48 |
+
// Pairs a generated fbgemm EmbeddingSpMDM kernel with the block (embedding
// dimension) size it was generated for. Used as a storage mixin by
// _EmbeddingBagKernelCacheImpl below.
template<bool has_weight, typename TIndex, typename TData>
struct _CallbackAndBlockSize {
  using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature<TData, TIndex, TIndex, TData>::Type;

  // -1 / nullptr means "no kernel cached".
  int64_t blockSize = -1;
  TCallback callback = nullptr;

  // JIT-generates an fbgemm kernel for the given block size; the remaining
  // kernel options are fixed for the embedding_bag use case.
  static TCallback generateCallback(int64_t block_size) {
    return fbgemm::GenerateEmbeddingSpMDM<TData, TIndex, TIndex, TData>(
        block_size,
        has_weight,
        /* normalize_by_lengths */false,
        /* prefetch */16,
        /* is_weight_positional */false,
        /* use_offsets */true);
  }

  _CallbackAndBlockSize() = default;

  // Eagerly generates and caches the kernel when a block size is known.
  explicit _CallbackAndBlockSize(c10::optional<int64_t> maybe_block_size)
    : blockSize(maybe_block_size.value_or(-1))
    , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr)
  {}
};
|
72 |
+
|
73 |
+
// Cache of pre-generated fbgemm kernels, one per (has_weight, index type,
// data type) combination supplied as a StorageMixin.
template<typename... StorageMixins>
struct _EmbeddingBagKernelCacheImpl : private StorageMixins... {

  _EmbeddingBagKernelCacheImpl() = default;
  // use each of the mixins to store corresponding kernel and block size
  explicit _EmbeddingBagKernelCacheImpl(c10::optional<int64_t> maybe_block_size)
    : StorageMixins(maybe_block_size)...
  {}

  // this method is thread safe (call sites may call from different threads)
  template<bool has_weight, typename TIndex, typename TData>
  typename _CallbackAndBlockSize<has_weight, TIndex, TData>::TCallback
  getCallback(int64_t block_size) const {
    // if the cache doesn't store the kernel for the incoming block size
    // (so it is different from the one stored in corresponding mixin)
    // regenerate the kernel (not writing it into the cache so we avoid locks)
    if (block_size != _CallbackAndBlockSize<has_weight, TIndex, TData>::blockSize) {
      return _CallbackAndBlockSize<has_weight, TIndex, TData>::generateCallback(block_size);
    }
    // else retrieve the cached kernel from the corresponding mixin
    return _CallbackAndBlockSize<has_weight, TIndex, TData>::callback;
  }
};
|
96 |
+
|
97 |
+
// instantiate the cache with the list of storage mixins
// for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file
// (int32/int64 indices x float/half data x weighted/unweighted; `unsigned
// short` here presumably stands in for the 16-bit half type — confirm
// against EmbeddingBag.cpp).
using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl<
    _CallbackAndBlockSize<true, int32_t, float>,
    _CallbackAndBlockSize<false, int32_t, float>,
    _CallbackAndBlockSize<true, int64_t, float>,
    _CallbackAndBlockSize<false, int64_t, float>,
    _CallbackAndBlockSize<true, int32_t, unsigned short>,
    _CallbackAndBlockSize<false, int32_t, unsigned short>,
    _CallbackAndBlockSize<true, int64_t, unsigned short>,
    _CallbackAndBlockSize<false, int64_t, unsigned short>>;
#else
// Without fbgemm the cache is an empty placeholder so call sites compile
// unchanged.
struct _EmbeddingBagKernelCache {
  explicit _EmbeddingBagKernelCache(c10::optional<int64_t> /* maybe_block_size */) {}
};
#endif
|
113 |
+
|
114 |
+
// CPU embedding_bag kernel writing into caller-provided output tensors.
// `max_indices` may be null when the "max" mode bookkeeping is not needed;
// `fbgemm_kernel_cache` optionally supplies pre-generated fbgemm kernels.
void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag,
    Tensor& bag_size, Tensor* max_indices,
    const Tensor &weight, const Tensor &indices,
    const Tensor &offsets, const int64_t mode = 0,
    const c10::optional<Tensor>& per_sample_weights = c10::nullopt,
    bool include_last_offset = false,
    int64_t padding_idx = -1,
    _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);

// Full out-variant of the CPU embedding_bag op; mirrors the signature of
// the native embedding_bag entry point (see EmbeddingBag.cpp).
void _embedding_bag_cpu_out(
    at::Tensor& output,
    at::Tensor& offset2bag,
    at::Tensor& bag_size,
    at::Tensor* p_max_indices,
    const at::Tensor& weight,
    const at::Tensor& indices,
    const at::Tensor& offsets,
    const bool scale_grad_by_freq,
    const int64_t mode,
    const bool sparse,
    const c10::optional<at::Tensor>& per_sample_weights,
    const bool include_last_offset,
    const c10::optional<int64_t>& padding_idx,
    _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
|
138 |
+
|
139 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/FunctionOfAMatrixUtils.h
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/DispatchStub.h>
|
4 |
+
#include <cstdint>
|
5 |
+
|
6 |
+
namespace at {
|
7 |
+
struct TensorIterator;
|
8 |
+
|
9 |
+
namespace native {
|
10 |
+
|
11 |
+
// Kernel signature for _compute_linear_combination: iterates `iter` and
// accumulates `num_summations` input slices (stride `in_stride`) weighted
// by coefficients read with stride `coeff_stride`.
using _compute_linear_combination_fn = void(*)(
    TensorIterator& iter,
    int64_t in_stride,
    int64_t coeff_stride,
    int64_t num_summations
);

// Per-device dispatch entry point; registered in the CPU/CUDA impl files.
DECLARE_DISPATCH(_compute_linear_combination_fn, _compute_linear_combination_stub);
|
19 |
+
|
20 |
+
}} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h
ADDED
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <algorithm>
|
4 |
+
#include <cmath>
|
5 |
+
#include <cstdint>
|
6 |
+
#include <utility>
|
7 |
+
|
8 |
+
#include <ATen/native/GridSamplerUtils.h>
|
9 |
+
|
10 |
+
namespace at::native {
|
11 |
+
|
12 |
+
using detail::GridSamplerInterpolation;
|
13 |
+
using detail::GridSamplerPadding;
|
14 |
+
|
15 |
+
// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
// where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
// if align_corners: -1 and +1 get sent to the centers of the corner pixels
//     -1 --> 0
//     +1 --> (size - 1)
//     scale_factor = (size - 1) / 2
// if not align_corners: -1 and +1 get sent to the image edges
//     -1 --> -0.5
//     +1 --> (size - 1) + 0.5 == size - 0.5
//     scale_factor = size / 2
template <typename scalar_t>
static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
                                                bool align_corners) {
  // Shift [-1, 1] to [0, 2] first, then apply the mode-dependent scaling.
  const scalar_t shifted = coord + 1;
  return align_corners
      ? (shifted / 2) * (size - 1)     // map onto pixel centers [0, size-1]
      : (shifted * size - 1) / 2;      // map onto pixel edges [-0.5, size-0.5]
}
|
36 |
+
|
37 |
+
// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
// except that it also returns the `d output / d input` via pointer argument
// `grad_in`.
// This is useful in the backward pass of grid_sampler.
template <typename scalar_t>
static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
                                                         bool align_corners, scalar_t *grad_in) {
  if (!align_corners) {
    // Edge-aligned mapping [-1, 1] -> [-0.5, size - 0.5]; slope is size / 2.
    *grad_in = static_cast<scalar_t>(size) / 2;
    return ((coord + 1) * size - 1) / 2;
  }
  // Center-aligned mapping [-1, 1] -> [0, size - 1]; slope is (size - 1) / 2.
  *grad_in = static_cast<scalar_t>(size - 1) / 2;
  return ((coord + 1) / 2) * (size - 1);
}
|
54 |
+
|
55 |
+
// Clips coordinates to between 0 and clip_limit - 1
template<typename scalar_t>
static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
  // Clamp into [0, clip_limit - 1]. The min/max ordering (max first, min
  // outermost) is kept so NaN inputs resolve exactly as before.
  const scalar_t lower = static_cast<scalar_t>(0);
  const scalar_t upper = static_cast<scalar_t>(clip_limit - 1);
  return std::min(upper, std::max(in, lower));
}
|
60 |
+
|
61 |
+
// clip_coordinates_set_grad works similarly to clip_coordinates except that
// it also returns the `d output / d input` via pointer argument `grad_in`.
// This is useful in the backward pass of grid_sampler.
template<typename scalar_t>
static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
                                                 scalar_t *grad_in) {
  // Note that it is important for the gradient calculation that borders
  // are considered out of bounds.
  const scalar_t zero = static_cast<scalar_t>(0);
  if (in <= zero) {
    *grad_in = zero;
    return zero;
  }
  const scalar_t upper = static_cast<scalar_t>(clip_limit - 1);
  if (in >= upper) {
    *grad_in = zero;
    return upper;
  }
  // Strictly interior: identity mapping with unit gradient.
  *grad_in = static_cast<scalar_t>(1);
  return in;
}
|
83 |
+
|
84 |
+
// Reflects coordinates until they fall between low and high (inclusive).
// The bounds are passed as twice their value so that half-integer values
// can be represented as ints.
template<typename scalar_t>
static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
                                           int64_t twice_high) {
  if (twice_low == twice_high) {
    // Degenerate interval: everything collapses to a single point.
    return static_cast<scalar_t>(0);
  }
  const scalar_t low = static_cast<scalar_t>(twice_low) / 2;
  const scalar_t range = static_cast<scalar_t>(twice_high - twice_low) / 2;
  const scalar_t dist = std::fabs(in - low);
  // `fmod` returns same sign as its first argument, which is non-negative
  // after the `fabs` above.
  const scalar_t rem = std::fmod(dist, range);
  const int n_flips = static_cast<int>(std::floor(dist / range));
  // Even number of reflections lands forward; odd lands mirrored.
  return (n_flips % 2 == 0) ? rem + low : range - rem + low;
}
|
105 |
+
|
106 |
+
// reflect_coordinates_set_grad works similarly to reflect_coordinates except
// that it also returns the `d output / d input` via pointer argument
// `grad_in`.
// This is useful in the backward pass of grid_sampler.
template<typename scalar_t>
static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low,
                                                    int64_t twice_high, scalar_t *grad_in) {
  if (twice_low == twice_high) {
    // Degenerate interval: constant output, zero gradient.
    *grad_in = static_cast<scalar_t>(0);
    return static_cast<scalar_t>(0);
  }
  const scalar_t low = static_cast<scalar_t>(twice_low) / 2;
  const scalar_t range = static_cast<scalar_t>(twice_high - twice_low) / 2;
  scalar_t dist = in - low;
  // Fold negative offsets to positive ones, tracking the sign flip so the
  // gradient direction is preserved.
  int sign = 1;
  if (dist < static_cast<scalar_t>(0)) {
    sign = -1;
    dist = -dist;
  }
  // `fmod` returns same sign as its first argument, which is non-negative
  // after the fold above.
  const scalar_t rem = std::fmod(dist, range);
  const int n_flips = static_cast<int>(std::floor(dist / range));
  if (n_flips % 2 == 0) {
    *grad_in = static_cast<scalar_t>(sign);
    return rem + low;
  }
  // Odd reflection count mirrors the coordinate and negates the gradient.
  *grad_in = static_cast<scalar_t>(-sign);
  return range - rem + low;
}
|
138 |
+
|
139 |
+
// Mapping the out-of-boundary points back into boundary
|
140 |
+
// This would only affect padding_mode=border or reflection
|
141 |
+
template<typename scalar_t>
|
142 |
+
static inline scalar_t compute_coordinates(scalar_t coord, int64_t size,
|
143 |
+
GridSamplerPadding padding_mode,
|
144 |
+
bool align_corners) {
|
145 |
+
if (padding_mode == GridSamplerPadding::Border) {
|
146 |
+
// clip coordinates to image borders
|
147 |
+
coord = clip_coordinates(coord, size);
|
148 |
+
} else if (padding_mode == GridSamplerPadding::Reflection) {
|
149 |
+
// reflect coordinates by image borders
|
150 |
+
if (align_corners) {
|
151 |
+
coord = reflect_coordinates(coord, 0, 2*(size - 1));
|
152 |
+
} else {
|
153 |
+
coord = reflect_coordinates(coord, -1, 2*size - 1);
|
154 |
+
}
|
155 |
+
// clip coordinates to image borders
|
156 |
+
coord = clip_coordinates(coord, size);
|
157 |
+
}
|
158 |
+
return coord;
|
159 |
+
}
|
160 |
+
|
161 |
+
// Computes the pixel source index value for a grid coordinate
|
162 |
+
template <typename scalar_t>
|
163 |
+
static inline scalar_t grid_sampler_compute_source_index(
|
164 |
+
scalar_t coord,
|
165 |
+
int64_t size,
|
166 |
+
GridSamplerPadding padding_mode,
|
167 |
+
bool align_corners) {
|
168 |
+
coord = grid_sampler_unnormalize(coord, size, align_corners);
|
169 |
+
coord = compute_coordinates(coord, size, padding_mode, align_corners);
|
170 |
+
return coord;
|
171 |
+
}
|
172 |
+
|
173 |
+
// grid_sampler_compute_source_index_set_grad works similarly to
// grid_sampler_compute_source_index except that it also returns the
// `d output / d input` via pointer argument `grad_in`.
// This is useful in the backward pass of grid_sampler.
template <typename scalar_t>
static inline scalar_t grid_sampler_compute_source_index_set_grad(
    scalar_t coord,
    int64_t size,
    GridSamplerPadding padding_mode,
    bool align_corners,
    scalar_t *grad_in) {
  scalar_t grad_clip, grad_refl;
  // Unnormalize first; *grad_in starts as the unnormalization slope and is
  // chained (multiplied) with each subsequent transformation's gradient.
  coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
  if (padding_mode == GridSamplerPadding::Border) {
    // clip coordinates to image borders
    coord = clip_coordinates_set_grad(coord, size, &grad_clip);
    *grad_in = (*grad_in) * grad_clip;
  } else if (padding_mode == GridSamplerPadding::Reflection) {
    // reflect coordinates by image borders
    if (align_corners) {
      coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
    } else {
      coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
    }
    // clip coordinates to image borders
    coord = clip_coordinates_set_grad(coord, size, &grad_clip);
    // Chain rule through reflection then clipping.
    *grad_in = (*grad_in) * grad_refl * grad_clip;
  }
  return coord;
}
|
203 |
+
|
204 |
+
// True iff (h, w) indexes a valid pixel of an H x W image.
static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
  const bool h_ok = (h >= 0) && (h < H);
  const bool w_ok = (w >= 0) && (w < W);
  return h_ok && w_ok;
}
|
207 |
+
|
208 |
+
// True iff (d, h, w) indexes a valid voxel of a D x H x W volume.
static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
  const bool d_ok = (d >= 0) && (d < D);
  const bool h_ok = (h >= 0) && (h < H);
  const bool w_ok = (w >= 0) && (w < W);
  return d_ok && h_ok && w_ok;
}
|
211 |
+
|
212 |
+
// Reads data[y][x] (with strides sH, sW) after resolving the padding mode;
// returns 0 for indices that remain out of bounds (zeros padding).
template<typename scalar_t>
static inline scalar_t get_value_bounded(
    scalar_t* data,
    scalar_t x,
    scalar_t y,
    int64_t W,
    int64_t H,
    int64_t sW,
    int64_t sH,
    GridSamplerPadding padding_mode,
    bool align_corners) {

  // Map out-of-boundary coordinates back into the image per padding mode.
  x = compute_coordinates(x, W, padding_mode, align_corners);
  y = compute_coordinates(y, H, padding_mode, align_corners);

  // Truncation toward zero; callers are expected to pass integral-valued
  // coordinates here (e.g. the floor-derived taps of bicubic sampling).
  int64_t ix = static_cast<int64_t>(x);
  int64_t iy = static_cast<int64_t>(y);

  if (within_bounds_2d(iy, ix, H, W)) {
    return data[iy * sH + ix * sW];
  }
  // Out of bounds after padding resolution: zeros padding semantics.
  return static_cast<scalar_t>(0);
}
|
235 |
+
|
236 |
+
template<typename scalar_t>
|
237 |
+
static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
|
238 |
+
int64_t sH, int64_t sW, int64_t H, int64_t W,
|
239 |
+
scalar_t delta) {
|
240 |
+
if (within_bounds_2d(h, w, H, W)) {
|
241 |
+
data[h * sH + w * sW] += delta;
|
242 |
+
}
|
243 |
+
}
|
244 |
+
|
245 |
+
template<typename scalar_t>
|
246 |
+
static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
|
247 |
+
int64_t sD, int64_t sH, int64_t sW,
|
248 |
+
int64_t D, int64_t H, int64_t W,
|
249 |
+
scalar_t delta) {
|
250 |
+
if (within_bounds_3d(d, h, w, D, H, W)) {
|
251 |
+
data[d * sD + h * sH + w * sW] += delta;
|
252 |
+
}
|
253 |
+
}
|
254 |
+
|
255 |
+
// Accumulates `delta` at the (x, y) location after resolving the padding
// mode; writes outside the image are dropped (zeros padding semantics).
// Counterpart of get_value_bounded, used in the backward pass.
template<typename scalar_t>
static inline void add_value_bounded(
    scalar_t* data,
    scalar_t x,
    scalar_t y,
    int64_t W,
    int64_t H,
    int64_t sW,
    int64_t sH,
    scalar_t delta,
    GridSamplerPadding padding_mode,
    bool align_corners) {

  // Map out-of-boundary coordinates back into the image per padding mode.
  x = compute_coordinates(x, W, padding_mode, align_corners);
  y = compute_coordinates(y, H, padding_mode, align_corners);

  // Truncation toward zero, matching get_value_bounded.
  int64_t ix = static_cast<int64_t>(x);
  int64_t iy = static_cast<int64_t>(y);

  safe_add_2d(data, iy, ix, sH, sW, H, W, delta);
}
|
276 |
+
|
277 |
+
// Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
template<typename scalar_t>
static inline void get_cubic_coefficients_grad(
    scalar_t coeffs[4],
    scalar_t t) {

  // Must be the same as forward calculation in
  // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients
  const scalar_t A = -0.75;

  // Derivative of each of the four kernel taps w.r.t. the sample offset.
  const scalar_t x0 = -1 - t;  // outer tap: 1 < |x0| < 2
  coeffs[0] = (-3 * A * x0 - 10 * A ) * x0 - 8 * A;
  const scalar_t x1 = -t;      // inner tap: |x1| <= 1
  coeffs[1] = (-3 * (A + 2) * x1 - 2 * (A + 3)) * x1;
  const scalar_t x2 = 1 - t;   // inner tap: |x2| <= 1
  coeffs[2] = (3 * (A + 2) * x2 - 2 * (A + 3)) * x2;
  const scalar_t x3 = 2 - t;   // outer tap: 1 < |x3| < 2
  coeffs[3] = (3 * A * x3 - 10 * A) * x3 + 8 * A;
}
|
297 |
+
|
298 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// See NOTE: [Tensor vs. TensorBase]
|
4 |
+
// https://github.com/pytorch/pytorch/pull/66979
|
5 |
+
#include <ATen/core/TensorBase.h>
|
6 |
+
#include <ATen/native/TensorProperties.h>
|
7 |
+
#include <ATen/native/CanUse32BitIndexMath.h>
|
8 |
+
|
9 |
+
namespace at::native {
|
10 |
+
|
11 |
+
namespace detail {
|
12 |
+
|
13 |
+
enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic};
|
14 |
+
enum class GridSamplerPadding {Zeros, Border, Reflection};
|
15 |
+
|
16 |
+
} // namespace detail
|
17 |
+
|
18 |
+
using detail::GridSamplerInterpolation;
|
19 |
+
using detail::GridSamplerPadding;
|
20 |
+
|
21 |
+
namespace {
|
22 |
+
|
23 |
+
// See NOTE [ grid_sampler Native Functions ].
|
24 |
+
// Shared input validation for all grid_sampler variants (2d and 3d):
// both tensors defined, same device, strided layout, matching batch dim,
// grid's last dim equal to the number of spatial dims, and no empty
// spatial dimension. Checks run in this fixed order, so error messages
// are reported in this priority.
void check_grid_sampler_common(
  const TensorBase& input,
  const TensorBase& grid
) {
  auto input_opt = input.options();
  auto grid_opt = grid.options();

  TORCH_CHECK(
    input.defined(),
    "grid_sampler(): expected input to not be undefined");
  TORCH_CHECK(
    grid.defined(),
    "grid_sampler(): expected grid to not be undefined");
  TORCH_CHECK(
    input_opt.device() == grid_opt.device(),
    "grid_sampler(): expected input and grid to be on same device, but input "
    "is on ", input_opt.device(), " and grid is on ", grid_opt.device());
  TORCH_CHECK(
    input_opt.layout() == kStrided && grid_opt.layout() == kStrided,
    "grid_sampler(): expected input and grid to have torch.strided layout, but "
    "input has ", input_opt.layout(), " and grid has ", grid_opt.layout());
  TORCH_CHECK(
    input.size(0) == grid.size(0),
    "grid_sampler(): expected grid and input to have same batch size, but got "
    "input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes());
  // grid's innermost dim holds one coordinate per spatial dim of input
  // (2 for NCHW input, 3 for NCDHW input).
  TORCH_CHECK(
    grid.size(-1) == input.dim() - 2,
    "grid_sampler(): expected grid to have size ", input.dim() - 2, " in last "
    "dimension, but got grid with sizes ", grid.sizes());

  // Dims 2.. are the spatial dims; none may be empty.
  for (const auto i : c10::irange(2, input.dim())) {
    TORCH_CHECK(input.size(i) > 0,
      "grid_sampler(): expected input to have non-empty spatial dimensions, "
      "but input has sizes ", input.sizes(), " with dimension ", i, " being "
      "empty");
  }
}
|
61 |
+
|
62 |
+
// See NOTE [ grid_sampler Native Functions ].
|
63 |
+
// 2-d specific validation: both input and grid must be 4-D (NCHW input,
// NHW2 grid). Callers are expected to run check_grid_sampler_common first.
void check_grid_sampler_2d(
  const TensorBase& input,
  const TensorBase& grid
) {
  TORCH_CHECK(
    input.dim() == 4 && input.dim() == grid.dim(),
    "grid_sampler(): expected 4D input and grid with same number of "
    "dimensions, but got input with sizes ", input.sizes(),
    " and grid with sizes ", grid.sizes());
}
|
73 |
+
|
74 |
+
// See NOTE [ grid_sampler Native Functions ].
|
75 |
+
// 3-d specific validation: both tensors must be 5-D, and bicubic
// interpolation is rejected (only supported for 4-D input).
void check_grid_sampler_3d(
  const TensorBase& input,
  const TensorBase& grid,
  int64_t interpolation_mode
) {
  TORCH_CHECK(
    input.dim() == 5 && input.dim() == grid.dim(),
    "grid_sampler(): expected 5D input and grid with same number of "
    "dimensions, but got input with sizes ", input.sizes(),
    " and grid with sizes ", grid.sizes());
  // NOTE(review): input.dim() == 5 is already guaranteed by the check
  // above, so this condition effectively just rejects Bicubic here.
  TORCH_CHECK(
    !(input.dim() == 5 &&
      static_cast<GridSamplerInterpolation>(interpolation_mode) ==
        GridSamplerInterpolation::Bicubic),
    "grid_sampler(): bicubic interpolation only supports 4D input");
}
|
91 |
+
|
92 |
+
// See NOTE [ grid_sampler Native Functions ].
|
93 |
+
// cudnn does not support inputs larger than 1024.
|
94 |
+
bool cond_cudnn_grid_sampler(
  const TensorBase& input,
  const TensorBase& grid
) {
  // Use the cuDNN kernel only when: both tensors are cuDNN-eligible
  // (device/dtype/contiguity as decided by cudnn_is_acceptable), 32-bit
  // indexing suffices for both, the input is 4-D (2-d sampling only), and
  // the size of dim 1 (presumably channels in NCHW — see the 1024 note
  // above) does not exceed cuDNN's limit.
  return (
    at::native::cudnn_is_acceptable(input) &&
    at::native::cudnn_is_acceptable(grid) &&
    at::native::canUse32BitIndexMath(input) &&
    at::native::canUse32BitIndexMath(grid) &&
    input.dim() == 4 &&
    input.sym_size(1) <= 1024);
}
|
106 |
+
|
107 |
+
} // anonymous namespace
|
108 |
+
|
109 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
|
6 |
+
namespace at::native {
|
7 |
+
|
8 |
+
using histogramdd_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&);
|
9 |
+
using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&, bool);
|
10 |
+
using histogram_select_outer_bin_edges_fn = void(*)(const Tensor& input, const int64_t N, std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges);
|
11 |
+
|
12 |
+
DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub);
|
13 |
+
DECLARE_DISPATCH(histogramdd_linear_fn, histogramdd_linear_stub);
|
14 |
+
DECLARE_DISPATCH(histogram_select_outer_bin_edges_fn, histogram_select_outer_bin_edges_stub);
|
15 |
+
|
16 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/IndexingUtils.h
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
#include <ATen/ExpandUtils.h>
|
3 |
+
#include <ATen/native/CanUse32BitIndexMath.h>
|
4 |
+
#include <ATen/native/TensorIterator.h>
|
5 |
+
#include <ATen/core/IListRef.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
|
8 |
+
namespace at::native {
|
9 |
+
|
10 |
+
// Raise IndexError for a boolean/byte mask whose shape disagrees with the
// indexed tensor. Marked [[noreturn]] because TORCH_CHECK_INDEX(false, ...)
// always throws.
[[noreturn]]
static void invalid_mask(const Tensor & self, int64_t idx, const Tensor & mask, int64_t maskIdx) {
  TORCH_CHECK_INDEX(false, "The shape of the mask ", mask.sizes(), " at index ", maskIdx,
                    " does not match the shape of the indexed tensor ", self.sizes(), " at index ", idx);
}
|
15 |
+
|
16 |
+
|
17 |
+
// If indices come in as ByteTensor or BoolTensor (masks), expand them into
// the equivalent indexing by LongTensors: a d-dim mask at position k is
// replaced by d long-index tensors (one per mask dim, from nonzero()).
// Missing (None) indices become default-constructed (undefined) Tensors.
// The output may therefore be longer than the input list.
static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTensorListRef indices) {
  std::vector<Tensor> result;
  for (const auto& index_opt : indices) {
    if (!index_opt.has_value()) {
      // None index: keep a placeholder undefined tensor at this slot.
      result.emplace_back();
    } else {
      const auto& index = *index_opt;
      if (index.scalar_type() == kByte || index.scalar_type() == kBool) {
        if (index.scalar_type() == kByte) {
          // uint8 masks still work but are deprecated in favor of bool.
          TORCH_WARN("indexing with dtype torch.uint8 is now deprecated," \
          " please use a dtype torch.bool instead.");
        }
        // The sizes of the ByteTensor mask or bool tensor must match the
        // sizes of the corresponding dimensions in self. Note result.size()
        // is the dim offset into self at this point, since each prior entry
        // consumed exactly one dim of self.
        for (const auto j : c10::irange(index.dim())) {
          int64_t srcIdx = static_cast<int64_t>(result.size() + j);
          if (index.size(j) != self.size(srcIdx)) {
            invalid_mask(self, srcIdx, index, j);
          }
        }
        // Replace with nonzeros: column j of nonzero() is the long index
        // for the mask's j-th dimension.
        auto nonzero = index.nonzero();
        for (const auto j : c10::irange(index.dim())) {
          result.emplace_back(nonzero.select(1, j));
        }
      } else {
        // Already an integer index tensor; pass through unchanged.
        result.emplace_back(index);
      }
    }
  }
  return result;
}
|
50 |
+
|
51 |
+
// Validate the dtypes of all defined index tensors. Long, byte (deprecated)
// and bool are always accepted; int is accepted only when allow_int is set.
// Throws IndexError on the first offending tensor.
static C10_UNUSED void checkIndexTensorTypes(IOptTensorListRef indices, bool allow_int=false) {
  for (const auto& opt_index : indices) {
    if (!opt_index.has_value() || !opt_index->defined()) {
      continue;  // None / undefined slots carry no dtype to check
    }
    const auto dtype = opt_index->scalar_type();
    const bool ok =
        (dtype == kLong || dtype == kByte || dtype == kBool) ||
        (allow_int && dtype == kInt);
    if (!ok) {
      if (allow_int) {
        TORCH_CHECK_INDEX(false, "tensors used as indices must be long, int, byte or bool tensors");
      } else {
        TORCH_CHECK_INDEX(false, "tensors used as indices must be long, byte or bool tensors");
      }
    }
  }
}
|
67 |
+
|
68 |
+
// Wrap each Tensor in a c10::optional so the list can be passed to index
// APIs that accept missing ("None") indices. Every entry is engaged.
inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<Tensor> list) {
  torch::List<c10::optional<Tensor>> result;
  result.reserve(list.size());
  for (const Tensor& a : list) {
    result.push_back(a);
  }
  return result;
}
|
76 |
+
|
77 |
+
// Convert a list of IValues into optional Tensors: Tensor IValues are
// unwrapped, everything else (e.g. None) becomes an empty optional.
inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<IValue> list) {
  torch::List<c10::optional<Tensor>> result;
  result.reserve(list.size());
  for (const IValue& item : list) {
    if (item.isTensor()) {
      result.push_back(c10::optional<Tensor>(item.toTensor()));
    } else {
      result.push_back(c10::optional<Tensor>());
    }
  }
  return result;
}
|
85 |
+
|
86 |
+
// True when the defined (non-null) tensors in tl form a single contiguous
// run, i.e. no undefined slot appears between the first and the last
// defined entry.
static C10_UNUSED bool hasContiguousSubspace(TensorList tl) {
  auto is_defined = [](const Tensor& t) { return t.defined(); };
  auto is_undefined = [](const Tensor& t) { return !t.defined(); };
  const auto first_defined = std::find_if(tl.begin(), tl.end(), is_defined);
  const auto last_defined = std::find_if(tl.rbegin(), tl.rend(), is_defined);
  // Search for an undefined gap strictly inside the defined span.
  const auto gap = std::find_if(first_defined, last_defined.base(), is_undefined);
  return gap == last_defined.base();
}
|
95 |
+
|
96 |
+
|
97 |
+
// Transposes the tensor and indices together so that all the non-null indices
|
98 |
+
// index the first k dimensions of the tensor. Returns the transposed tensor
|
99 |
+
// and the reordered indices. For example:
|
100 |
+
// transposeToFront(tensor, {nullptr, a, nullptr, b})
|
101 |
+
// returns
|
102 |
+
// tensor.permute([1, 3, 0, 2]), {a, b, nullptr, nullptr}
|
103 |
+
static C10_UNUSED std::tuple<Tensor, std::vector<Tensor>>
transposeToFront(const Tensor& self, TensorList indices) {
  // Build a permutation placing every dim that has a defined index first
  // (preserving relative order), followed by the remaining dims; the index
  // list is reordered identically, with placeholders for undefined slots.
  std::vector<int64_t> perm;
  std::vector<Tensor> reordered;
  perm.reserve(self.dim());
  for (const auto d : c10::irange(self.dim())) {
    if (indices[d].defined()) {
      perm.push_back(d);
      reordered.emplace_back(indices[d]);
    }
  }
  for (const auto d : c10::irange(self.dim())) {
    if (!indices[d].defined()) {
      perm.push_back(d);
      reordered.emplace_back();
    }
  }
  return std::make_tuple(self.permute(perm), std::move(reordered));
}
|
122 |
+
|
123 |
+
inline std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
|
124 |
+
transposeToFrontAndInvPerm(const Tensor& self, TensorList indices) {
|
125 |
+
std::vector<int64_t> dims;
|
126 |
+
std::vector<int64_t> invPerm;
|
127 |
+
std::vector<Tensor> transposedIndices;
|
128 |
+
dims.reserve(self.dim());
|
129 |
+
invPerm.resize(self.dim());
|
130 |
+
for (const auto i : c10::irange(self.dim())) {
|
131 |
+
if (indices[i].defined()) {
|
132 |
+
dims.push_back(i);
|
133 |
+
transposedIndices.emplace_back(indices[i]);
|
134 |
+
}
|
135 |
+
}
|
136 |
+
for (const auto i : c10::irange(self.dim())) {
|
137 |
+
if (!indices[i].defined()) {
|
138 |
+
dims.push_back(i);
|
139 |
+
transposedIndices.emplace_back();
|
140 |
+
}
|
141 |
+
}
|
142 |
+
for (const auto i : c10::irange(self.dim())) {
|
143 |
+
invPerm[dims[i]] = i;
|
144 |
+
}
|
145 |
+
return std::make_tuple(self.permute(dims), std::move(transposedIndices), std::move(invPerm));
|
146 |
+
}
|
147 |
+
|
148 |
+
// Precomputed state for advanced (tensor) indexing: the source tensor, the
// index tensors, and the sizes/strides of the indexed dimensions, with
// dims_before/dims_after counting the non-indexed dims on each side.
// NOTE(review): the constructor is defined in a .cpp elsewhere; the exact
// restriding/broadcasting it performs is inferred from member names —
// confirm against the Indexing kernels before relying on it.
struct AdvancedIndex {
  AdvancedIndex(const Tensor& src, TensorList indices);

  Tensor src;                   // tensor being indexed
  std::vector<Tensor> indices;  // one index tensor per indexed dim
  DimVector indexed_sizes;      // sizes of the indexed dims of src
  DimVector indexed_strides;    // strides of the indexed dims of src
  int64_t dims_before;          // non-indexed dims preceding the indexed span
  int64_t dims_after;           // non-indexed dims following the indexed span
};
|
158 |
+
|
159 |
+
|
160 |
+
} //namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Math.h
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h
ADDED
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
namespace at {
|
4 |
+
// views and their in-place version ops
|
5 |
+
// Registers a fallthrough kernel for every view op (and its in-place
// variant) on the library `m`, so math-bit dispatch keys (conj/neg) pass
// straight through view operations instead of materializing. Comments
// cannot appear inside the multi-line macro body, hence this note above it.
#define TORCH_VIEW_FNS(m) \
  m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \
  m.impl("detach", torch::CppFunction::makeFallthrough()); \
  m.impl("detach_", torch::CppFunction::makeFallthrough()); \
  m.impl("diagonal", torch::CppFunction::makeFallthrough()); \
  m.impl("expand", torch::CppFunction::makeFallthrough()); \
  m.impl("expand_as", torch::CppFunction::makeFallthrough()); \
  m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \
  m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \
  m.impl("narrow", torch::CppFunction::makeFallthrough()); \
  m.impl("permute", torch::CppFunction::makeFallthrough()); \
  m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \
  m.impl("select.int", torch::CppFunction::makeFallthrough()); \
  m.impl("squeeze", torch::CppFunction::makeFallthrough()); \
  m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \
  m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \
  m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \
  m.impl("transpose_", torch::CppFunction::makeFallthrough()); \
  m.impl("t", torch::CppFunction::makeFallthrough()); \
  m.impl("t_", torch::CppFunction::makeFallthrough()); \
  m.impl("real", torch::CppFunction::makeFallthrough()); \
  m.impl("imag", torch::CppFunction::makeFallthrough()); \
  m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \
  m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \
  m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \
  m.impl("unfold", torch::CppFunction::makeFallthrough()); \
  m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \
  m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \
  m.impl("view_as", torch::CppFunction::makeFallthrough()); \
  m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \
  m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \
  m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \
  m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \
  m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \
  m.impl("swapdims", torch::CppFunction::makeFallthrough()); \
  m.impl("chunk", torch::CppFunction::makeFallthrough()); \
  m.impl("reshape", torch::CppFunction::makeFallthrough()); \
  m.impl("alias", torch::CppFunction::makeFallthrough()); \
  m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \
  m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \
  m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \
  m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \
  m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \
  m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \
  m.impl("conj", torch::CppFunction::makeFallthrough()); \
  m.impl("_conj", torch::CppFunction::makeFallthrough()); \
  m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \
  m.impl("resize_", torch::CppFunction::makeFallthrough());
|
53 |
+
|
54 |
+
// Fallthrough registrations for constructors and metadata queries that are
// unaffected by math bits (they allocate fresh memory or only read shape /
// dtype information), so the math-bit key can be skipped entirely.
#define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \
  m.impl("empty_like", torch::CppFunction::makeFallthrough()); \
  m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \
  m.impl("empty.out", torch::CppFunction::makeFallthrough()); \
  m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \
  m.impl("full_like", torch::CppFunction::makeFallthrough()); \
  m.impl("stride.int", torch::CppFunction::makeFallthrough()); \
  m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \
  m.impl("size.int", torch::CppFunction::makeFallthrough()); \
  m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \
  m.impl("is_complex", torch::CppFunction::makeFallthrough()); \
  m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \
  m.impl("requires_grad_", torch::CppFunction::makeFallthrough());
} // namespace at

// as_strided and view have native-function registrations, kept in a
// separate macro so callers can opt in independently of TORCH_VIEW_FNS.
#define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \
  m.impl("as_strided", torch::CppFunction::makeFallthrough()); \
  m.impl("view", torch::CppFunction::makeFallthrough());
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/Tensor.h>
|
2 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
3 |
+
#include <ATen/core/op_registration/op_registration.h>
|
4 |
+
#include <ATen/native/UnaryOps.h>
|
5 |
+
#include <ATen/native/Resize.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
#include <torch/library.h>
|
8 |
+
|
9 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
10 |
+
#include <ATen/Functions.h>
|
11 |
+
#else
|
12 |
+
#include <ATen/ops/clone.h>
|
13 |
+
|
14 |
+
#include <utility>
|
15 |
+
#endif
|
16 |
+
|
17 |
+
namespace at::native {
|
18 |
+
// This fallback should only be used for operations that are self inverse and have a corresponding tensor
|
19 |
+
// bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit.
|
20 |
+
// Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit.
|
21 |
+
// Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called.
|
22 |
+
|
23 |
+
// NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit.
|
24 |
+
// Boxed fallback shared by the conjugate-bit and negative-bit dispatch keys.
// Subclasses supply the key and the bit predicate; fallback_impl then
// materializes (clones) flagged inputs and redispatches past `key`.
struct MathOpFallback {
  MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {}
  // Returns whether the math bit this fallback resolves is set on `t`.
  virtual bool is_bit_set(const Tensor&) = 0;
  // Boxed kernel body: operates directly on the dispatcher stack, which
  // holds the operator's arguments in the top `num_arguments` slots.
  void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) {
    /*
      Situations to handle:
        1. Out-of-place operation. Easy: materialize all inputs and
           call it a day.
        2. Inplace operation. Desugar x.add_(2) into x.conj_().add_(2).conj_().
           Materialize other inputs as in (1).
        3. out= operation. Desugar add(x, 2, out=y) into y.copy_(add(x, 2))
           Materialize other inputs as in (1).

      It is important to be able to tell if we READ from an argument and if we
      WRITE to an argument. Conservative approach is to assume that we always
      READ from an argument, but in out= operations you can skip
      conjugating inputs on entry that never get used. In the current schema we
      can't easily tell if the operation is in in-place or out= operation.

      Note:
      1. Mutable tensorlists containing tensors whose math bit set to true are disallowed.
      2. Mutable tensors with math bit set to true are unconditionally cloned to ensure
         correct behavior in the case when the mutable tensor shares memory with non mutable arguments.

         If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory
         with these mutable inputs would read into wrong values in the following cases:
         1. Non mutable inputs have their math bit set to false.
         2. Math bit for mutable input(s) is resolved before the non mutable inputs (with bit set to true and sharing memory
            with one or more mutable arg(s)) are cloned.
         At the end, the final value of the mutable arguments from the stack are copied into the original input mutable tensor inputs.
    */
    const auto& arguments = op.schema().arguments();
    const auto num_arguments = arguments.size();
    const auto stack_start = stack->size() - num_arguments;

    // Classify the op by scanning argument alias info; all aliased args
    // must agree (all writes, or all views).
    c10::optional<bool> is_write;
    for (const auto i : c10::irange(num_arguments)) {
      // Three possible states:
      // 1. alias_info has no value --> out-of-place operation
      // 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation
      // 3. alias_info does have a value, alias_info->is_write=False --> view operation
      const AliasInfo* alias_info = arguments[i].alias_info();
      if (alias_info != nullptr) {
        if (is_write.has_value()) {
          TORCH_CHECK(*is_write == alias_info->isWrite(),
            "Unsupported operator for ", op_name, " fallback: ", op.schema().name(),
            op_name, " fallback doesn't work for operators with a mix "
            "mutable and non-mutable inputs that alias with outputs, "
            "this must be implemented manually. "
            "If you got this error on a core op, please report a bug to PyTorch.");
        } else {
          is_write = alias_info->isWrite();
        }
      }
    }

    if (is_write.has_value() && !*is_write) {
      // We assume that view operators automatically handle the math bit
      // correctly by propagating the dispatch key in key_set.
      // This is not necessarily always right, so you should test these cases.
      op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
      return;
    }

    // Mutable inputs with math bit set to True and their clones
    std::vector<std::pair<Tensor, Tensor>> mutable_inputs_with_their_clones;
    for (const auto i : c10::irange(num_arguments)) {
      auto& ivalue = (*stack)[stack_start + i];
      if (!(ivalue.isTensor() || ivalue.isTensorList())) {
        continue;
      }
      const auto& argument = arguments[i];
      bool mut_arg = false;
      if (argument.alias_info()) {
        // Was already tested by is_write loop above
        TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite());
        mut_arg = true;
      }
      if (ivalue.isTensor()) {
        if (!is_bit_set(ivalue.toTensor())) {
          continue;
        }
        // clone() materializes the math bit (produces a plain tensor).
        auto tensor = std::move(ivalue).toTensor();
        auto resolved_tensor = at::clone(tensor);
        if (mut_arg) {
          TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensors with ",
            op_name, "bit set to true.");
          mutable_inputs_with_their_clones.emplace_back(std::move(tensor), resolved_tensor);
        }
        (*stack)[stack_start + i] = std::move(resolved_tensor);
      } else if (ivalue.isTensorList()) {
        auto tensors = std::move(ivalue).toTensorList();
        for(const auto j : c10::irange(tensors.size())) {
          const auto& tensor = tensors[j];
          if (!is_bit_set(tensor)) {
            continue;
          }
          TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ",
            op_name, " inputs. Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ",
            op.schema().name());
          tensors[j] = at::clone(tensor);
        }
        (*stack)[stack_start + i] = std::move(tensors);
      }
    }

    // Re-run the op below this fallback's key, now with materialized inputs.
    op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);

    TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1);

    // Copy the computed result from the clone back into the caller's
    // original mutable tensor and restore it on the stack.
    for (std::pair<Tensor, Tensor> mut_tensors: mutable_inputs_with_their_clones) {
      auto& mutable_input = mut_tensors.first;
      auto& cloned_mutable_input = mut_tensors.second;
      auto& ivalue = (*stack)[stack_start];
      auto returned_output = std::move(ivalue).toTensor();

      // sanity check to ensure that the tensor in stack aliases the cloned_mutable_input
      TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output));

      // necessary for out= arg
      at::native::resize_output(mutable_input, returned_output.sizes());

      mutable_input.copy_(returned_output);
      (*stack)[stack_start] = std::move(mutable_input);
    }
  }

  virtual ~MathOpFallback() = default;

  DispatchKey key;     // dispatch key this fallback is registered under
  string op_name;      // human-readable bit name used in error messages
};
|
156 |
+
|
157 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/MaxPooling.h
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/Parallel.h>
|
5 |
+
#include <ATen/native/DispatchStub.h>
|
6 |
+
|
7 |
+
namespace at::native {
|
8 |
+
|
9 |
+
// TODO(Heitor) Template by dimension
|
10 |
+
struct PoolingParams1D {
|
11 |
+
int64_t NB; // Number of batches
|
12 |
+
int64_t NC; // Number of channels
|
13 |
+
int64_t IW; // Input width
|
14 |
+
int64_t OW; // Output width
|
15 |
+
int64_t KW; // Kernel width
|
16 |
+
int64_t SJ; // Column stride
|
17 |
+
int64_t PJ; // Column padding
|
18 |
+
int64_t DJ; // Column dilation
|
19 |
+
|
20 |
+
// Return index of input element for the given kernel and output index
|
21 |
+
inline int64_t index(int64_t kj, int64_t oj) const {
|
22 |
+
return oj * SJ + kj * DJ - PJ;
|
23 |
+
}
|
24 |
+
|
25 |
+
// Return index of first output within bounds for this kernel index
|
26 |
+
inline int64_t valid_output_start(int64_t kj) const {
|
27 |
+
int64_t ij = index(kj, 0);;
|
28 |
+
return ij < 0 ? at::divup(-ij, SJ) : 0;
|
29 |
+
}
|
30 |
+
|
31 |
+
// Return index one past last output within bounds for this kernel index
|
32 |
+
inline int64_t valid_output_end(int64_t kj) const {
|
33 |
+
int64_t ij = index(kj, OW - 1);
|
34 |
+
return ij >= IW ? OW - at::divup(ij - (IW - 1), SJ) : OW;
|
35 |
+
}
|
36 |
+
};
|
37 |
+
|
38 |
+
using pooling_fn = void (*)(Tensor&, const Tensor&, const PoolingParams1D&);
|
39 |
+
|
40 |
+
DECLARE_DISPATCH(pooling_fn, max_pool1d_stub);
|
41 |
+
|
42 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/TensorBase.h>
|
2 |
+
#include <algorithm>
|
3 |
+
#include <vector>
|
4 |
+
|
5 |
+
namespace at::native {
|
6 |
+
|
7 |
+
// Clamp a dimension count to at least one, so 0-dim (scalar) tensors can be
// handled uniformly as if they were 1-d.
inline int64_t ensure_nonempty_dim(int64_t dim) {
  return dim > 1 ? dim : 1;
}
|
10 |
+
|
11 |
+
inline int64_t ensure_nonempty_size(const TensorBase &t, int64_t dim) {
|
12 |
+
return t.dim() == 0 ? 1 : t.size(dim);
|
13 |
+
}
|
14 |
+
|
15 |
+
inline int64_t ensure_nonempty_stride(const TensorBase &t, int64_t dim) {
|
16 |
+
return t.dim() == 0 ? 1 : t.stride(dim);
|
17 |
+
}
|
18 |
+
|
19 |
+
using IdxVec = std::vector<int64_t>;
// Ensure a size/stride list is non-empty: rank-0 becomes the singleton {1}.
inline IdxVec ensure_nonempty_vec(IdxVec vec) {
  if (!vec.empty()) {
    return vec;
  }
  vec.push_back(1);
  return vec;
}
|
26 |
+
|
27 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
#include <ATen/core/Tensor.h>
|
3 |
+
#include <c10/util/irange.h>
|
4 |
+
#include <ATen/core/IListRef.h>
|
5 |
+
|
6 |
+
namespace at::native {
|
7 |
+
// This file contains non-symbolic signatures for ops that we have sym-intified the signature of.
|
8 |
+
// However, in certain cases (such as static runtime), we call the native versions of the ops directly.
|
9 |
+
// In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation.
|
10 |
+
TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape);
|
11 |
+
TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length);
|
12 |
+
TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt, c10::optional<bool> is_coalesced=c10::nullopt);
|
13 |
+
TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
|
14 |
+
TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
|
15 |
+
// The below ops don't get a duplicated C++ implementation.
|
16 |
+
// They are backward ops, which make them very unlikely to be called directly
|
17 |
+
// by external code (at::native::trace_backward).
|
18 |
+
// They get their own declaration for BC purposes however.
|
19 |
+
TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
|
20 |
+
TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
|
21 |
+
TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim);
|
22 |
+
TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes);
|
23 |
+
TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
|
24 |
+
TORCH_API at::Tensor select(const at::Tensor& self, int64_t dim, int64_t index);
|
25 |
+
TORCH_API std::vector<Tensor> tensor_split(const Tensor& self, IntArrayRef indices, int64_t dim);
|
26 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
|
6 |
+
namespace at::native {
|
7 |
+
|
8 |
+
using padding_fn = void (*)(const Tensor&, const Tensor&, IntArrayRef);
|
9 |
+
|
10 |
+
// reflection padding
|
11 |
+
DECLARE_DISPATCH(padding_fn, reflection_pad1d_kernel);
|
12 |
+
DECLARE_DISPATCH(padding_fn, reflection_pad1d_backward_kernel);
|
13 |
+
DECLARE_DISPATCH(padding_fn, reflection_pad2d_kernel);
|
14 |
+
DECLARE_DISPATCH(padding_fn, reflection_pad2d_backward_kernel);
|
15 |
+
DECLARE_DISPATCH(padding_fn, reflection_pad3d_kernel);
|
16 |
+
DECLARE_DISPATCH(padding_fn, reflection_pad3d_backward_kernel);
|
17 |
+
|
18 |
+
// replication padding
|
19 |
+
DECLARE_DISPATCH(padding_fn, replication_pad1d_kernel);
|
20 |
+
DECLARE_DISPATCH(padding_fn, replication_pad1d_backward_kernel);
|
21 |
+
DECLARE_DISPATCH(padding_fn, replication_pad2d_kernel);
|
22 |
+
DECLARE_DISPATCH(padding_fn, replication_pad2d_backward_kernel);
|
23 |
+
DECLARE_DISPATCH(padding_fn, replication_pad3d_kernel);
|
24 |
+
DECLARE_DISPATCH(padding_fn, replication_pad3d_backward_kernel);
|
25 |
+
|
26 |
+
namespace padding {
|
27 |
+
|
28 |
+
template <int dim>
|
29 |
+
static inline void check_valid_input(const Tensor& input, IntArrayRef padding) {
|
30 |
+
|
31 |
+
TORCH_CHECK(padding.size() == 2 * dim,
|
32 |
+
"padding size is expected to be ", 2 * dim,
|
33 |
+
", but got: ", padding.size());
|
34 |
+
|
35 |
+
int input_dim = input.dim();
|
36 |
+
|
37 |
+
bool is_batch_mode = input_dim == (dim + 2);
|
38 |
+
|
39 |
+
bool valid_batch_mode = is_batch_mode;
|
40 |
+
bool valid_non_batch_mode = !is_batch_mode;
|
41 |
+
|
42 |
+
if (is_batch_mode) {
|
43 |
+
// allow batch size of 0-dim.
|
44 |
+
for (const auto d : c10::irange(1, input_dim)) {
|
45 |
+
valid_batch_mode = valid_batch_mode && input.size(d) != 0;
|
46 |
+
}
|
47 |
+
} else {
|
48 |
+
for (const auto d : c10::irange(0, input_dim)) {
|
49 |
+
valid_non_batch_mode = valid_non_batch_mode && input.size(d) != 0;
|
50 |
+
}
|
51 |
+
}
|
52 |
+
|
53 |
+
// allow empty batch size but not other dimensions.
|
54 |
+
TORCH_CHECK(valid_batch_mode || valid_non_batch_mode,
|
55 |
+
"Expected ", dim + 1, "D or ", dim + 2,
|
56 |
+
"D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
|
57 |
+
input.sizes());
|
58 |
+
}
|
59 |
+
|
60 |
+
} // namespace padding
|
61 |
+
|
62 |
+
} // at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/Tensor.h>
|
2 |
+
#include <c10/util/Exception.h>
|
3 |
+
|
4 |
+
namespace at {
|
5 |
+
namespace native {
|
6 |
+
|
7 |
+
inline void check_pixel_shuffle_shapes(const Tensor& self, int64_t upscale_factor) {
|
8 |
+
TORCH_CHECK(self.dim() >= 3,
|
9 |
+
"pixel_shuffle expects input to have at least 3 dimensions, but got input with ",
|
10 |
+
self.dim(), " dimension(s)");
|
11 |
+
TORCH_CHECK(upscale_factor > 0,
|
12 |
+
"pixel_shuffle expects a positive upscale_factor, but got ",
|
13 |
+
upscale_factor);
|
14 |
+
int64_t c = self.size(-3);
|
15 |
+
int64_t upscale_factor_squared = upscale_factor * upscale_factor;
|
16 |
+
TORCH_CHECK(c % upscale_factor_squared == 0,
|
17 |
+
"pixel_shuffle expects its input's 'channel' dimension to be divisible by the square of "
|
18 |
+
"upscale_factor, but input.size(-3)=", c, " is not divisible by ", upscale_factor_squared);
|
19 |
+
}
|
20 |
+
|
21 |
+
inline void check_pixel_unshuffle_shapes(const Tensor& self, int64_t downscale_factor) {
|
22 |
+
TORCH_CHECK(
|
23 |
+
self.dim() >= 3,
|
24 |
+
"pixel_unshuffle expects input to have at least 3 dimensions, but got input with ",
|
25 |
+
self.dim(),
|
26 |
+
" dimension(s)");
|
27 |
+
TORCH_CHECK(
|
28 |
+
downscale_factor > 0,
|
29 |
+
"pixel_unshuffle expects a positive downscale_factor, but got ",
|
30 |
+
downscale_factor);
|
31 |
+
int64_t h = self.size(-2);
|
32 |
+
int64_t w = self.size(-1);
|
33 |
+
TORCH_CHECK(
|
34 |
+
h % downscale_factor == 0,
|
35 |
+
"pixel_unshuffle expects height to be divisible by downscale_factor, but input.size(-2)=",
|
36 |
+
h,
|
37 |
+
" is not divisible by ",
|
38 |
+
downscale_factor);
|
39 |
+
TORCH_CHECK(
|
40 |
+
w % downscale_factor == 0,
|
41 |
+
"pixel_unshuffle expects width to be divisible by downscale_factor, but input.size(-1)=",
|
42 |
+
w,
|
43 |
+
" is not divisible by ",
|
44 |
+
downscale_factor);
|
45 |
+
}
|
46 |
+
|
47 |
+
}} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/PointwiseOps.h
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Ternary and higher-order pointwise operations
|
2 |
+
#pragma once
|
3 |
+
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
|
6 |
+
namespace c10 {
|
7 |
+
class Scalar;
|
8 |
+
}
|
9 |
+
|
10 |
+
namespace at {
|
11 |
+
|
12 |
+
struct TensorIterator;
|
13 |
+
struct TensorIteratorBase;
|
14 |
+
|
15 |
+
namespace native {
|
16 |
+
|
17 |
+
using pointwise_fn = void (*)(TensorIterator&, const Scalar& scalar);
|
18 |
+
using structured_pointwise_fn = void (*)(TensorIteratorBase&, const Scalar& scalar);
|
19 |
+
using pointwise_fn_double = void (*)(TensorIterator&, const Scalar&, double);
|
20 |
+
|
21 |
+
DECLARE_DISPATCH(structured_pointwise_fn, addcmul_stub);
|
22 |
+
DECLARE_DISPATCH(structured_pointwise_fn, addcdiv_stub);
|
23 |
+
DECLARE_DISPATCH(pointwise_fn_double, smooth_l1_backward_stub);
|
24 |
+
DECLARE_DISPATCH(pointwise_fn_double, huber_backward_stub);
|
25 |
+
DECLARE_DISPATCH(pointwise_fn, mse_backward_stub);
|
26 |
+
|
27 |
+
} // namespace native
|
28 |
+
} // namespace at
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h
ADDED
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/Tensor.h>
|
2 |
+
#include <ATen/div_rtn.h>
|
3 |
+
#include <ATen/TensorUtils.h>
|
4 |
+
#include <ATen/native/DispatchStub.h>
|
5 |
+
#include <c10/util/irange.h>
|
6 |
+
|
7 |
+
#include <utility>
|
8 |
+
|
9 |
+
#pragma once
|
10 |
+
|
11 |
+
namespace at::native {
|
12 |
+
|
13 |
+
using max_pool2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input,
|
14 |
+
int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
|
15 |
+
using max_pool2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
|
16 |
+
|
17 |
+
DECLARE_DISPATCH(max_pool2d_fn, max_pool2d_kernel);
|
18 |
+
DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel);
|
19 |
+
|
20 |
+
// average pooling has same signature for forward and backward
|
21 |
+
using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH,
|
22 |
+
int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
|
23 |
+
using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH,
|
24 |
+
int dW, int dH, int padW, int padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
|
25 |
+
|
26 |
+
DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel);
|
27 |
+
DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel);
|
28 |
+
|
29 |
+
using max_pool3d_fn = void(*)(Tensor& output, Tensor& indices, const Tensor& input,
|
30 |
+
int kW, int kH, int kD, int dW, int dH, int dD, int pW, int pH, int pD, int dilationW, int dilationH, int dilationD);
|
31 |
+
using max_pool3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
|
32 |
+
|
33 |
+
DECLARE_DISPATCH(max_pool3d_fn, max_pool3d_kernel);
|
34 |
+
DECLARE_DISPATCH(max_pool3d_backward_fn, max_pool3d_backward_kernel);
|
35 |
+
namespace {
|
36 |
+
|
37 |
+
// Casts `v` to dest_t after verifying it is representable there; raises
// via TORCH_CHECK otherwise.
template <typename dest_t, typename src_t>
static inline dest_t safe_downcast(src_t v) {
  constexpr auto lo = std::numeric_limits<dest_t>::min();
  constexpr auto hi = std::numeric_limits<dest_t>::max();
  TORCH_CHECK(lo <= v && v <= hi, "integer out of range");
  return static_cast<dest_t>(v);
}
|
46 |
+
|
47 |
+
template<typename T>
|
48 |
+
static inline T pooling_output_shape_pad_lr(
|
49 |
+
T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation,
|
50 |
+
bool ceil_mode) {
|
51 |
+
T outputSize = div_rtn<T>(
|
52 |
+
inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 +
|
53 |
+
(ceil_mode ? stride - 1 : 0), stride) + 1;
|
54 |
+
if (ceil_mode) {
|
55 |
+
// ensure that the last pooling starts inside the image
|
56 |
+
// needed to avoid problems in ceil mode
|
57 |
+
if ((outputSize - 1) * stride >= inputSize + pad_l) {
|
58 |
+
--outputSize;
|
59 |
+
}
|
60 |
+
}
|
61 |
+
return outputSize;
|
62 |
+
}
|
63 |
+
|
64 |
+
// Output length of one pooled dimension with symmetric padding `pad`.
// Validates stride and padding, then delegates to the asymmetric helper.
template<typename T>
static inline T pooling_output_shape(
      T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
    TORCH_CHECK(stride != 0, "stride should not be zero");
    TORCH_CHECK(pad >= 0,
                "pad must be non-negative, but got pad: ", pad);
    TORCH_CHECK(pad <= kernelSize / 2,
                "pad should be at most half of kernel size, but got pad=",
                pad, " and kernel_size=", kernelSize);
    // Same padding on both sides.
    return pooling_output_shape_pad_lr(
        inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode);
}
|
76 |
+
|
77 |
+
// Splits the total "same"-mode padding into a (left, right) pair.
// NOTE: with strides, the output shape is ceil(inputSize/stride).
template <typename T>
std::pair<T, T> _pooling_same_mode_padding_lr(
    T inputSize, T kernelSize, T stride, T dilation) {
  auto total_padding = T(dilation) * (kernelSize - 1);

  // Prefer symmetric padding if possible: when the total is odd and the
  // floor in the output-size computation leaves slack, drop one unit.
  if (stride > 2 && (total_padding % 2 == 1)) {
    auto wiggle_room = inputSize % stride - 1;
    if (wiggle_room > 0) {
      total_padding = total_padding - 1;
    }
  }

  auto left = total_padding / 2;
  auto right = total_padding - left;
  return {left, right};
}
|
95 |
+
|
96 |
+
// Concrete-int entry point for "same"-mode padding computation.
inline std::pair<int64_t, int64_t> pooling_same_mode_padding_lr(
    int64_t inputSize, int64_t kernelSize, int64_t stride, int64_t dilation) {
  return _pooling_same_mode_padding_lr(inputSize, kernelSize, stride, dilation);
}
|
100 |
+
|
101 |
+
// Symbolic-int overload; arguments are moved because SymInt is non-trivial.
inline std::pair<c10::SymInt, c10::SymInt> pooling_same_mode_padding_lr(
    c10::SymInt inputSize, c10::SymInt kernelSize, c10::SymInt stride, c10::SymInt dilation) {
  return _pooling_same_mode_padding_lr(
      std::move(inputSize), std::move(kernelSize), std::move(stride), std::move(dilation));
}
|
105 |
+
|
106 |
+
// AveragePool2d/DilatedMaxPool2d (forward)
|
107 |
+
static inline void
|
108 |
+
pool2d_shape_check(
|
109 |
+
const Tensor& input,
|
110 |
+
int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
|
111 |
+
int64_t nInputPlane,
|
112 |
+
int64_t inputHeight, int64_t inputWidth,
|
113 |
+
int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
|
114 |
+
{
|
115 |
+
const int64_t ndim = input.ndimension();
|
116 |
+
const int64_t nOutputPlane = nInputPlane;
|
117 |
+
|
118 |
+
TORCH_CHECK(kW > 0 && kH > 0,
|
119 |
+
"kernel size should be greater than zero, but got ",
|
120 |
+
"kH: ", kH, " kW: ", kW);
|
121 |
+
TORCH_CHECK(dW > 0 && dH > 0,
|
122 |
+
"stride should be greater than zero, but got "
|
123 |
+
"dH: ", dH, " dW: ", dW);
|
124 |
+
TORCH_CHECK(dilationH > 0 && dilationW > 0,
|
125 |
+
"dilation should be greater than zero, but got ",
|
126 |
+
"dilationH: ", dilationH, " dilationW: ", dilationW);
|
127 |
+
|
128 |
+
bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
|
129 |
+
if (memory_format == at::MemoryFormat::ChannelsLast){
|
130 |
+
// Expect tensor in NHWC format and allow 0-dim only for N.
|
131 |
+
TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0),
|
132 |
+
"Expected 4D (batch mode) tensor expected for input with channels_last layout"
|
133 |
+
" with optional 0 dim batch size for input, but got: ", input.sizes());
|
134 |
+
} else {
|
135 |
+
TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) ||
|
136 |
+
(ndim == 4 && valid_dims && input.size(3) != 0),
|
137 |
+
"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got:",
|
138 |
+
input.sizes());
|
139 |
+
}
|
140 |
+
|
141 |
+
TORCH_CHECK(kW/2 >= padW && kH/2 >= padH,
|
142 |
+
"pad should be smaller than or equal to half of kernel size, but got ",
|
143 |
+
"padW = ", padW, ", padH = ", padH, ", kW = ", kW, ", kH = ", kH);
|
144 |
+
|
145 |
+
TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1,
|
146 |
+
"Given input size: (",
|
147 |
+
nInputPlane, "x", inputHeight, "x", inputWidth, "). ",
|
148 |
+
"Calculated output size: (",
|
149 |
+
nOutputPlane, "x", outputHeight, "x", outputWidth, "). ",
|
150 |
+
"Output size is too small");
|
151 |
+
}
|
152 |
+
|
153 |
+
// DilatedMaxPool2d (backward)
|
154 |
+
static inline void
|
155 |
+
max_pool2d_backward_shape_check(
|
156 |
+
const Tensor& input,
|
157 |
+
const Tensor& gradOutput,
|
158 |
+
const Tensor& indices,
|
159 |
+
int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
|
160 |
+
int64_t nInputPlane,
|
161 |
+
int64_t inputHeight, int64_t inputWidth,
|
162 |
+
int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
|
163 |
+
{
|
164 |
+
pool2d_shape_check(
|
165 |
+
input,
|
166 |
+
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
|
167 |
+
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, memory_format);
|
168 |
+
|
169 |
+
const int64_t ndim = input.ndimension();
|
170 |
+
const int64_t nOutputPlane = nInputPlane;
|
171 |
+
|
172 |
+
check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
|
173 |
+
check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
|
174 |
+
check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
|
175 |
+
|
176 |
+
check_dim_size(indices, ndim, ndim-3, nOutputPlane);
|
177 |
+
check_dim_size(indices, ndim, ndim-2, outputHeight);
|
178 |
+
check_dim_size(indices, ndim, ndim-1, outputWidth);
|
179 |
+
}
|
180 |
+
|
181 |
+
// AveragePool2d (backward)
|
182 |
+
static inline void
|
183 |
+
avg_pool2d_backward_shape_check(
|
184 |
+
const Tensor& input,
|
185 |
+
const Tensor& gradOutput,
|
186 |
+
int64_t /*nbatch*/,
|
187 |
+
int kH, int kW, int dH, int dW, int padH, int padW,
|
188 |
+
int64_t nInputPlane,
|
189 |
+
int64_t inputHeight, int64_t inputWidth,
|
190 |
+
int64_t outputHeight, int64_t outputWidth,
|
191 |
+
MemoryFormat memory_format)
|
192 |
+
{
|
193 |
+
pool2d_shape_check(
|
194 |
+
input,
|
195 |
+
kH, kW, dH, dW, padH, padW, 1, 1,
|
196 |
+
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
|
197 |
+
memory_format);
|
198 |
+
|
199 |
+
const int64_t ndim = input.ndimension();
|
200 |
+
const int64_t nOutputPlane = nInputPlane;
|
201 |
+
|
202 |
+
check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
|
203 |
+
check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
|
204 |
+
check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
|
205 |
+
}
|
206 |
+
|
207 |
+
// AveragePool3d/DilatedMaxPool3d (forward)
|
208 |
+
static inline void
|
209 |
+
pool3d_shape_check(
|
210 |
+
const Tensor& input,
|
211 |
+
int64_t nslices,
|
212 |
+
int kT, int kH, int kW,
|
213 |
+
int dT, int dH, int dW,
|
214 |
+
int pT, int pH, int pW,
|
215 |
+
int dilationT, int dilationH, int dilationW,
|
216 |
+
int64_t itime, int64_t iheight, int64_t iwidth,
|
217 |
+
int64_t otime, int64_t oheight, int64_t owidth,
|
218 |
+
const char *fn_name,
|
219 |
+
bool check_input_size=false)
|
220 |
+
{
|
221 |
+
const int64_t ndim = input.ndimension();
|
222 |
+
|
223 |
+
TORCH_CHECK(kT > 0 && kW > 0 && kH > 0,
|
224 |
+
"kernel size should be greater than zero, but got ",
|
225 |
+
"kT: ", kT, " kH: ", kH, " kW: ", kW);
|
226 |
+
TORCH_CHECK(dT > 0 && dW > 0 && dH > 0,
|
227 |
+
"stride should be greater than zero, but got ",
|
228 |
+
"dT: ", dT, " dH: ", dH, " dW: ", dW);
|
229 |
+
TORCH_CHECK(dilationT > 0 && dilationW > 0 && dilationH > 0,
|
230 |
+
"dilation should be greater than zero, but got ",
|
231 |
+
"dilationT: ", dilationT, " dilationH: ", dilationH, " dilationW: ", dilationW);
|
232 |
+
|
233 |
+
TORCH_CHECK(ndim == 4 || ndim == 5,
|
234 |
+
fn_name, ": Expected 4D or 5D tensor for input, but got: ", input.sizes());
|
235 |
+
|
236 |
+
for (const auto i : c10::irange(ndim)) {
|
237 |
+
if (ndim == 5 && i == 0) {
|
238 |
+
// size of batch-dim can be 0.
|
239 |
+
continue;
|
240 |
+
}
|
241 |
+
TORCH_CHECK(
|
242 |
+
input.size(i) > 0,
|
243 |
+
fn_name,
|
244 |
+
": Expected input's non-batch dimensions to have positive length,"
|
245 |
+
" but input has a shape of ",
|
246 |
+
input.sizes(),
|
247 |
+
" and non-batch dimension ",
|
248 |
+
input.size(i),
|
249 |
+
" has length zero!")
|
250 |
+
}
|
251 |
+
|
252 |
+
if (check_input_size) { // AveragePool3d
|
253 |
+
TORCH_CHECK(itime >= kT && iheight >= kH && iwidth >= kW,
|
254 |
+
"input image ", "(T: ", itime, " H: ", iheight, " W: ", iwidth, ") smaller than ",
|
255 |
+
"kernel size ", "(kT: ", kT, " kH: ", kH, " kW: ", kW, ")");
|
256 |
+
}
|
257 |
+
|
258 |
+
TORCH_CHECK(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH,
|
259 |
+
"pad should be smaller than or equal to half of kernel size, but got "
|
260 |
+
"kT: ", kT, " kW: ", kW, " kH: ", kH, " padT: ", pT, " padW: ", pW, " padH: ", pH);
|
261 |
+
|
262 |
+
TORCH_CHECK(otime >= 1 && owidth >= 1 && oheight >= 1,
|
263 |
+
"Given input size: (",
|
264 |
+
nslices,"x", itime, "x", iheight, "x", iwidth, "). ",
|
265 |
+
"Calculated output size: (",
|
266 |
+
nslices, "x", otime, "x", oheight, "x", owidth, "). ",
|
267 |
+
"Output size is too small");
|
268 |
+
}
|
269 |
+
|
270 |
+
static inline void
|
271 |
+
max_pool3d_backward_shape_check(
|
272 |
+
const Tensor& input,
|
273 |
+
const Tensor& gradOutput,
|
274 |
+
const Tensor& indices,
|
275 |
+
int64_t nslices,
|
276 |
+
int kT, int kH, int kW,
|
277 |
+
int dT, int dH, int dW,
|
278 |
+
int pT, int pH, int pW,
|
279 |
+
int dilationT, int dilationH, int dilationW,
|
280 |
+
int64_t itime, int64_t iheight, int64_t iwidth,
|
281 |
+
int64_t otime, int64_t oheight, int64_t owidth,
|
282 |
+
const char* fn_name)
|
283 |
+
{
|
284 |
+
const int64_t ndim = input.ndimension();
|
285 |
+
|
286 |
+
pool3d_shape_check(
|
287 |
+
input,
|
288 |
+
nslices,
|
289 |
+
kT, kH, kW,
|
290 |
+
dT, dH, dW,
|
291 |
+
pT, pH, pW,
|
292 |
+
dilationT, dilationH, dilationW,
|
293 |
+
itime, iheight, iwidth,
|
294 |
+
otime, oheight, owidth, fn_name);
|
295 |
+
|
296 |
+
check_dim_size(gradOutput, ndim, ndim-4, nslices);
|
297 |
+
check_dim_size(gradOutput, ndim, ndim-3, otime);
|
298 |
+
check_dim_size(gradOutput, ndim, ndim-2, oheight);
|
299 |
+
check_dim_size(gradOutput, ndim, ndim-1, owidth);
|
300 |
+
|
301 |
+
check_dim_size(indices, ndim, ndim-4, nslices);
|
302 |
+
check_dim_size(indices, ndim, ndim-3, otime);
|
303 |
+
check_dim_size(indices, ndim, ndim-2, oheight);
|
304 |
+
check_dim_size(indices, ndim, ndim-1, owidth);
|
305 |
+
}
|
306 |
+
|
307 |
+
static inline void
|
308 |
+
avg_pool3d_backward_shape_check(
|
309 |
+
const Tensor& input,
|
310 |
+
const Tensor& gradOutput,
|
311 |
+
int64_t nslices,
|
312 |
+
int kT, int kH, int kW,
|
313 |
+
int dT, int dH, int dW,
|
314 |
+
int pT, int pH, int pW,
|
315 |
+
int64_t itime, int64_t iheight, int64_t iwidth,
|
316 |
+
int64_t otime, int64_t oheight, int64_t owidth,
|
317 |
+
const char *fn_name)
|
318 |
+
{
|
319 |
+
const int64_t ndim = input.ndimension();
|
320 |
+
|
321 |
+
pool3d_shape_check(
|
322 |
+
input,
|
323 |
+
nslices,
|
324 |
+
kT, kH, kW,
|
325 |
+
dT, dH, dW,
|
326 |
+
pT, pH, pW,
|
327 |
+
1, 1, 1,
|
328 |
+
itime, iheight, iwidth,
|
329 |
+
otime, oheight, owidth,
|
330 |
+
fn_name, true);
|
331 |
+
|
332 |
+
check_dim_size(gradOutput, ndim, ndim-4, nslices);
|
333 |
+
check_dim_size(gradOutput, ndim, ndim-3, otime);
|
334 |
+
check_dim_size(gradOutput, ndim, ndim-2, oheight);
|
335 |
+
check_dim_size(gradOutput, ndim, ndim-1, owidth);
|
336 |
+
}
|
337 |
+
|
338 |
+
} // anonymous namespace
|
339 |
+
|
340 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/DispatchStub.h>
|
4 |
+
|
5 |
+
namespace c10 {
|
6 |
+
class Scalar;
|
7 |
+
}
|
8 |
+
|
9 |
+
namespace at {
|
10 |
+
|
11 |
+
struct TensorIterator;
|
12 |
+
struct TensorIteratorBase;
|
13 |
+
|
14 |
+
namespace native {
|
15 |
+
|
16 |
+
#if defined(__CUDACC__) || defined(__HIPCC__)
|
17 |
+
#define HOST_DEVICE __host__ __device__
|
18 |
+
#else
|
19 |
+
#define HOST_DEVICE
|
20 |
+
#endif
|
21 |
+
|
22 |
+
// integral power in pytorch allows for negative exponents, giving truncated integral results.
|
23 |
+
// e.g. since 2**-1==0.5, the truncated integral result is zero. 1**negative_exponent is the
|
24 |
+
// only non-zero result.
|
25 |
+
template <class T,
|
26 |
+
typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
|
27 |
+
static inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
|
28 |
+
T result = 1;
|
29 |
+
while (b) {
|
30 |
+
if (b & 1) {
|
31 |
+
result *= a;
|
32 |
+
}
|
33 |
+
b /= 2;
|
34 |
+
a *= a;
|
35 |
+
}
|
36 |
+
return result;
|
37 |
+
}
|
38 |
+
|
39 |
+
template <class T,
|
40 |
+
typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
|
41 |
+
static inline HOST_DEVICE T powi(T a, T b) {
|
42 |
+
return powi_impl(a, b);
|
43 |
+
}
|
44 |
+
|
45 |
+
template <class T,
|
46 |
+
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
|
47 |
+
static inline HOST_DEVICE T powi(T a, T b) {
|
48 |
+
if ( b < 0 ) {
|
49 |
+
if ( a == 1 ) {
|
50 |
+
return 1;
|
51 |
+
} else if ( a == -1 ) {
|
52 |
+
auto negative = (-b) % static_cast<T>(2);
|
53 |
+
return negative ? -1 : 1;
|
54 |
+
} else {
|
55 |
+
return 0;
|
56 |
+
}
|
57 |
+
}
|
58 |
+
return powi_impl(a, b);
|
59 |
+
}
|
60 |
+
|
61 |
+
using pow_tensor_tensor_fn = void (*)(TensorIteratorBase&);
|
62 |
+
using pow_tensor_scalar_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
|
63 |
+
|
64 |
+
DECLARE_DISPATCH(pow_tensor_tensor_fn, pow_tensor_tensor_stub);
|
65 |
+
DECLARE_DISPATCH(pow_tensor_scalar_fn, pow_tensor_scalar_stub);
|
66 |
+
|
67 |
+
} // namespace native
|
68 |
+
|
69 |
+
} // namespace at
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/RangeFactories.h
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/native/DispatchStub.h>
|
2 |
+
#include <c10/core/Scalar.h>
|
3 |
+
|
4 |
+
namespace at {
|
5 |
+
struct TensorIterator;
|
6 |
+
|
7 |
+
namespace native {
|
8 |
+
|
9 |
+
DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, const Scalar&), arange_stub);
|
10 |
+
DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, int64_t), linspace_stub);
|
11 |
+
|
12 |
+
}} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/native/DispatchStub.h>
|
4 |
+
|
5 |
+
namespace at {
|
6 |
+
class Tensor;
|
7 |
+
}
|
8 |
+
|
9 |
+
namespace at::native {
|
10 |
+
|
11 |
+
using reduce_all_fn = void (*)(Tensor & result, const Tensor & self);
|
12 |
+
using reduce_min_max_fn = void (*)(Tensor & max_result, Tensor & min_result, const Tensor & self);
|
13 |
+
DECLARE_DISPATCH(reduce_all_fn, min_all_stub);
|
14 |
+
DECLARE_DISPATCH(reduce_all_fn, max_all_stub);
|
15 |
+
|
16 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h
ADDED
@@ -0,0 +1,448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <limits>
|
4 |
+
#include <ATen/core/Tensor.h>
|
5 |
+
#include <ATen/native/Resize.h>
|
6 |
+
#include <ATen/native/TensorIterator.h>
|
7 |
+
#include <ATen/native/NonEmptyUtils.h>
|
8 |
+
#include <ATen/WrapDimUtilsMulti.h>
|
9 |
+
#include <c10/core/ScalarType.h>
|
10 |
+
#include <c10/util/irange.h>
|
11 |
+
|
12 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
13 |
+
#include <ATen/Functions.h>
|
14 |
+
#else
|
15 |
+
#include <ATen/ops/empty.h>
|
16 |
+
#include <ATen/ops/scalar_tensor.h>
|
17 |
+
#endif
|
18 |
+
|
19 |
+
namespace at::native {
|
20 |
+
|
21 |
+
// Maximum and minimum possible scalar values, including infinities
|
22 |
+
// Largest possible value of scalar_t: +infinity when the type has one,
// otherwise the finite maximum.
template <typename scalar_t>
constexpr scalar_t upper_bound() {
  using lim = std::numeric_limits<scalar_t>;
  if (lim::has_infinity) {
    return lim::infinity();
  }
  return lim::max();
}
|
27 |
+
|
28 |
+
// Smallest possible value of scalar_t: -infinity when the type has one,
// otherwise the finite lowest value.
template <typename scalar_t>
constexpr scalar_t lower_bound() {
  using lim = std::numeric_limits<scalar_t>;
  if (lim::has_infinity) {
    return -lim::infinity();
  }
  return lim::lowest();
}
|
33 |
+
|
34 |
+
// View of `src` with shape `replacement_shape` and stride 0 along `dim`,
// so reads along the reduced dimension repeat the same elements.
static inline Tensor restride_dim(
  const Tensor& src, int64_t dim,
  IntArrayRef replacement_shape
) {
  auto new_strides = ensure_nonempty_vec(src.strides().vec());
  new_strides[dim] = 0;
  return src.as_strided(replacement_shape, new_strides);
}
|
42 |
+
|
43 |
+
inline void _dimreduce_setup(const Tensor &result, const Tensor &self,
|
44 |
+
int64_t dim) {
|
45 |
+
IntArrayRef self_sizes = self.sizes();
|
46 |
+
std::vector<int64_t> result_sizes;
|
47 |
+
result_sizes.insert(result_sizes.end(), self_sizes.begin(), self_sizes.end());
|
48 |
+
result_sizes[dim] = 1;
|
49 |
+
result.resize_(result_sizes);
|
50 |
+
}
|
51 |
+
|
52 |
+
// Handles the trivial cases of a dim-reduction with an identity value:
//  * 0-dim input: the result is just a copy of the scalar itself;
//  * empty input: the result is filled with the identity `ident`.
// Returns true iff `result` was fully produced here, so the caller can
// skip launching the real reduction kernel.
inline bool _dimreduce_return_trivial(const Tensor &result, const Tensor &self,
                                      const Scalar& ident, int64_t dim, bool keepdim) {
  if (self.numel() == 1 && self.ndimension() == 0) {
    result.resize_({});
    result.fill_(self);
    return true;
  }
  // Return identity
  if (self.numel() == 0) {
    _dimreduce_setup(result, self, dim);
    result.fill_(ident);
    // _dimreduce_setup always produces the keepdim shape; drop the
    // size-1 dim if the caller asked for keepdim=false.
    if (!keepdim) result.squeeze_(dim);
    return true;
  }
  return false;
}
|
68 |
+
|
69 |
+
// Like _dimreduce_return_trivial but for reductions without an identity
// (e.g. min/max): only the 0-dim-input case can be answered trivially.
// dim/keepdim/fn_name are unused but kept so all reduction helpers share
// a uniform call signature.
inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
                                               int64_t /*dim*/, bool /*keepdim*/, const char* /*fn_name*/) {
  if (self.numel() == 1 && self.ndimension() == 0) {
    result.resize_({});
    result.fill_(self);
    return true;
  }

  return false;
}
|
79 |
+
|
80 |
+
// For an all-reduce over an empty tensor there is nothing to compute:
// return a 0-dim tensor holding the identity `ident`. Otherwise return
// nullopt so the caller runs the real reduction.
inline c10::optional<Tensor> _allreduce_return_trivial(
    const Tensor& self,
    const Scalar& ident) {
  if (self.numel() != 0) {
    return c10::nullopt;
  }
  // Return identity
  return at::scalar_tensor(ident, self.options());
}
|
89 |
+
|
90 |
+
// Asserts that `out` and `self` agree on the given accessor (e.g.
// scalar_type / device / layout), producing a readable mismatch error.
// `out` / `self` may be Tensors or TensorOptions — anything exposing
// the named accessor.
#define OPTION_TYPE_EQUALITY_CHECK(option, out, self) \
{ \
  TORCH_CHECK(\
    out.option() == self.option(),\
    "expected ", #option, " ",\
    self.option(),\
    " but found ", out.option())\
}
|
98 |
+
|
99 |
+
// Verifies that `out` matches `self` in dtype, device and layout — the
// properties an out= tensor is required to agree on.
static inline void check_scalar_type_device_layout_equal(const Tensor& out, const Tensor& self) {
  OPTION_TYPE_EQUALITY_CHECK(scalar_type, out, self);
  // device/layout are accessed via options(); scalar_type directly.
  OPTION_TYPE_EQUALITY_CHECK(device, out.options(), self.options());
  OPTION_TYPE_EQUALITY_CHECK(layout, out.options(), self.options());
}
|
104 |
+
|
105 |
+
// Promote integral (incl. bool) inputs to int64 before reductions like
// sum/prod; an explicitly requested `dtype` always wins.
static inline Tensor integer_upcast(const Tensor& self, c10::optional<ScalarType> dtype) {
  const ScalarType src_type = self.scalar_type();
  ScalarType promoted = src_type;
  if (at::isIntegralType(src_type, /*includeBool=*/true)) {
    promoted = ScalarType::Long;
  }
  return self.toType(dtype.value_or(promoted));
}
|
110 |
+
|
111 |
+
using DimMask = TensorIterator::DimMask;
|
112 |
+
|
113 |
+
// Materializes an optional dim list: either the dims the caller passed,
// or [0, ndim) when absent (i.e. reduce over every dimension).
static DimVector make_dim_vector(OptionalIntArrayRef opt_dims, int64_t ndim) {
  if (opt_dims.has_value()) {
    return DimVector(opt_dims.value());
  }
  // No dims given: enumerate all of them. Build the DimVector in place
  // instead of round-tripping through a heap-allocated std::vector.
  DimVector all_dims(ndim);
  std::iota(all_dims.begin(), all_dims.end(), 0);
  return all_dims;
}
|
122 |
+
|
123 |
+
// Convert an optional dim list to a bitset over dimensions.
// A missing list — and, unless allow_empty_dims, an empty one — means
// "reduce everything": a mask with every bit set.
static DimMask make_dim_mask(OptionalIntArrayRef opt_dims, int64_t ndim, bool allow_empty_dims=false) {
  if (!opt_dims.has_value()) {
    return DimMask().flip();
  }
  auto dims = opt_dims.value();
  if (dims.empty() && !allow_empty_dims) {
    return DimMask().flip();
  }
  return at::dim_list_to_bitset(dims, ndim);
}
|
137 |
+
|
138 |
+
// Output shape of reducing `self` over the dims set in `mask`:
// reduced dims become size 1 (keepdim) or are dropped entirely.
inline DimVector shape_from_dim_mask(const Tensor& self, DimMask mask, bool keepdim) {
  auto shape = DimVector(self.sizes());
  // Walk back-to-front so erasing a dim does not shift the positions of
  // dims still to be visited.
  for (int dim = shape.size() - 1; dim >= 0; dim--) {
    if (!mask[dim]) {
      continue;
    }
    if (keepdim) {
      shape[dim] = 1;
    } else {
      shape.erase(shape.begin() + dim);
    }
  }
  return shape;
}
|
151 |
+
|
152 |
+
// Resize an out= tensor to the shape the masked reduction will produce.
// `result` must already be defined; the dtype argument is unused here
// (callers validate dtype separately).
static void resize_reduction_result(
    Tensor& result, const Tensor& self, DimMask mask, bool keepdim,
    ScalarType /*dtype*/)
{
  TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
  at::native::resize_output(result, shape_from_dim_mask(self, mask, keepdim));
}
|
160 |
+
|
161 |
+
// Allocate a fresh (uninitialized) tensor shaped for reducing `self`
// over `dim` with the requested keepdim and dtype.
inline Tensor create_reduction_result(
  const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype
) {
  const auto mask = make_dim_mask(dim, self.dim());
  const auto out_shape = shape_from_dim_mask(self, mask, keepdim);
  return at::empty(out_shape, self.options().dtype(dtype));
}
|
168 |
+
|
169 |
+
// Reinterpret a keepdim=false reduction result so it broadcasts against
// the original input: each reduced dim is re-inserted with size 1 and
// stride 0. With keepdim=true the result already has that shape.
static Tensor review_reduce_result(const Tensor& result, int ndim, DimMask mask, bool keepdim) {
  if (keepdim) {
    return result;
  }
  auto shape = DimVector(result.sizes());
  auto stride = DimVector(result.strides());
  for (const auto dim : c10::irange(ndim)) {
    if (mask[dim]) {
      // Stride 0 makes every index along the restored dim alias the
      // same reduced element.
      shape.insert(shape.begin() + dim, 1);
      stride.insert(stride.begin() + dim, 0);
    }
  }
  return result.as_strided(shape, stride);
}
|
183 |
+
|
184 |
+
// Build a TensorIterator that reduces `self` over `dim_opt` into the
// out= tensor `result`, resizing `result` to the reduction shape first.
// The iterator's output is a stride-0 view of `result` over the reduced
// dims (see review_reduce_result). The input is converted to `in_dtype`
// (the accumulation dtype) when it differs from self's dtype.
static TensorIterator make_reduction(
    const char* name, Tensor& result, const Tensor& self,
    at::OptionalIntArrayRef dim_opt,
    bool keepdim, ScalarType in_dtype, ScalarType out_dtype) {
  // check that result type and dtype match if provided
  TORCH_CHECK(
      !result.defined() || result.scalar_type() == out_dtype,
      name, ": provided dtype must match dtype of result. Got ",
      toString(result.scalar_type()),
      " and ",
      toString(out_dtype),
      ".");
  // dim={} performs an all-reduce, same as dim=None
  IntArrayRef dim = dim_opt.value_or(IntArrayRef{});
  int64_t ndim = self.dim();
  auto mask = make_dim_mask(dim, ndim);
  resize_reduction_result(result, self, mask, keepdim, out_dtype);
  auto viewed_result = review_reduce_result(result, ndim, mask, keepdim);
  namedinference::propagate_names_for_reduction(result, self, dim, keepdim);
  if (self.scalar_type() == in_dtype) {
    return TensorIterator::reduce_op(viewed_result, self);
  }
  // Convert the input up front so the kernel runs in the accumulation
  // dtype.
  return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
}
|
208 |
+
|
209 |
+
// Overload that derives the accumulation dtype from the output dtype.
static C10_UNUSED TensorIterator make_reduction(
    const char* name, Tensor& result, const Tensor& self,
    at::OptionalIntArrayRef dim, bool keepdim, ScalarType out_dtype) {
  // Special case for type promotion in mixed precision, improves
  // computational efficiency: a CUDA half/bfloat16 input reduced into a
  // float output stays in its low-precision dtype. We don't generalize
  // this to arbitrary mismatched input/output types to avoid a cross
  // product of templated kernel launches.
  const bool gpu_lowp_to_f32 = (
    self.is_cuda() && (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && out_dtype == kFloat);
  // Complex inputs accumulate in the complex counterpart of out_dtype.
  auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type()
                                  : self.is_complex() ? c10::toComplexType(out_dtype)
                                                      : out_dtype;
  return make_reduction(name, result, self, dim, keepdim, in_dtype, out_dtype);
}
|
223 |
+
|
224 |
+
// Two-output variant (e.g. min/max with indices, var_mean): resizes and
// views both out= tensors over the reduced dims, then builds a single
// reduce_op iterator producing both.
static TensorIterator make_reduction(
    const char* name, Tensor& result1, Tensor& result2, const Tensor& self,
    at::OptionalIntArrayRef dim_opt, bool keepdim, ScalarType dtype1,
    ScalarType dtype2) {
  // check that result type and dtype match if provided
  TORCH_CHECK(
    (!result1.defined() || result1.scalar_type() == dtype1) && (!result2.defined() || result2.scalar_type() == dtype2),
    name, ": provided dtype must match dtype of result. Got ",
    toString(result1.scalar_type()), toString(result2.scalar_type()),
    " and ",
    toString(dtype1), toString(dtype2),
    ".");

  // dim={} performs an all-reduce, same as dim=None
  auto dim = dim_opt.value_or(IntArrayRef{});
  int64_t ndim = self.dim();
  DimMask mask = make_dim_mask(dim, ndim);
  resize_reduction_result(result1, self, mask, keepdim, dtype1);
  auto viewed_result1 = review_reduce_result(result1, ndim, mask, keepdim);

  resize_reduction_result(result2, self, mask, keepdim, dtype2);
  auto viewed_result2 = review_reduce_result(result2, ndim, mask, keepdim);

  namedinference::propagate_names_for_reduction(result1, self, dim, keepdim);
  namedinference::propagate_names_for_reduction(result2, self, dim, keepdim);

  // special case for type promotion in mixed precision, improves computational
  // efficiency.
  // We don't generalize this to common mismatched input/output types to avoid cross
  // product of templated kernel launches.
  if (self.scalar_type() == dtype1 ||
      (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) {
    return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);
  }
  // Otherwise convert the input to the first output's dtype up front.
  return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));
}
|
260 |
+
|
261 |
+
// Convenience overload: both outputs share a single dtype.
static C10_UNUSED TensorIterator make_reduction(
    const char* name, Tensor& result1, Tensor& result2, const Tensor& self,
    at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype) {
  return make_reduction(name, result1, result2, self, dim, keepdim, dtype, dtype);
}
|
266 |
+
|
267 |
+
static void zero_numel_check_dims(const Tensor& self, const int64_t dim, const char *fn_name) {
|
268 |
+
if (self.ndimension() == 0) {
|
269 |
+
TORCH_CHECK_INDEX(dim == 0 || dim == -1, fn_name,
|
270 |
+
": Expected reduction dim -1 or 0 for scalar but got ", dim);
|
271 |
+
}
|
272 |
+
else {
|
273 |
+
TORCH_CHECK_INDEX(self.size(dim) != 0, fn_name,
|
274 |
+
": Expected reduction dim ", dim, " to have non-zero size.");
|
275 |
+
}
|
276 |
+
}
|
277 |
+
|
278 |
+
static void zero_numel_check_dims(const Tensor& self, const IntArrayRef dim, const char *fn_name) {
|
279 |
+
TORCH_CHECK(
|
280 |
+
!dim.empty(),
|
281 |
+
fn_name, ": Expected reduction dim to be specified for input.numel() == 0. ",
|
282 |
+
"Specify the reduction dim with the 'dim' argument.");
|
283 |
+
for (const int64_t d : dim) {
|
284 |
+
zero_numel_check_dims(self, d, fn_name);
|
285 |
+
}
|
286 |
+
}
|
287 |
+
|
288 |
+
static std::vector<int64_t> get_zero_numel_tensor_size(
|
289 |
+
const Tensor& self,
|
290 |
+
const int64_t dim,
|
291 |
+
const bool keepdim,
|
292 |
+
const char* fn_name) {
|
293 |
+
TORCH_INTERNAL_ASSERT(self.numel() == 0, fn_name, ": Expected self.numel() == 0.");
|
294 |
+
zero_numel_check_dims(self, dim, fn_name);
|
295 |
+
std::vector<int64_t> sizes;
|
296 |
+
if (keepdim) {
|
297 |
+
sizes = self.sizes().vec();
|
298 |
+
sizes[dim] = 1;
|
299 |
+
}
|
300 |
+
else {
|
301 |
+
for (const auto d : c10::irange(self.dim())) {
|
302 |
+
if (d != dim) {
|
303 |
+
sizes.push_back(self.sizes()[d]);
|
304 |
+
}
|
305 |
+
}
|
306 |
+
}
|
307 |
+
return sizes;
|
308 |
+
}
|
309 |
+
|
310 |
+
// Resize the result tensor and indices when result.numel() == 0 depending on values of
// dim and keepdim for returning tensors containing reduction results.
// This function should be called when you are reducing a zero-numel tensor and want to
// resize the output and return it. This function exists for resizing zero-numel
// tensors when the size of the reduction dimension is non-zero.
// Both `result` and `result_indices` get the same (possibly empty) shape.
static C10_UNUSED void zero_numel_tensor_resize(Tensor& result, Tensor& result_indices,
                                                const Tensor& self, const int64_t dim,
                                                const bool keepdim, const char *fn_name) {
  auto sizes = get_zero_numel_tensor_size(self, dim, keepdim, fn_name);
  at::native::resize_output(result, sizes);
  at::native::resize_output(result_indices, sizes);
}
|
322 |
+
|
323 |
+
// Dtype a reduction over `self` should use: an explicit `dtype` wins;
// otherwise integral (incl. bool) inputs optionally promote to int64.
inline ScalarType get_dtype_from_self(
    const Tensor& self,
    const c10::optional<ScalarType>& dtype,
    bool promote_integers) {
  if (dtype.has_value()) {
    return dtype.value();
  }
  const ScalarType src_type = self.scalar_type();
  const bool promote =
      promote_integers && at::isIntegralType(src_type, /*includeBool=*/true);
  return promote ? kLong : src_type;
}
|
336 |
+
|
337 |
+
// Dtype for an out= reduction: the explicit `dtype` when given, else
// the dtype of the (required) result tensor.
inline ScalarType get_dtype_from_result(Tensor& result, c10::optional<ScalarType> dtype) {
  TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
  return dtype.value_or(result.scalar_type());
}
|
345 |
+
|
346 |
+
|
347 |
+
} // namespace at::native
|
348 |
+
|
349 |
+
namespace at::meta {
|
350 |
+
|
351 |
+
// Meta-side wrapper: output shape of reducing `self` over `dims` with
// the given keepdim (delegates to the native:: mask helpers).
static C10_UNUSED DimVector get_reduction_shape(
    const Tensor& self,
    IntArrayRef dims,
    bool keepdim,
    bool allow_empty_dims=false) {
  auto mask = native::make_dim_mask(dims, self.dim(), allow_empty_dims);
  return native::shape_from_dim_mask(self, mask, keepdim);
}
|
359 |
+
|
360 |
+
// Structured-kernel meta function for single-output reductions: wraps
// negative dims, sets output 0 to the reduction shape with `out_dtype`,
// and propagates named-tensor dimension names.
static void resize_reduction(
    impl::MetaBase& meta,
    const Tensor& self,
    OptionalIntArrayRef opt_dims,
    bool keepdim,
    ScalarType out_dtype,
    bool allow_empty_dims=false) {
  DimVector dims_ = at::native::make_dim_vector(opt_dims, self.dim());
  maybe_wrap_dims(dims_, self.dim());
  auto shape = get_reduction_shape(self, dims_, keepdim, allow_empty_dims);
  meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
  namedinference::propagate_names_for_reduction(
      meta.maybe_get_output(), self, dims_, keepdim);
}
|
374 |
+
|
375 |
+
// Meta function for reductions returning (values, indices): output 0
// gets `out_dtype`, output 1 is always int64 indices; both share the
// reduction shape and receive propagated names.
static void resize_reduction_with_indices(
    impl::MetaBase& meta,
    const Tensor& self,
    IntArrayRef dims,
    bool keepdim,
    ScalarType out_dtype) {
  DimVector dims_(dims);
  maybe_wrap_dims(dims_, self.dim());
  auto shape = get_reduction_shape(self, dims_, keepdim);
  meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
  meta.set_output_raw_strided(1, shape, {}, self.options().dtype(kLong));
  namedinference::propagate_names_for_reduction(
      meta.maybe_get_output(0), self, dims_, keepdim);
  namedinference::propagate_names_for_reduction(
      meta.maybe_get_output(1), self, dims_, keepdim);
}
|
391 |
+
|
392 |
+
// Meta-side TensorIterator builder: like native::make_reduction but the
// output was already allocated/resized by resize_reduction, so only the
// broadcast view and optional input dtype conversion remain.
static TensorIterator make_reduction(
    const Tensor& self,
    const Tensor& result,
    OptionalIntArrayRef opt_dims,
    bool keepdim,
    ScalarType in_dtype) {
  int64_t ndim = self.dim();
  auto mask = at::native::make_dim_mask(opt_dims, ndim);
  auto viewed_result =
      at::native::review_reduce_result(result, ndim, mask, keepdim);
  if (self.scalar_type() == in_dtype) {
    return TensorIterator::reduce_op(viewed_result, self);
  }
  // Convert the input to the accumulation dtype first.
  return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
}
|
407 |
+
|
408 |
+
// Meta-side two-output variant (values + indices style). dtype2 is
// unused: the second output's dtype was fixed by
// resize_reduction_with_indices.
static TensorIterator make_reduction(
    const Tensor& self,
    const Tensor& result1,
    const Tensor& result2,
    IntArrayRef dims,
    bool keepdim,
    ScalarType dtype1,
    ScalarType /*dtype2*/) {
  int64_t ndim = self.dim();
  auto mask = at::native::make_dim_mask(dims, ndim);
  auto viewed_result1 = at::native::review_reduce_result(result1, ndim, mask, keepdim);
  auto viewed_result2 = at::native::review_reduce_result(result2, ndim, mask, keepdim);
  // special case for type promotion in mixed precision, improves computational efficiency.
  // We don't generalize this to common mismatched input/output types to avoid cross product
  // of templated kernel launches.
  if (self.scalar_type() == dtype1 ||
      (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) {
    return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);
  }
  return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));
}
|
429 |
+
|
430 |
+
// Derives the accumulation dtype from the output dtype and delegates to
// make_reduction above.
static C10_UNUSED TensorIterator make_reduction_from_out_ty(
    const Tensor& self,
    const Tensor& result,
    OptionalIntArrayRef opt_dims,
    bool keepdim,
    ScalarType out_dtype) {
  // Special case for type promotion in mixed precision, improves
  // computational efficiency: a CUDA half/bfloat16 input reduced into a
  // float output stays in its low-precision dtype. We don't generalize
  // this to common mismatched input/output types to avoid a cross
  // product of templated kernel launches.
  const bool gpu_lowp_to_f32 =
      (self.is_cuda() &&
       (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) &&
       out_dtype == kFloat);
  auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() : out_dtype;
  return make_reduction(self, result, opt_dims, keepdim, in_dtype);
}
|
447 |
+
|
448 |
+
} // namespace at::meta
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/core/Scalar.h>
|
4 |
+
|
5 |
+
namespace at::native {
|
6 |
+
|
7 |
+
enum class ReductionType {MAX, MEAN, MIN, SUM, PROD};
|
8 |
+
|
9 |
+
// Map the string `reduce` argument of scatter/segment ops onto the
// ReductionType enum; "amax"/"amin" are accepted as aliases for
// "max"/"min".
static inline ReductionType get_reduction_enum(const c10::string_view& reduce) {
  if (reduce == "sum") {
    return ReductionType::SUM;
  }
  if (reduce == "prod") {
    return ReductionType::PROD;
  }
  if (reduce == "mean") {
    return ReductionType::MEAN;
  }
  if (reduce == "max" || reduce == "amax") {
    return ReductionType::MAX;
  }
  if (reduce == "min" || reduce == "amin") {
    return ReductionType::MIN;
  }
  TORCH_CHECK(false, "reduce argument must be either sum, prod, mean, amax or amin, got ", reduce);
}
|
24 |
+
|
25 |
+
// used for `scatter_reduce`, old options for BC.
|
26 |
+
static inline ReductionType get_operator_enum(const c10::string_view reduce, bool use_new_options) {
|
27 |
+
if (use_new_options) {
|
28 |
+
return get_reduction_enum(reduce);
|
29 |
+
} else {
|
30 |
+
if (reduce == "add") {
|
31 |
+
return ReductionType::SUM;
|
32 |
+
} else if (reduce == "multiply") {
|
33 |
+
return ReductionType::PROD;
|
34 |
+
} else {
|
35 |
+
TORCH_CHECK(false, "reduce argument must be either add or multiply.")
|
36 |
+
}
|
37 |
+
}
|
38 |
+
}
|
39 |
+
|
40 |
+
} // at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/TensorOperators.h>
|
5 |
+
|
6 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
7 |
+
#include <ATen/Functions.h>
|
8 |
+
#else
|
9 |
+
#include <ATen/ops/empty.h>
|
10 |
+
#include <ATen/ops/empty_like.h>
|
11 |
+
#endif
|
12 |
+
|
13 |
+
namespace at::native {
|
14 |
+
|
15 |
+
// Shared driver for repeat_interleave: validates `repeats` (1-D, int32
// or int64), computes the total output length — from `output_size` when
// supplied, otherwise from the cumulative sum — and delegates the
// actual expansion to the `compute` callback (a per-backend kernel).
template <
    typename index_t,
    void compute(index_t*, int64_t*, index_t*, int64_t, int64_t)>
static inline Tensor repeat_interleave_common(
    const Tensor& repeats,
    c10::optional<int64_t> output_size) {
  TORCH_CHECK(
      repeats.dim() == 1, "repeat_interleave only accept 1D vector as repeat");
  TORCH_CHECK(
      repeats.scalar_type() == at::kLong || repeats.scalar_type() == at::kInt,
      "repeats has to be Long or Int tensor");
  if (repeats.size(0) == 0) {
    return at::empty_like(repeats, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  Tensor repeats_ = repeats.contiguous();
  Tensor cumsum = repeats.cumsum(0);
  int64_t total;
  if (output_size.has_value()) {
    // Caller-provided length: avoids reading cumsum back via .item()
    // and skips the negativity check below.
    total = output_size.value();
  } else {
    total = cumsum[-1].item<int64_t>();
    TORCH_CHECK(
        (repeats >= 0).all().item<uint8_t>(), "repeats can not be negative");
  }

  Tensor result = at::empty({total}, repeats.options());
  index_t* repeat_ptr = repeats_.data_ptr<index_t>();
  int64_t* cumsum_ptr = cumsum.data_ptr<int64_t>();
  index_t* result_ptr = result.data_ptr<index_t>();
  compute(repeat_ptr, cumsum_ptr, result_ptr, repeats.size(0), total);
  return result;
}
|
47 |
+
|
48 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h
ADDED
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/native/ResizeCommon.h>
|
5 |
+
#include <ATen/EmptyTensor.h>
|
6 |
+
#include <ATen/TensorUtils.h>
|
7 |
+
|
8 |
+
#include <c10/core/CPUAllocator.h>
|
9 |
+
|
10 |
+
#include <utility>
|
11 |
+
|
12 |
+
|
13 |
+
namespace at::native {
|
14 |
+
|
15 |
+
// TODO: make all operations that resize given outputs use this function
|
16 |
+
// for consistency and maintainability.
|
17 |
+
// Some operations like `cat` might not be able to make the use of
|
18 |
+
// resize_output directly. For more details to understand how it works in `cat`,
|
19 |
+
// see https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
|
20 |
+
// Resizes outputs
|
21 |
+
// Functions accepting output tensors, like with the "out" kwarg, should
|
22 |
+
// call this function to handle resizing their output tensor.
|
23 |
+
// Issues a warning if the output tensor has one or more elements and
|
24 |
+
// needs resizing
|
25 |
+
// NOTE: In the future the warning will become an error
|
26 |
+
// Returns a bool saying whether or not the resize actually happened or not
|
27 |
+
TORCH_API bool resize_output(const Tensor& output, IntArrayRef shape);
|
28 |
+
// WARNING: Do NOT call this directly. If you are resizing an output and want
|
29 |
+
// to support dynamic shapes call at::resize__symint and resize_output_check_symint.
|
30 |
+
// For more details, see: https://github.com/pytorch/pytorch/pull/111530/files#r1365845272
|
31 |
+
TORCH_API bool resize_output_symint(const Tensor& output, SymIntArrayRef shape);
|
32 |
+
|
33 |
+
// Utility for resize_output
|
34 |
+
// Returns a bool saying resize should happen or not and
|
35 |
+
// raises a warning if resizing for one or more elements
|
36 |
+
TORCH_API bool resize_output_check(const Tensor& output, IntArrayRef shape);
|
37 |
+
TORCH_API bool resize_output_check_symint(const Tensor& output, SymIntArrayRef shape);
|
38 |
+
|
39 |
+
TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes);
|
40 |
+
TORCH_API void resize_bytes_meta(StorageImpl* storage, c10::SymInt size_bytes);
|
41 |
+
|
42 |
+
// Grow (never shrink) the CPU storage backing `self` so it can hold
// `new_size_bytes`; allocates a fresh resizable storage if there is
// none yet.
static inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) {
  // It does not make sense to try to resize a storage
  // to hold 0 elements, and this can break
  // if storage_offset is positive but
  // new_size is 0, so just bail in that case
  // (same comment is in cuda/Resize.h)
  if (self->numel() == 0) {
    return;
  }

  const Storage& storage = self->unsafe_storage();
  if (!storage) {
    // No storage at all: create one of the requested size
    // (resizable=true) and install it, keeping the tensor's dtype.
    auto new_storage = c10::make_intrusive<StorageImpl>(
        StorageImpl::use_byte_size_t(),
        new_size_bytes,
        c10::GetCPUAllocator(),
        true);
    self->set_storage_keep_dtype(std::move(new_storage));
  } else if (new_size_bytes > storage.nbytes()) {
    resize_bytes_cpu(storage.unsafeGetStorageImpl(), new_size_bytes);
  }
}
|
64 |
+
|
65 |
+
TORCH_API TensorImpl* resize_impl_cpu_(
|
66 |
+
TensorImpl* self,
|
67 |
+
IntArrayRef size,
|
68 |
+
at::OptionalIntArrayRef stride,
|
69 |
+
bool resize_storage = true);
|
70 |
+
|
71 |
+
// Convert a SymInt to the requested integer-like type T. Only the two
// specializations below exist; any other T is a compile-time error.
template <typename T>
T maybe_convert_symint(c10::SymInt) = delete;

// SymInt -> SymInt: pass through unchanged.
template <>
inline c10::SymInt maybe_convert_symint(c10::SymInt x) { return x; }

// SymInt -> int64_t: force the symbolic value to a concrete integer,
// guarding at this source location.
template <>
inline int64_t maybe_convert_symint(c10::SymInt x) { return x.guard_int(__FILE__, __LINE__); }
|
79 |
+
|
80 |
+
// Verify that a view described by (size, stride, storage_offset) fits
// inside `new_storage`. T is int64_t or c10::SymInt (see
// maybe_convert_symint). A view whose computed footprint is 0 bytes is
// allowed on any storage.
template <typename T>
static inline void checkInBoundsForStorage(
    ArrayRef<T> size,
    ArrayRef<T> stride,
    T storage_offset,
    const caffe2::TypeMeta& data_type,
    const Storage& new_storage) {
  T storage_size_bytes =
      at::detail::computeStorageNbytes(size, stride, data_type.itemsize());
  T storage_offset_bytes = storage_offset * data_type.itemsize();
  if (storage_size_bytes == 0) {
    // NB: (a tensor with arbitrary 0 dims)'s storage can have any numel.
    return;
  }
  T new_storage_size_bytes = maybe_convert_symint<T>(new_storage.sym_nbytes());
  TORCH_CHECK(
      storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes,
      "setStorage: sizes ",
      size,
      ", strides ",
      stride,
      ","
      " storage offset ",
      storage_offset,
      ", and itemsize ",
      data_type.itemsize(),
      " requiring a storage size of ",
      storage_size_bytes + storage_offset_bytes,
      " are out of bounds for storage of size ",
      new_storage_size_bytes);
}
|
111 |
+
|
112 |
+
// Validate the arguments of Tensor.set_(storage, offset, size, stride)
// and install `storage` on `result` (keeping result's dtype). Applying
// size/stride is left to the caller.
template <typename T>
static inline void checkSetStorage(Tensor& result, Storage storage, T storage_offset,
                                   ArrayRef<T> size, ArrayRef<T> stride) {
  // FIXME: stride should be optional
  if (stride.data()) {
    TORCH_CHECK(size.size() == stride.size(), "unequal size length (", size.size(),
                ") and stride length (", stride.size(), ")");
  }

#ifdef DEBUG
  TORCH_CHECK(size.size() <= INT_MAX, "size length (", size.size(), ") greater than INT_MAX");
#endif

  // storage: note this can't be replaced with result.set_(storage) as the semantics of that
  // function is to set the tensor size to be equal to the size of the storage.
  if (!result.storage().is_alias_of(storage)) {
    // Caffe2 might have tensors whose storages are null, but we
    // don't allow it in PyTorch.
    TORCH_INTERNAL_ASSERT(storage);
    TORCH_INTERNAL_ASSERT(result.storage());

    // We used to allow this, but this breaks device caching.
    // Let's put an actual error message for this one.
    TORCH_CHECK(result.storage().device() == storage.device(),
                "Attempted to set the storage of a tensor on device \"", result.storage().device(),
                "\" to a storage on different device \"", storage.device(),
                "\". This is no longer allowed; the devices must match.");
    result.unsafeGetTensorImpl()->set_storage_keep_dtype(std::move(storage));
  }

  // storageOffset
  TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
}
|
145 |
+
|
146 |
+
/**
 * Set self's sizes, strides, and storage_offset.
 * (size, stride, storage_offset) must be in bounds for self's storage.
 * T is int64_t or c10::SymInt. Negative strides are rejected: the
 * as_strided machinery does not support them.
 */
template <typename T>
inline void setStrided(
    const Tensor& self,
    ArrayRef<T> size,
    ArrayRef<T> stride,
    T storage_offset) {
  TORCH_CHECK(size.size() == stride.size(), "mismatch in length of strides and shape");
  for (const auto& val : stride) {
    TORCH_CHECK(val >= 0,
                "as_strided: Negative strides are not supported at the moment, "
                "got strides: ", stride);
  }

  auto* self_ = self.unsafeGetTensorImpl();
  // Reject views that would read past the end of self's storage.
  checkInBoundsForStorage(
      size, stride, storage_offset, self_->dtype(), self_->storage());

  /* storage offset */
  TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
  self_->set_sizes_and_strides(size, stride, c10::make_optional(storage_offset));
}
|
171 |
+
|
172 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Tensor.h>
|
4 |
+
#include <ATen/native/TensorFactories.h>
|
5 |
+
#include <ATen/NamedTensorUtils.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
|
8 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
9 |
+
#include <ATen/NativeFunctions.h>
|
10 |
+
#else
|
11 |
+
#include <ATen/ops/empty.h>
|
12 |
+
#endif
|
13 |
+
|
14 |
+
namespace at::native {
|
15 |
+
|
16 |
+
template <typename T>
|
17 |
+
inline T storage_size_for(ArrayRef<T> size, ArrayRef<T> stride) {
|
18 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(),
|
19 |
+
"storage_size_for(size, stride) requires that size and stride ",
|
20 |
+
"have the same size as a precondition.");
|
21 |
+
T storage_size = 1;
|
22 |
+
for (const auto dim : c10::irange(size.size())) {
|
23 |
+
if (size[dim] == 0) {
|
24 |
+
storage_size = 0;
|
25 |
+
break;
|
26 |
+
}
|
27 |
+
storage_size += (size[dim] - 1) * stride[dim];
|
28 |
+
}
|
29 |
+
return storage_size;
|
30 |
+
}
|
31 |
+
|
32 |
+
// resize_ / resize_as_ on a named tensor is only legal as a no-op: the
// requested size must equal the current size and no memory format may
// be specified. Returns `self` unchanged.
inline const Tensor& resize_named_tensor_(
    const Tensor& self,
    IntArrayRef size,
    c10::optional<MemoryFormat> optional_memory_format) {
  TORCH_INTERNAL_ASSERT(self.has_names());
  TORCH_CHECK(
      self.sizes() == size,
      "Cannot resize named tensor with resize_ or resize_as_ (tried to resize "
      "Tensor",
      self.names(),
      " with size ",
      self.sizes(),
      " to ",
      size,
      "). This may be caused by passing a named tensor ",
      "as an `out=` argument; please ensure that the sizes are the same. ");
  TORCH_CHECK(
      !optional_memory_format.has_value(),
      "Unsupported memory format for named tensor resize ",
      optional_memory_format.value());
  return self;
}
|
54 |
+
|
55 |
+
// For deterministic output, fill new elements that were added after a storage
// resize with NaN or MAX_INT. `old_storage_nbytes` is the size of the storage
// before the resize happened. Returns `tensor` unchanged (only the
// underlying storage's new tail is written).
inline const Tensor& fill_resize_deterministic_(const Tensor& tensor, int64_t old_storage_nbytes) {
  const at::Storage& storage = tensor.unsafeGetTensorImpl()->unsafe_storage();
  int64_t new_storage_nbytes = storage.nbytes();
  int64_t old_storage_numel = old_storage_nbytes / tensor.itemsize();
  int64_t new_storage_numel = new_storage_nbytes / tensor.itemsize();
  if (new_storage_numel > old_storage_numel) {
    // View just the newly-allocated tail of the storage as a 1-D tensor
    // and fill that region deterministically.
    at::Tensor tensor_view = at::empty({}, at::TensorOptions().dtype(tensor.scalar_type()).device(tensor.device()));
    tensor_view.set_(
        storage,
        /*storage_offset=*/old_storage_numel,
        /*size=*/{new_storage_numel - old_storage_numel},
        /*stride=*/{1});
    at::native::fill_empty_deterministic_(tensor_view);
  }
  return tensor;
}
|
74 |
+
|
75 |
+
} // namespace at::native
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <vector>
|
4 |
+
#include <ATen/core/Tensor.h>
|
5 |
+
#include <ATen/native/ReduceOpsUtils.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
|
8 |
+
namespace at::native {
|
9 |
+
|
10 |
+
namespace {
|
11 |
+
|
12 |
+
// checks whether index.dtype == int64
|
13 |
+
// and self.dtype == src.dtype if src is a Tensor
|
14 |
+
static void scatter_gather_dtype_check(
|
15 |
+
const std::string& method_name,
|
16 |
+
const Tensor& self,
|
17 |
+
const Tensor& index,
|
18 |
+
const c10::optional<Tensor>& src_opt = c10::nullopt
|
19 |
+
) {
|
20 |
+
if (index.numel() != 0) {
|
21 |
+
TORCH_CHECK(
|
22 |
+
index.scalar_type() == at::ScalarType::Long,
|
23 |
+
method_name, "(): Expected dtype int64 for index"
|
24 |
+
);
|
25 |
+
}
|
26 |
+
|
27 |
+
if (src_opt.has_value()) {
|
28 |
+
const auto& src = src_opt.value();
|
29 |
+
TORCH_CHECK(
|
30 |
+
self.scalar_type() == src.scalar_type(),
|
31 |
+
method_name, "(): Expected self.dtype to be equal to src.dtype"
|
32 |
+
);
|
33 |
+
}
|
34 |
+
}
|
35 |
+
|
36 |
+
// Used for `gather`-like methods
|
37 |
+
// Note: self means the input tensor here
|
38 |
+
// Test:
|
39 |
+
// 1. index.size(d) <= self.size(d) for all d != dim
|
40 |
+
// 2. index.dim() == self.dim()
|
41 |
+
static C10_UNUSED void gather_shape_check(const Tensor& self, int64_t dim,
|
42 |
+
const Tensor& index
|
43 |
+
) {
|
44 |
+
auto self_dims = ensure_nonempty_dim(self.dim());
|
45 |
+
TORCH_CHECK(self_dims == ensure_nonempty_dim(index.dim()),
|
46 |
+
"Index tensor must have the same number of dimensions as input tensor"
|
47 |
+
);
|
48 |
+
|
49 |
+
for (const auto i : c10::irange(self_dims)) {
|
50 |
+
if (i != dim) {
|
51 |
+
TORCH_CHECK(
|
52 |
+
ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
|
53 |
+
"Size does not match at dimension ", i,
|
54 |
+
" expected index ", index.sizes(),
|
55 |
+
" to be smaller than self ", self.sizes(),
|
56 |
+
" apart from dimension ", dim
|
57 |
+
);
|
58 |
+
}
|
59 |
+
}
|
60 |
+
}
|
61 |
+
|
62 |
+
// Used for `scatter` and `scatter_add`
|
63 |
+
// Tests:
|
64 |
+
// 1. index.size(d) <= self.size(d) for all d != dim
|
65 |
+
// 2. index.size(d) <= src.size(d) for all d if src is a Tensor
|
66 |
+
// 3. index.dim() == self.dim() == src.dim()
|
67 |
+
static C10_UNUSED void scatter_shape_check(
|
68 |
+
const Tensor& self, int64_t dim, const Tensor& index,
|
69 |
+
const c10::optional<Tensor>& src_opt = c10::nullopt
|
70 |
+
) {
|
71 |
+
if (index.numel() == 0) return;
|
72 |
+
TORCH_CHECK(
|
73 |
+
ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
|
74 |
+
"Index tensor must have the same number of dimensions as self tensor"
|
75 |
+
);
|
76 |
+
|
77 |
+
bool is_wrong_shape = false;
|
78 |
+
int64_t self_dims = ensure_nonempty_dim(self.dim());
|
79 |
+
|
80 |
+
// Check: index.size(d) <= self.size(d) for all d != dim
|
81 |
+
for (const auto d : c10::irange(self_dims)) {
|
82 |
+
int64_t index_d_size = ensure_nonempty_size(index, d);
|
83 |
+
if (d == dim) continue;
|
84 |
+
if (index_d_size > ensure_nonempty_size(self, d)) {
|
85 |
+
is_wrong_shape = true;
|
86 |
+
break;
|
87 |
+
}
|
88 |
+
}
|
89 |
+
|
90 |
+
// Check: index.size(d) <= src.size(d) for all d if src is Tensor
|
91 |
+
if (!is_wrong_shape && src_opt.has_value()) {
|
92 |
+
const auto& src = src_opt.value();
|
93 |
+
for (const auto d : c10::irange(self_dims)) {
|
94 |
+
int64_t index_d_size = ensure_nonempty_size(index, d);
|
95 |
+
if (index_d_size > ensure_nonempty_size(src, d)) {
|
96 |
+
is_wrong_shape = true;
|
97 |
+
break;
|
98 |
+
}
|
99 |
+
}
|
100 |
+
}
|
101 |
+
|
102 |
+
if (src_opt.has_value()) {
|
103 |
+
const auto& src = src_opt.value();
|
104 |
+
|
105 |
+
TORCH_CHECK(
|
106 |
+
ensure_nonempty_dim(src.dim()) == ensure_nonempty_dim(index.dim()),
|
107 |
+
"Index tensor must have the same number of dimensions as src tensor"
|
108 |
+
);
|
109 |
+
|
110 |
+
TORCH_CHECK(!is_wrong_shape,
|
111 |
+
"Expected index ", index.sizes(),
|
112 |
+
" to be smaller than self ", self.sizes(),
|
113 |
+
" apart from dimension ", dim,
|
114 |
+
" and to be smaller size than src ", src.sizes()
|
115 |
+
);
|
116 |
+
}
|
117 |
+
else {
|
118 |
+
TORCH_CHECK(!is_wrong_shape,
|
119 |
+
"Expected index ", index.sizes(),
|
120 |
+
" to be smaller than self ", self.sizes(),
|
121 |
+
" apart from dimension ", dim
|
122 |
+
);
|
123 |
+
}
|
124 |
+
}
|
125 |
+
|
126 |
+
} // anonymous namespace
|
127 |
+
|
128 |
+
} // namespace at::native
|