Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h +146 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h +41 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Handle.h +9 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Types.h +12 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h +18 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h +3 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Distance.h +20 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebra.h +18 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h +623 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/MaxPooling.h +97 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/PointwiseOps.h +28 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h +48 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h +173 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/TensorFactories.h +142 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/TensorShape.h +105 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/im2col_shape_check.h +232 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/vol2col.h +109 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_native.h +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h +44 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward.h +47 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode_native.h +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true_ops.h +28 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_cuda_dispatch.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token_native.h +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention.h +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_compositeexplicitautograd_dispatch.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/asin.h +44 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_native.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_meta_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_native.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h +127 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lerp.h +53 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h +23 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h
ADDED
@@ -0,0 +1,146 @@
#pragma once

#include <ATen/miopen/Exceptions.h>

#include <ATen/miopen/miopen-wrapper.h>
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>

namespace at { namespace native {

inline int dataSize(miopenDataType_t dataType)
{
  switch (dataType) {
    case miopenHalf: return 2;
    case miopenFloat: return 4;
    case miopenBFloat16: return 2;
    default: return 8;
  }
}

template <typename T, miopenStatus_t (*dtor)(T*)>
struct DescriptorDeleter {
  void operator()(T* x) {
    if (x != nullptr) {
      MIOPEN_CHECK(dtor(x));
    }
  }
};

// A generic class for wrapping MIOpen descriptor types. All you need
// is to give the underlying type the Descriptor_t points to (usually,
// if it's miopenTensorDescriptor_t it points to miopenTensorStruct),
// the constructor and the destructor. Subclasses are responsible
// for defining a set() function to actually set the descriptor.
//
// Descriptors default construct to a nullptr, and have a descriptor
// initialized the first time you call set() or any other initializing
// function.
template <typename T, miopenStatus_t (*ctor)(T**), miopenStatus_t (*dtor)(T*)>
class Descriptor
{
public:
  // Use desc() to access the underlying descriptor pointer in
  // a read-only fashion. Most client code should use this.
  // If the descriptor was never initialized, this will return
  // nullptr.
  T* desc() const { return desc_.get(); }
  T* desc() { return desc_.get(); }

  // Use mut_desc() to access the underlying descriptor pointer
  // if you intend to modify what it points to (e.g., using
  // miopenSetFooDescriptor). This will ensure that the descriptor
  // is initialized. Code in this file will use this function.
  T* mut_desc() { init(); return desc_.get(); }
protected:
  void init() {
    if (desc_ == nullptr) {
      T* raw_desc;
      MIOPEN_CHECK(ctor(&raw_desc));
      desc_.reset(raw_desc);
    }
  }
private:
  std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
};

class TensorDescriptor
  : public Descriptor<miopenTensorDescriptor,
                      &miopenCreateTensorDescriptor,
                      &miopenDestroyTensorDescriptor>
{
public:
  TensorDescriptor() {}
  explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
    set(t, pad);
  }

  void set(const at::Tensor &t, size_t pad = 0);
  void set(miopenDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);

  void print();

private:
  void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
    MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
  }
};

std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);

class FilterDescriptor
  : public Descriptor<miopenTensorDescriptor,
                      &miopenCreateTensorDescriptor,
                      &miopenDestroyTensorDescriptor>
{
public:
  void set(const at::Tensor &t, int64_t pad = 0) {
    set(t, at::MemoryFormat::Contiguous, pad);
  }

  void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);

private:
  void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
    MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
  }
};

struct ConvolutionDescriptor
  : public Descriptor<miopenConvolutionDescriptor,
                      &miopenCreateConvolutionDescriptor,
                      &miopenDestroyConvolutionDescriptor>
{
  void set(miopenDataType_t dataType, miopenConvolutionMode_t c_mode, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool deterministic) {
    MIOPEN_CHECK(miopenInitConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale, c_mode));
    MIOPEN_CHECK(miopenSetConvolutionGroupCount(mut_desc(), groups));
    MIOPEN_CHECK(miopenSetConvolutionAttribute(mut_desc(), MIOPEN_CONVOLUTION_ATTRIB_DETERMINISTIC, deterministic ? 1 : 0));
  }
};


struct RNNDescriptor
  : public Descriptor<miopenRNNDescriptor,
                      &miopenCreateRNNDescriptor,
                      &miopenDestroyRNNDescriptor>
{
  void set(int64_t hidden_size, int64_t num_layers, miopenRNNInputMode_t input_mode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnn_mode,
           miopenRNNBiasMode_t bias_mode, miopenRNNAlgo_t algorithm, miopenDataType_t datatype) {
    MIOPEN_CHECK(miopenSetRNNDescriptor(mut_desc(), hidden_size, num_layers, input_mode, direction, rnn_mode, bias_mode, algorithm, datatype));
  }
};

union Constant
{
  float f;
  double d;
  Constant(miopenDataType_t dataType, double value) {
    if (dataType == miopenHalf || dataType == miopenFloat || dataType == miopenBFloat16) {
      f = static_cast<float>(value);
    } else {
      d = value;
    }
  }
};

}} // namespace
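For illustration only (not part of the commit), a minimal sketch of how the lazily-initialized descriptor wrapper above is meant to be used. It assumes a ROCm build of PyTorch where this internal header is on the include path; the function name is hypothetical.

#include <ATen/miopen/Descriptors.h>

// Build a tensor descriptor for an input tensor. desc() stays nullptr until
// set()/mut_desc() triggers the lazy miopenCreateTensorDescriptor call, and the
// unique_ptr deleter runs miopenDestroyTensorDescriptor when `td` goes out of scope.
void describe_input(const at::Tensor& input) {
  at::native::TensorDescriptor td;
  td.set(input);          // fills dtype, sizes and strides
  auto* raw = td.desc();  // read-only raw descriptor for subsequent MIOpen calls
  (void)raw;
}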
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h
ADDED
@@ -0,0 +1,41 @@
#pragma once

#include <ATen/miopen/miopen-wrapper.h>
#include <string>
#include <stdexcept>
#include <sstream>

namespace at { namespace native {

class miopen_exception : public std::runtime_error {
public:
  miopenStatus_t status;
  miopen_exception(miopenStatus_t status, const char* msg)
      : std::runtime_error(msg)
      , status(status) {}
  miopen_exception(miopenStatus_t status, const std::string& msg)
      : std::runtime_error(msg)
      , status(status) {}
};

inline void MIOPEN_CHECK(miopenStatus_t status)
{
  if (status != miopenStatusSuccess) {
    if (status == miopenStatusNotImplemented) {
      throw miopen_exception(status, std::string(miopenGetErrorString(status)) +
              ". This error may appear if you passed in a non-contiguous input.");
    }
    throw miopen_exception(status, miopenGetErrorString(status));
  }
}

inline void HIP_CHECK(hipError_t error)
{
  if (error != hipSuccess) {
    std::string msg("HIP error: ");
    msg += hipGetErrorString(error);
    throw std::runtime_error(msg);
  }
}

}} // namespace at::native
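Illustrative sketch (not part of the commit) of how the two checking helpers are used; the MIOpen calls shown are the same ones the Descriptors.h wrapper binds to, and the function name is hypothetical.

#include <ATen/miopen/Exceptions.h>
#include <hip/hip_runtime.h>

void checked_calls() {
  miopenTensorDescriptor_t desc = nullptr;
  // Throws at::native::miopen_exception (with .status set) on failure.
  at::native::MIOPEN_CHECK(miopenCreateTensorDescriptor(&desc));
  at::native::MIOPEN_CHECK(miopenDestroyTensorDescriptor(desc));
  // Throws std::runtime_error carrying the HIP error string on failure.
  at::native::HIP_CHECK(hipGetLastError());
}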
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Handle.h
ADDED
@@ -0,0 +1,9 @@
#pragma once

#include <ATen/miopen/miopen-wrapper.h>

namespace at { namespace native {

miopenHandle_t getMiopenHandle();

}} // namespace
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Types.h
ADDED
@@ -0,0 +1,12 @@
#pragma once

#include <ATen/miopen/miopen-wrapper.h>
#include <ATen/Tensor.h>

namespace at { namespace native {

miopenDataType_t getMiopenDataType(const at::Tensor& tensor);

int64_t miopen_version();

}} // namespace at::miopen
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h
ADDED
@@ -0,0 +1,18 @@
#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/miopen/miopen-wrapper.h>
#include <ATen/miopen/Handle.h>

namespace at { namespace native {

// This function makes tensors which have zero stride contiguous, by
// setting the strides to 1.
inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
  for (auto s : t.strides()) {
    if (s == 0) return t.contiguous();
  }
  return t;
}

}}
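A small sketch (not part of the commit) of the case this helper targets: broadcasting via expand() yields views with zero strides, which the helper materializes into a dense copy while leaving other tensors untouched. It assumes a ROCm build where the header is available; the function name is hypothetical.

#include <ATen/ATen.h>
#include <ATen/miopen/Utils.h>

void materialize_broadcast() {
  at::Tensor t = at::ones({3, 1}).expand({3, 4});           // dim 1 has stride 0
  at::Tensor c = at::native::contiguousIfZeroInStrides(t);  // copies to a dense layout
  TORCH_INTERNAL_ASSERT(c.is_contiguous());
}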
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h
ADDED
@@ -0,0 +1,3 @@
#pragma once

#include <miopen/miopen.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Distance.h
ADDED
@@ -0,0 +1,20 @@
#pragma once

#include <ATen/native/DispatchStub.h>

namespace at {
class Tensor;

namespace native {

using pdist_forward_fn = void(*)(Tensor&, const Tensor&, const double p);
using pdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);
using cdist_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p);
using cdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);

DECLARE_DISPATCH(pdist_forward_fn, pdist_forward_stub);
DECLARE_DISPATCH(pdist_backward_fn, pdist_backward_stub);
DECLARE_DISPATCH(cdist_fn, cdist_stub);
DECLARE_DISPATCH(cdist_backward_fn, cdist_backward_stub);

}} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebra.h
ADDED
@@ -0,0 +1,18 @@
#pragma once

#include <ATen/native/DispatchStub.h>
#include <c10/util/Optional.h>

namespace c10 {
class Scalar;
}

namespace at {
struct TensorIterator;
}

namespace at::native {

using addr_fn = void (*)(TensorIterator &, const Scalar& beta, const Scalar& alpha);
DECLARE_DISPATCH(addr_fn, addr_stub);
} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h
ADDED
@@ -0,0 +1,623 @@
#pragma once

#include <c10/core/ScalarType.h>
#include <c10/util/irange.h>
#include <c10/util/Exception.h>
#include <c10/util/strides.h>
#include <ATen/core/Tensor.h>
#include <ATen/ExpandUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/TransposeType.h>
#include <limits>
#include <type_traits>
#include <sstream>
#include <cstring>
#include <cctype>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/zeros.h>
#endif

namespace at::native {

static inline c10::MaybeOwned<Tensor> expect_resolved_conj(const Tensor& tensor) {
  if (tensor.is_conj()) {
    return c10::MaybeOwned<Tensor>::owned(tensor.resolve_conj());
  } else {
    return c10::MaybeOwned<Tensor>::borrowed(tensor);
  }
}

static inline DimVector batched_matrix_contiguous_strides(
    const IntArrayRef sizes,
    const bool f_contig = false) {
  // f_contig chooses between the strides of a batch of Fortran (F-contiguous)
  // and C-contiguous matrices
  auto strides = c10::contiguous_strides(sizes);
  auto dim = strides.size();

  if (f_contig && dim >= 2) {
    // Fix the strides of the last two dimensions, so that we return
    // C-contiguous batches of F-contiguous matrices.
    strides[dim - 1] = std::max(sizes[dim - 2], static_cast<int64_t>(1));
    strides[dim - 2] = 1;
  }
  return strides;
}

/*
 * Clones a Tensor so that the following conditions hold:
 * If we think of a Tensor of having size (B, M, N), where B is any number
 * of batch dimensions, then:
 * - Each (M, N) matrix is in column major form
 * - Let Tensor P have size (B, M, N) and Q have size (B, M', N').
 *   Then when laid out in memory, the M by N matrix starting at
 *   P.data_ptr()[B * M * N] is of the same corresponding batch as the M' by N'
 *   matrix starting at Q.data_ptr()[B * M' * N'].
 */
static inline Tensor cloneBatchedColumnMajor(const Tensor& src) {
  // If src is already in batched column major format, then
  // this will be efficient (no reordering of the data will occur)
  // because the first transpose will make the tensor contiguous,
  // and cloning a contiguous tensor is fast.
  auto result = src.mT().clone(at::MemoryFormat::Contiguous);
  result.transpose_(-2, -1);
  return result;
}

/*
 * contig chooses between C-contig (true) and F-contig (false)
 */
static inline c10::MaybeOwned<Tensor> borrow_else_clone(const bool cond, const Tensor& borrow, const Tensor& clone, const bool contig) {
  return cond ? c10::MaybeOwned<Tensor>::borrowed(borrow)
              : c10::MaybeOwned<Tensor>::owned(contig ? clone.clone(MemoryFormat::Contiguous)
                                                      : cloneBatchedColumnMajor(clone));
}

/*
 * This method is designed to be a faster alternative to
 * `cloneBatchedColumnMajor` with some additional features,
 * namely:
 * 1. It uses `copy` instead of `clone` which could be much faster.
 * 2. `nrows` parameter used to create inputs with the number of rows larger
 *    than the original input, which is required for some LAPACK/MAGMA methods.
 * 3. `desired_batch_size` is used to create copies with the batch size
 *    which is either the original batch size of the input, or its larger
 *    broadcasted shape.
 */
static inline Tensor copyBatchedColumnMajor(const Tensor& src, int64_t nrows = -1,
    at::OptionalIntArrayRef desired_batch_sizes = c10::nullopt) {
  nrows = (nrows == -1) ? src.size(-2) : nrows;
  auto copy_sizes = desired_batch_sizes.has_value()
    ? desired_batch_sizes.value().vec()
    : IntArrayRef(src.sizes().data(), src.dim() - 2).vec();
  copy_sizes.insert(copy_sizes.end(), {nrows, src.size(-1)});
  const auto copy_strides = batched_matrix_contiguous_strides(copy_sizes, /*f-contig*/true);
  auto copy = at::empty_strided(copy_sizes, copy_strides, src.options());
  copy.narrow(-2, 0, src.size(-2)).copy_(src);
  return copy;
}

/*
 * Given batches of matrices with arbitrary batch dim,
 * computes the number of batches.
 */
static inline int64_t batchCount(const Tensor& batched_matrices) {
  int64_t result = 1;
  for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
    result *= batched_matrices.size(i);
  }
  return result;
}

// Computes the number of elements of a matrix in a batched matrix tensor
static inline int64_t matrixStride(const Tensor& batched_matrices) {
  return batched_matrices.size(-1) * batched_matrices.size(-2);
}

// Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig, eig)
static inline void checkIsMatrix(const Tensor& A, const char* const f_name, const char* const arg_name = "A") {
  TORCH_CHECK(A.dim() >= 2, f_name, ": The input tensor ", arg_name, " must have at least 2 dimensions.");
}
static inline void squareCheckInputs(const Tensor& self, const char* const f_name, const char* const arg_name = "A") {
  checkIsMatrix(self, f_name, arg_name);
  TORCH_CHECK(self.sym_size(-1) == self.sym_size(-2),
              f_name,
              ": ", arg_name, " must be batches of square matrices, "
              "but they are ", self.sym_size(-2), " by ", self.sym_size(-1), " matrices");
}

static inline void checkInputsSolver(const Tensor& A,
                                     const Tensor& B,
                                     const bool left,
                                     const char* const f_name) {
  squareCheckInputs(A, f_name, "A");
  checkIsMatrix(B, f_name, "B");
  TORCH_CHECK(left ? A.size(-2) == B.size(-2) : A.size(-1) == B.size(-1),
              f_name, ": Incompatible shapes of A and B for the equation ",
              left ? "AX = B" : "XA = B",
              " (", A.size(-2), "x", A.size(-1), " and ", B.size(-2), "x", B.size(-1), ")");
}

static inline bool is_row_or_column_contiguous(const Tensor& t) {
  // This could be made more general, similar to how it's checked in matmul, which would allow to
  // elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky.
  // We choose to be conservative for simplicity
  return t.is_contiguous() || t.transpose(-2, -1).is_contiguous();
}

static inline TransposeType to_transpose_type(const bool contig, const bool conj) {
  if (conj) {
    if (contig) { TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); }
    else { return TransposeType::ConjTranspose; }
  } else {
    if (contig) { return TransposeType::NoTranspose; }
    else { return TransposeType::Transpose; }
  }
}


// This function is designed to be used with linear algebra methods that minimize
// L(ax - b) = 0, where L is generally the identity map (`solve`, for example)
// or the L2 norm (`lstsq`).
// It is expected that `a` and `b` are contiguous tensors of column-major matrices
// (so that a.view({-1, a.size(-2), a.size(-1)}) succeeds, same for `b`),
// with the following additional properties:
//
// 1. a.dim() == b.dim()
// 2. a.shape[:-2] broadcasts over b.shape[:-2]
// 3. a.size(i) <= b.size(i) for i=0,..., a.dim() - 3 (only for batch dimensions)
//
// MAGMA/LAPACK modify tensor `a` in-place, and the main goal of this method
// is to be memory efficient, which means that if there exists an index i such that
// a.shape[i] < b.shape[i], 0 <= i <= a.dim() - 3,
// then instead of materializing copies of `a` in the broadcasted shape, we keep
// a buffer copy of `a` along with flags that check whether specific batch dimension
// indices for `a` were already accessed. If they were, we copy the data from the buffer
// into `a`. The number of copies does not exceed
// prod(max(a.shape[:-2], b.shape[:-2]) - a.shape[:-2] + 1)
// and this value is attained by tensors with non-empty batch dimensions.
//
// func_t `f` is a callable that is being supplied with
// scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx.
// a_working_ptr and b_working_ptr can directly be passed to LAPACK/MAGMA routines,
// and a_linear_batch_idx is an index in the 3d representation which corresponds to
// the memory a_working_ptr points to, in other words:
// a_working_ptr == a.view({-1, a.size(-2), a.size(-1)}.select(0, a_linear_batch_idx).data_ptr<scalar_t>();
// a_linear_batch_idx is useful to store metadata related to `a`, such as, for example,
// its rank or singular values (see linalg_lstsq).
template<typename scalar_t, typename func_t>
void batch_iterator_with_broadcasting(const Tensor& a, const Tensor& b, const func_t& f) {
  IntArrayRef a_batch_sizes(a.sizes().data(), a.dim() - 2);
  IntArrayRef b_batch_sizes(b.sizes().data(), b.dim() - 2);

  auto a_linear_batch_idx = at::arange(batchCount(a)).view(a_batch_sizes);
  auto b_linear_batch_idx = at::arange(batchCount(b)).view(b_batch_sizes);

  TensorIterator iter = TensorIteratorConfig()
    .set_check_mem_overlap(false)
    .check_all_same_dtype(false)
    .resize_outputs(false)
    .add_output(b_linear_batch_idx)
    .add_input(a_linear_batch_idx)
    .build();

  auto m = a.size(-2);
  auto n = a.size(-1);
  auto a_3d = a.view({batchCount(a), m, n});
  auto b_3d = b.view({batchCount(b), b.size(-2), b.size(-1)});

  auto a_broadcasts_over_b = (a_batch_sizes != b_batch_sizes);
  Tensor a_buffer, a_was_accessed, a_buffer_3d;
  std::function<void(int64_t)> check_if_copy_needed_for_a
    = [](int64_t /*a_curr_linear_batch_idx*/){};
  if (a_broadcasts_over_b) {
    a_buffer = at::empty_strided(a.sizes(), a.strides(), a.options())
      .copy_(a);
    a_was_accessed = at::zeros(batchCount(a), at::kBool);
    a_buffer_3d = a_buffer.view({batchCount(a), m, n});
    check_if_copy_needed_for_a = [&](int64_t a_curr_linear_batch_idx) {
      auto* a_was_accessed_flag = a_was_accessed
        .select(0, a_curr_linear_batch_idx)
        .data_ptr<bool>();
      if (!(*a_was_accessed_flag)) {
        *a_was_accessed_flag = true;
      }
      else {
        a_3d.select(0, a_curr_linear_batch_idx)
          .copy_(a_buffer_3d.select(0, a_curr_linear_batch_idx));
      }
    };
  }

  auto loop = [&](char** data, const int64_t* strides, int64_t nelems) {
    auto* b_batch_idx_ptr = data[0];
    auto* a_batch_idx_ptr = data[1];

    for (const auto elem C10_UNUSED : c10::irange(nelems)) {
      auto b_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(b_batch_idx_ptr);
      auto a_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(a_batch_idx_ptr);

      check_if_copy_needed_for_a(a_curr_linear_batch_idx);

      auto* a_working_ptr = a_3d.select(0, a_curr_linear_batch_idx)
        .data_ptr<scalar_t>();
      auto* b_working_ptr = b_3d.select(0, b_curr_linear_batch_idx)
        .data_ptr<scalar_t>();
      f(a_working_ptr, b_working_ptr, a_curr_linear_batch_idx);

      b_batch_idx_ptr += strides[0];
      a_batch_idx_ptr += strides[1];
    }
  };
  iter.serial_for_each(loop, {0, batchCount(b)});
}

// Returns the epsilon value for floating types except half
static inline double _get_epsilon(const ScalarType& sc_type) {
  switch (sc_type) {
    case at::ScalarType::Float:
      return static_cast<double>(std::numeric_limits<float>::epsilon());
    case at::ScalarType::Double:
      return std::numeric_limits<double>::epsilon();
    default:
      AT_ERROR("This function doesn't handle types other than float and double");
  }
}

// Validates input shapes and devices
// for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
static inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) {
  TORCH_CHECK(self.device() == A.device(),
              "Expected b and A to be on the same device, but found b on ",
              self.device(), " and A on ", A.device(), " instead.");

  TORCH_CHECK(self.scalar_type() == A.scalar_type(),
              "Expected b and A to have the same dtype, but found b of type ",
              self.scalar_type(), " and A of type ", A.scalar_type(), " instead.");

  TORCH_CHECK(A.size(-1) == A.size(-2),
              "A must be batches of square matrices, "
              "but they are ", A.size(-2), " by ", A.size(-1), " matrices");

  TORCH_CHECK(A.size(-1) == self.size(-2),
              "Incompatible matrix sizes for ", name, ": each A "
              "matrix is ", A.size(-1), " by ", A.size(-1),
              " but each b matrix is ", self.size(-2), " by ", self.size(-1));
}

static inline void checkFloatingOrComplex(const Tensor& t, const char* const f_name, const bool allow_low_precision_dtypes=true) {
  auto dtype = t.scalar_type();
  TORCH_CHECK((at::isFloatingType(dtype) || at::isComplexType(dtype)),
              f_name, ": Expected a floating point or complex tensor as input. Got ", dtype);
  if (!allow_low_precision_dtypes) {
    TORCH_CHECK(dtype == kFloat || dtype == kDouble || dtype == kComplexFloat || dtype == kComplexDouble,
                f_name, ": Low precision dtypes not supported. Got ", dtype);
  }
}


// Checks if all the Tensors in a TensorList are of the same dimensions
static inline void checkAllSameDim(TensorList tensors, int64_t dim) {
  for (auto &t : tensors) {
    TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead.");
  }
}

static inline std::tuple<std::vector<int64_t>, std::vector<int64_t>> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2) {
  // broadcast the batch dimensions of arg1 and arg2.
  IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
  IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
  std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);

  std::vector<int64_t> arg1_expand_size({expand_batch_portion});
  arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) });

  std::vector<int64_t> arg2_expand_size({expand_batch_portion});
  arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) });
  return std::make_tuple(std::move(arg1_expand_size), std::move(arg2_expand_size));
}

static inline std::tuple<Tensor,Tensor> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) {
  // If there's no name we assume we don't want to check the errors
  if (name != nullptr) {
    linearSolveCheckInputs(arg1, arg2, name);
  }

  auto [arg1_expand_size, arg2_expand_size] = at::native::_linalg_broadcast_batch_dims(arg1, arg2);

  auto arg1_broadcasted = arg1_expand_size == arg1.sizes() ? arg1 : arg1.expand(arg1_expand_size);
  auto arg2_broadcasted = arg2_expand_size == arg2.sizes() ? arg2 : arg2.expand(arg2_expand_size);
  return std::make_tuple(arg1_broadcasted, arg2_broadcasted);
}

static inline std::vector<int64_t> broadcast_batch_size(const Tensor& t1, const Tensor& t2, int64_t n_batch_dims) {
  IntArrayRef t1_batch_sizes(t1.sizes().data(), n_batch_dims);
  IntArrayRef t2_batch_sizes(t2.sizes().data(), n_batch_dims);
  auto broadcasted_batch_sizes = infer_size(t1_batch_sizes, t2_batch_sizes);
  return broadcasted_batch_sizes;
}

// Return a permutation with the given axes moved to the end.
static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
  const std::vector<int64_t> a = axes.vec();
  const int64_t ndim = self.ndimension();
  std::vector<int64_t> perm;

  for (const auto i : c10::irange(ndim)) {
    auto it = std::find(a.begin(), a.end(), i);
    if (it == a.end()) {
      perm.push_back(i);
    }
  }
  for (auto i : a) {
    perm.push_back(i);
  }

  TORCH_CHECK((int64_t)perm.size() == ndim,
    "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);

  return self.permute(perm);
}

// parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced)
static inline std::tuple<bool, bool> _parse_qr_mode(c10::string_view mode) {
  bool compute_q;
  bool reduced;
  if (mode == "reduced") {
    compute_q = true;
    reduced = true;
  } else if (mode == "complete") {
    compute_q = true;
    reduced = false;
  } else if (mode == "r") {
    compute_q = false;
    reduced = true; // this is actually irrelevant in this mode
  } else {
    TORCH_CHECK(false, "qr received unrecognized mode '", mode,
                "' but expected one of 'reduced' (default), 'r', or 'complete'");
  }
  return std::make_tuple(compute_q, reduced);
}

// Function to compute sizes, strides and the extra columns for the Q matrix in the QR Decomposition
static inline std::tuple<DimVector, DimVector, int64_t> _compute_geometry_for_Q(
    const Tensor& input,
    bool reduced) {
  int64_t m = input.size(-2), n = input.size(-1);
  int64_t n_columns_q;

  // We need to compute the required size of Q based on the `reduced` option
  DimVector q_sizes(input.sizes());
  if (!reduced && m > n) {
    q_sizes[input.dim() - 1] = m;
    n_columns_q = m;
  } else {
    q_sizes[input.dim() - 1] = n;
    n_columns_q = std::min(m, n);
  }
  auto q_strides = batched_matrix_contiguous_strides(q_sizes, /*f-contig*/true);
  return std::make_tuple(q_sizes, q_strides, n_columns_q);
}

static inline bool svd_uses_cusolver(const Tensor& A) {
  // if cusolver is available, it is used unconditionally
  return A.is_cuda()
         && at::globalContext().hasCuSOLVER()
         && at::globalContext().linalgPreferredBackend() != at::LinalgBackend::Magma;
}


// Function used instead of .to so that the original strides are retained
// .to doesn't retain strides and make the output tensor contiguous
static inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) {
  auto strided_to = at::empty_strided(original_tensor.sizes(),
                                      original_tensor.strides(),
                                      options);
  strided_to.copy_(original_tensor);
  return strided_to;
}

// Creates a dimension permutation array that can be given to `at::permute()`, which will shift
// the two specified dimensions to the end of a tensor, without changing the order of
// the other dimensions. `dim1` will be placed at the very end, and `dim0` will be
// placed just to the left of it.
//
// For instance, given a 4-D tensor, dimensions 1 and 3 can be shifted to the end by
// calling `create_dim_backshift_permutation(1, 3, 4)`. The resulting vector will
// be `vec(0, 2, 1, 3)`.
static inline std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) {
  TORCH_CHECK(
    (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0),
    "duplicate or invalid dimensions");
  std::vector<int64_t> permutation(ndim);
  int64_t cur_permuted_dim = 0;
  for (const auto dim_ind : c10::irange(ndim)) {
    if ((dim_ind != dim0) && (dim_ind != dim1)) {
      permutation[cur_permuted_dim++] = dim_ind;
    }
  }
  permutation[cur_permuted_dim++] = dim0;
  permutation[cur_permuted_dim] = dim1;
  return permutation;
}

// Creates a dimension permutation array that can be given to `at::permute()`, which
// will reverse a given permutation.
// The reverse permutation array is created by swapping the indices and their
// associated values from the given permutation array.
static inline std::vector<int64_t> create_reverse_permutation(std::vector<int64_t> permutation) {
  int64_t ndim = permutation.size();
  std::vector<int64_t> reverse_permutation(ndim);
  for (const auto dim_ind : c10::irange(ndim)) {
    reverse_permutation[permutation[dim_ind]] = dim_ind;
  }
  return reverse_permutation;
}

// Compute R-work array size for MAGMA/LAPACK cgesdd/zgesdd
// See https://github.com/Reference-LAPACK/lapack/blob/122506cd8b6ce050a200920c3d4c0b153b150fd8/SRC/cgesdd.f#L186
static inline int64_t computeLRWorkDim(const char jobz, int64_t m, int64_t n) {
  auto mn = std::min(m, n);
  auto mx = std::max(m, n);
  if (jobz == 'N') {
#ifdef __APPLE__
    // According to `vecLib.framework/Headers/clapack.h` Accelerate.framework is based on LAPACK 3.2.1
    return 7 * mn;
#else
    // This setting is valid for LAPACK 3.6+
    return 5 * mn;
#endif
  }
  if (mx > 10 * mn) {
    return 5 * mn * mn + 5 * mn;
  }
  return std::max(5 * mn * mn + 5 * mn, 2 * mx * mn + 2 * mn * mn + mn);
}

// This function checks whether the uplo argument input is valid
// Allowed strings are "u", "U", "l", "L"
static inline void checkUplo(const c10::string_view uplo) {
  // To use std::toupper safely with plain chars (or signed chars), the argument should first be converted to unsigned char
  char uplo_uppercase = static_cast<char>(std::toupper(static_cast<unsigned char>(uplo[0])));
  TORCH_CHECK(uplo.size() == 1 && (uplo_uppercase == 'U' || uplo_uppercase == 'L'),
    "Expected UPLO argument to be 'L' or 'U', but got ", uplo);
}

static inline void checkSameDevice(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
  TORCH_CHECK(
      result.device() == input.device(),
      fn_name,
      ": Expected ", result_name, " and input tensors to be on the same device, but got ",
      result_name, " on ", result.device(), " and input on ", input.device());
}

// Check the dtype of result and input tensors (for _out variants).
// Most linear algebra functions have the same dtype for input and output
// (either floating or complex type input), so we can check whether input's dtype can be casted to result's dtype.
// According to https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
// c10::canCast is used for checking the "safe copy" dtype requirements.
static inline void checkLinalgCompatibleDtype(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
  bool can_cast = c10::canCast(input.scalar_type(), result.scalar_type());
  TORCH_CHECK(
      can_cast,
      fn_name,
      ": Expected ", result_name, " to be safely castable from ", input.scalar_type(), " dtype, but got ",
      result_name, " with dtype ", result.scalar_type());
}

// Alternatively, we can check whether the specific expected output type (result_type) can be safely casted to out tensor dtype (out_type)
static inline void checkLinalgCompatibleDtype(const std::string& fn_name, ScalarType out_type, ScalarType result_type, const std::string& out_name = "result") {
  bool can_cast = c10::canCast(result_type, out_type);
  TORCH_CHECK(
      can_cast,
      fn_name,
      ": Expected ", out_name, " to be safely castable from ", result_type, " dtype, but got ",
      out_name, " with dtype ", out_type);
}

static inline void checkNotComplexTolerance(const Tensor& tol, const c10::string_view f_name, const c10::string_view tol_name) {
  TORCH_CHECK(!at::isComplexType(tol.scalar_type()),
              f_name, ": ", tol_name, " tensor of complex type is not supported. Got ", tol.scalar_type());
}

/*
  Two types of 'other' tensors are supported when solving
  a system of linear equations matmul(input, x) = other:
  * 1-dimensional (1D) tensor or batch of 1D tensors (vector case)
  * 2-dimensional (2D) tensor or batch of 2D tensors (matrix case).
  The original torch.solve supported only the matrix case, while NumPy works for both cases.
  For the batched input we need to be able to distinguish them.
  Let input.shape = (batch_dimensions, m, n), then 'other' is of vector type if other.shape == (batch_dimensions, m).
  This rule is compatible with NumPy, see https://github.com/numpy/numpy/blob/v1.20.0/numpy/linalg/linalg.py#L384-L389
*/
static inline bool linalg_solve_is_vector_rhs(const Tensor& input, const Tensor& other) {
  auto expected_batched_rhs_shape = SymIntArrayRef(input.sym_sizes().data(), input.dim() - 1); // input.shape[:-1]
  bool vector_case = other.dim() == 1 || (input.dim() - 1 == other.dim() && other.sym_sizes().equals(expected_batched_rhs_shape));
  return vector_case;
}

/*
  Computes linear indices for a tensor with original_shape to access its elements like it was a materialized broadcast tensor.
*/
static inline Tensor get_linear_indices(int64_t numel, IntArrayRef original_shape, IntArrayRef broadcast_shape) {
  TensorOptions options = at::TensorOptions().dtype(at::kLong).device(at::kCPU);
  return at::arange(numel, options).view(original_shape).broadcast_to(broadcast_shape).contiguous();
}

class BroadcastLinearIndices {
 private:
  Tensor linear_indices_;
  bool is_broadcasting_;

 public:
  BroadcastLinearIndices(
      int64_t numel,
      IntArrayRef original_shape,
      IntArrayRef broadcast_shape) : is_broadcasting_(!original_shape.equals(broadcast_shape)) {
    // The assumption is that the broadcast_shape is a materialized broadcast
    // shape of the original_shape. We need to compute the linear indices
    // compatible with the original_shape to access the elements in the original
    // tensor corresponding to the broadcast tensor.
    if (is_broadcasting_) {
      linear_indices_ =
          get_linear_indices(numel, original_shape, broadcast_shape);
    }
  }
  int64_t operator()(int64_t broadcast_linear_index) {
    return is_broadcasting_
        ? linear_indices_.data_ptr<int64_t>()[broadcast_linear_index]
        : broadcast_linear_index;
  }
};

static inline bool is_blas_compatible_column_major_order(const Tensor& input) {
  IntArrayRef input_strides = input.strides();
  IntArrayRef input_sizes = input.sizes();
  auto ndim = input.dim();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
  if (ndim > 3) {
    return input.transpose(-2, -1).is_contiguous();
  }
  auto leading_dimension = input_strides[ndim - 1];
  auto rows = input_sizes[ndim - 2];
  bool batch_stride_compatible = true;
  if (ndim == 3) {
    auto cols = input_sizes[ndim - 1];
    batch_stride_compatible =
        input_strides[ndim - 3] >= leading_dimension * cols;
  }
  return (input_strides[ndim - 2] == 1) &&
      (leading_dimension >= std::max<int64_t>(1, rows)) &&
      batch_stride_compatible;
}

static inline bool is_blas_compatible_row_major_order(const Tensor& input) {
  IntArrayRef input_strides = input.strides();
  IntArrayRef input_sizes = input.sizes();
  auto ndim = input.dim();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
  if (ndim > 3) {
    return input.is_contiguous();
  }
  auto leading_dimension = input_strides[ndim - 2];
  auto cols = input_sizes[ndim - 1];
  bool batch_stride_compatible = true;
  if (ndim == 3) {
    auto rows = input_sizes[ndim - 2];
    batch_stride_compatible =
        input_strides[ndim - 3] >= leading_dimension * rows;
  }
  return (input_strides[ndim - 1] == 1) &&
      (leading_dimension >= std::max<int64_t>(1, cols)) &&
      batch_stride_compatible;
}

} // namespace at::native
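Illustrative sketch (not part of the commit) of the layout contract documented for cloneBatchedColumnMajor above: the result is a C-contiguous batch of F-contiguous (column-major) matrices, so a (B, M, N) input comes back with strides (M*N, 1, M). The function name is hypothetical and the internal header is assumed to be on the include path.

#include <ATen/ATen.h>
#include <ATen/native/LinearAlgebraUtils.h>

void column_major_layout() {
  auto a = at::rand({4, 3, 5});                     // B=4, M=3, N=5, row-major
  auto f = at::native::cloneBatchedColumnMajor(a);  // same values, column-major matrices
  TORCH_INTERNAL_ASSERT(f.sizes() == a.sizes());
  TORCH_INTERNAL_ASSERT(f.stride(-2) == 1);         // columns are laid out contiguously
  TORCH_INTERNAL_ASSERT(f.stride(-1) == 3);         // leading dimension == M
  TORCH_INTERNAL_ASSERT(f.stride(0) == 15);         // batches stay C-contiguous (M*N apart)
}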
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/MaxPooling.h
ADDED
@@ -0,0 +1,97 @@
#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/Parallel.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Pool.h>

namespace at::native {

static void check_max_pool1d(
    const Tensor& self,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    bool ceil_mode) {

  TORCH_CHECK(
      self.dim() == 2 || self.dim() == 3,
      "max_pool1d() Expected 2D or 3D input tensor, but got ", self.sym_sizes());
  TORCH_CHECK(
      kernel_size.size() == 1,
      "max_pool1d() kernel_size must be an int, list of ints or tuple of ints of size 1 but got size ",
      kernel_size.size());
  TORCH_CHECK(
      stride.empty() || stride.size() == 1,
      "max_pool1d() stride must be None, an int, list of ints, or tuple of ints of size 1 but got size ",
      stride.size());
  TORCH_CHECK(
      padding.size() == 1,
      "max_pool1d() padding must be an int, list of ints, or tuple of ints of size 1 but got size ",
      padding.size());
  TORCH_CHECK(
      dilation.size() == 1,
      "max_pool1d() dilation must be an int, list of ints or tuple of ints of size 1 but got size ",
      dilation.size());

  // If stride=None then set it to kernel_size
  if (stride.empty()) {
    stride = kernel_size;
  }

  TORCH_CHECK(
      kernel_size[0] > 0,
      "max_pool1d() kernel_size must be greater than zero, but got ",
      kernel_size[0]);
  TORCH_CHECK(
      stride[0] > 0, "max_pool1d() stride must be greater than zero, but got ", stride[0]);
  TORCH_CHECK(
      padding[0] >= 0, "max_pool1d() padding must be non-negative, but got ", padding[0]);
  TORCH_CHECK(
      padding[0] <= kernel_size[0] / 2,
      "max_pool1d() padding should be at most half of kernel size, but got padding=",
      padding[0],
      " and kernel_size=",
      kernel_size[0]);
  TORCH_CHECK(
      dilation[0] > 0, "max_pool1d() dilation must be greater than zero, but got ", dilation[0]);

  const int64_t OW = pooling_output_shape(self.sym_size(-1).guard_int(__FILE__, __LINE__), kernel_size[0], padding[0], stride[0], dilation[0], ceil_mode);
  TORCH_CHECK(OW > 0, "max_pool1d() Invalid computed output size: ", OW);
}

// TODO(Heitor) Template by dimension
struct PoolingParams1D {
  int64_t NB; // Number of batches
  int64_t NC; // Number of channels
  int64_t IW; // Input width
  int64_t OW; // Output width
  int64_t KW; // Kernel width
  int64_t SJ; // Column stride
  int64_t PJ; // Column padding
  int64_t DJ; // Column dilation

  // Return index of input element for the given kernel and output index
  inline int64_t index(int64_t kj, int64_t oj) const {
    return oj * SJ + kj * DJ - PJ;
  }

  // Return index of first output within bounds for this kernel index
  inline int64_t valid_output_start(int64_t kj) const {
    int64_t ij = index(kj, 0);
    return ij < 0 ? at::divup(-ij, SJ) : 0;
  }

  // Return index one past last output within bounds for this kernel index
  inline int64_t valid_output_end(int64_t kj) const {
    int64_t ij = index(kj, OW - 1);
    return ij >= IW ? OW - at::divup(ij - (IW - 1), SJ) : OW;
  }
};

using pooling_fn = void (*)(Tensor&, const Tensor&, const PoolingParams1D&);

DECLARE_DISPATCH(pooling_fn, max_pool1d_stub);

} // namespace at::native
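A worked example (not part of the commit) of the PoolingParams1D index arithmetic: with kernel width 3, stride 2, padding 1 and dilation 1 on an input of width 8, kernel tap kj = 0 would read input column -1 for output column 0, so the first in-bounds output for that tap is column 1. The function name is hypothetical.

#include <ATen/native/MaxPooling.h>

void pooling_index_demo() {
  at::native::PoolingParams1D p{/*NB=*/1, /*NC=*/1, /*IW=*/8, /*OW=*/4,
                                /*KW=*/3, /*SJ=*/2, /*PJ=*/1, /*DJ=*/1};
  TORCH_INTERNAL_ASSERT(p.index(0, 0) == -1);           // oj*SJ + kj*DJ - PJ
  TORCH_INTERNAL_ASSERT(p.valid_output_start(0) == 1);  // first oj whose input index >= 0
  TORCH_INTERNAL_ASSERT(p.valid_output_end(0) == 4);    // every later oj keeps this tap < IW
}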
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/PointwiseOps.h
ADDED
@@ -0,0 +1,28 @@
// Ternary and higher-order pointwise operations
#pragma once

#include <ATen/native/DispatchStub.h>

namespace c10 {
class Scalar;
}

namespace at {

struct TensorIterator;
struct TensorIteratorBase;

namespace native {

using pointwise_fn = void (*)(TensorIterator&, const Scalar& scalar);
using structured_pointwise_fn = void (*)(TensorIteratorBase&, const Scalar& scalar);
using pointwise_fn_double = void (*)(TensorIterator&, const Scalar&, double);

DECLARE_DISPATCH(structured_pointwise_fn, addcmul_stub);
DECLARE_DISPATCH(structured_pointwise_fn, addcdiv_stub);
DECLARE_DISPATCH(pointwise_fn_double, smooth_l1_backward_stub);
DECLARE_DISPATCH(pointwise_fn_double, huber_backward_stub);
DECLARE_DISPATCH(pointwise_fn, mse_backward_stub);

} // namespace native
} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h
ADDED
@@ -0,0 +1,48 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <ATen/TensorOperators.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/empty.h>
+#include <ATen/ops/empty_like.h>
+#endif
+
+namespace at::native {
+
+template <
+    typename index_t,
+    void compute(index_t*, int64_t*, index_t*, int64_t, int64_t)>
+static inline Tensor repeat_interleave_common(
+    const Tensor& repeats,
+    c10::optional<int64_t> output_size) {
+  TORCH_CHECK(
+      repeats.dim() == 1, "repeat_interleave only accept 1D vector as repeat");
+  TORCH_CHECK(
+      repeats.scalar_type() == at::kLong || repeats.scalar_type() == at::kInt,
+      "repeats has to be Long or Int tensor");
+  if (repeats.size(0) == 0) {
+    return at::empty_like(repeats, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
+  }
+  Tensor repeats_ = repeats.contiguous();
+  Tensor cumsum = repeats.cumsum(0);
+  int64_t total;
+  if (output_size.has_value()) {
+    total = output_size.value();
+  } else {
+    total = cumsum[-1].item<int64_t>();
+    TORCH_CHECK(
+        (repeats >= 0).all().item<uint8_t>(), "repeats can not be negative");
+  }
+
+  Tensor result = at::empty({total}, repeats.options());
+  index_t* repeat_ptr = repeats_.data_ptr<index_t>();
+  int64_t* cumsum_ptr = cumsum.data_ptr<int64_t>();
+  index_t* result_ptr = result.data_ptr<index_t>();
+  compute(repeat_ptr, cumsum_ptr, result_ptr, repeats.size(0), total);
+  return result;
+}
+
+} // namespace at::native
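To make the contract of the compute callback above concrete: for each index i it is expected to fill result[cumsum[i-1] .. cumsum[i]) with i. The following is a minimal standalone sketch of such a callback (plain C++, no ATen; the values are illustrative only).

#include <cstdint>
#include <iostream>
#include <vector>

// Expand index i into result[cumsum[i-1] .. cumsum[i]) -- the job the
// `compute` template parameter of repeat_interleave_common performs.
static void compute(int64_t* repeats, int64_t* cumsum, int64_t* result,
                    int64_t size, int64_t /*total*/) {
  for (int64_t i = 0; i < size; ++i) {
    int64_t start = (i == 0) ? 0 : cumsum[i - 1];
    for (int64_t j = 0; j < repeats[i]; ++j) {
      result[start + j] = i;
    }
  }
}

int main() {
  std::vector<int64_t> repeats = {2, 0, 3};  // hypothetical repeat counts
  std::vector<int64_t> cumsum  = {2, 2, 5};  // inclusive prefix sum of repeats
  std::vector<int64_t> result(5);
  compute(repeats.data(), cumsum.data(), result.data(),
          static_cast<int64_t>(repeats.size()), 5);
  for (int64_t v : result) std::cout << v << ' ';  // prints: 0 0 2 2 2
  std::cout << '\n';
}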
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h
ADDED
@@ -0,0 +1,173 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <ATen/native/ResizeCommon.h>
+#include <ATen/EmptyTensor.h>
+#include <ATen/TensorUtils.h>
+
+#include <c10/core/CPUAllocator.h>
+
+#include <utility>
+
+
+namespace at::native {
+
+// TODO: make all operations that resize given outputs use this function
+// for consistency and maintainability.
+// Some operations like `cat` might not be able to make the use of
+// resize_output directly. For more details to understand how it works in `cat`,
+// see https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
+// Resizes outputs
+// Functions accepting output tensors, like with the "out" kwarg, should
+// call this function to handle resizing their output tensor.
+// Issues a warning if the output tensor has one or more elements and
+// needs resizing
+// NOTE: In the future the warning will become an error
+// Returns a bool saying whether or not the resize actually happened or not
+TORCH_API bool resize_output(const Tensor& output, IntArrayRef shape);
+// WARNING: Do NOT call this directly. If you are resizing an output and want
+// to support dynamic shapes call at::resize__symint and resize_output_check_symint.
+// For more details, see: https://github.com/pytorch/pytorch/pull/111530/files#r1365845272
+TORCH_API bool resize_output_symint(const Tensor& output, SymIntArrayRef shape);
+
+// Utility for resize_output
+// Returns a bool saying resize should happen or not and
+// raises a warning if resizing for one or more elements
+TORCH_API bool resize_output_check(const Tensor& output, IntArrayRef shape);
+TORCH_API bool resize_output_check_symint(const Tensor& output, SymIntArrayRef shape);
+
+TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes);
+TORCH_API void resize_bytes_meta(StorageImpl* storage, c10::SymInt size_bytes);
+TORCH_API void resize_bytes_nocuda(const Storage& storage, c10::SymInt size_bytes);
+
+static inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) {
+  // It does not make sense to try to resize a storage
+  // to hold 0 elements, and this can break
+  // if storage_offset is positive but
+  // new_size is 0, so just bail in that case
+  // (same comment is in cuda/Resize.h)
+  if (self->numel() == 0) {
+    return;
+  }
+
+  const Storage& storage = self->unsafe_storage();
+  if (!storage) {
+    auto new_storage = c10::make_intrusive<StorageImpl>(
+        StorageImpl::use_byte_size_t(),
+        new_size_bytes,
+        c10::GetCPUAllocator(),
+        true);
+    self->set_storage_keep_dtype(std::move(new_storage));
+  } else if (new_size_bytes > storage.nbytes()) {
+    resize_bytes_cpu(storage.unsafeGetStorageImpl(), new_size_bytes);
+  }
+}
+
+TORCH_API TensorImpl* resize_impl_cpu_(
+    TensorImpl* self,
+    IntArrayRef size,
+    at::OptionalIntArrayRef stride,
+    bool resize_storage = true);
+
+template <typename T>
+T maybe_convert_symint(c10::SymInt) = delete;
+
+template <>
+inline c10::SymInt maybe_convert_symint(c10::SymInt x) { return x; }
+
+template <>
+inline int64_t maybe_convert_symint(c10::SymInt x) { return x.guard_int(__FILE__, __LINE__); }
+
+template <typename T>
+static inline void checkInBoundsForStorage(
+    ArrayRef<T> size,
+    ArrayRef<T> stride,
+    T storage_offset,
+    const caffe2::TypeMeta& data_type,
+    const Storage& new_storage) {
+  T storage_size_bytes =
+      at::detail::computeStorageNbytes(size, stride, data_type.itemsize());
+  T storage_offset_bytes = storage_offset * data_type.itemsize();
+  if (storage_size_bytes == 0) {
+    // NB: (a tensor with arbitrary 0 dims)'s storage can have any numel.
+    return;
+  }
+  T new_storage_size_bytes = maybe_convert_symint<T>(new_storage.sym_nbytes());
+  TORCH_CHECK(
+      storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes,
+      "setStorage: sizes ",
+      size,
+      ", strides ",
+      stride,
+      ","
+      " storage offset ",
+      storage_offset,
+      ", and itemsize ",
+      data_type.itemsize(),
+      " requiring a storage size of ",
+      storage_size_bytes + storage_offset_bytes,
+      " are out of bounds for storage of size ",
+      new_storage_size_bytes);
+}
+
+template <typename T>
+static inline void checkSetStorage(Tensor& result, Storage storage, T storage_offset,
+                                   ArrayRef<T> size, ArrayRef<T> stride) {
+  // FIXME: stride should be optional
+  if (stride.data()) {
+    TORCH_CHECK(size.size() == stride.size(), "unequal size length (", size.size(),
+                ") and stride length (", stride.size(), ")");
+  }
+
+#ifdef DEBUG
+  TORCH_CHECK(size.size() <= INT_MAX, "size length (", size.size(), ") greater than INT_MAX");
+#endif
+
+  // storage: note this can't be replaced with result.set_(storage) as the semantics of that
+  // function is to set the tensor size to be equal to the size of the storage.
+  if (!result.storage().is_alias_of(storage)) {
+    // Caffe2 might have tensors whose storages are null, but we
+    // don't allow it in PyTorch.
+    TORCH_INTERNAL_ASSERT(storage);
+    TORCH_INTERNAL_ASSERT(result.storage());
+
+    // We used to allow this, but this breaks device caching.
+    // Let's put an actual error message for this one.
+    TORCH_CHECK(result.storage().device() == storage.device(),
+                "Attempted to set the storage of a tensor on device \"", result.storage().device(),
+                "\" to a storage on different device \"", storage.device(),
+                "\". This is no longer allowed; the devices must match.");
+    result.unsafeGetTensorImpl()->set_storage_keep_dtype(std::move(storage));
+  }
+
+  // storageOffset
+  TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
+}
+
+/**
+ * Set self's sizes, strides, and storage_offset.
+ * (size, stride, storage_offset) must be in bounds for self's storage.
+ */
+template <typename T>
+inline void setStrided(
+    const Tensor& self,
+    ArrayRef<T> size,
+    ArrayRef<T> stride,
+    T storage_offset) {
+  TORCH_CHECK(size.size() == stride.size(), "mismatch in length of strides and shape");
+  for (const auto& val : stride) {
+    TORCH_CHECK(val >= 0,
+                "as_strided: Negative strides are not supported at the moment, "
+                "got strides: ", stride);
+  }
+
+  auto* self_ = self.unsafeGetTensorImpl();
+  checkInBoundsForStorage(
+      size, stride, storage_offset, self_->dtype(), self_->storage());
+
+  /* storage offset */
+  TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
+  self_->set_sizes_and_strides(size, stride, c10::make_optional(storage_offset));
+}
+
+} // namespace at::native
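checkInBoundsForStorage boils down to one inequality: the byte offset one past the highest-addressed element of the strided view must not exceed the storage's nbytes. The standalone sketch below shows that bound in isolation (the helper name and the example values are made up; plain C++, not ATen).

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Smallest storage (in bytes) that can back a strided view: one element past
// the highest reachable index, scaled by itemsize. This mirrors
// computeStorageNbytes(size, stride, itemsize) + storage_offset * itemsize.
int64_t required_storage_bytes(const std::vector<int64_t>& size,
                               const std::vector<int64_t>& stride,
                               int64_t storage_offset,
                               int64_t itemsize) {
  int64_t max_index = 0;
  for (std::size_t d = 0; d < size.size(); ++d) {
    if (size[d] == 0) return 0;  // a zero-sized view needs no storage at all
    max_index += (size[d] - 1) * stride[d];
  }
  return (storage_offset + max_index + 1) * itemsize;
}

int main() {
  // Hypothetical float32 view: sizes {2, 3}, strides {3, 1}, storage offset 4.
  std::cout << required_storage_bytes({2, 3}, {3, 1}, 4, 4) << " bytes\n";  // 40
}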
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/TensorFactories.h
ADDED
@@ -0,0 +1,142 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <ATen/EmptyTensor.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
+#include <ATen/native/DispatchStub.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/scalar_tensor.h>
+#endif
+
+namespace at::native {
+// Different combinations of row, col, and offset can lead to two cases:
+//
+// Case 1 - Trapezoid (Triangle as a special case): row + offset <= col
+//    Example A: offset > 0
+//      1 1 0 0 0
+//      1 1 1 0 0
+//      1 1 1 1 0
+//    Example B: offset <= 0
+//      0 0 0
+//      1 0 0
+//      1 1 0
+//    In this case, we calculate the number of elements in the first row and
+//    last row of the tril respectively, and then compute the tril size.
+//
+// Case 2 - Trapezoid + Rectangle: row + offset > col
+//    Example:
+//      1 1 0
+//      1 1 1
+//      1 1 1
+//    In this case, we first calculate the size of top trapezoid, and then
+//    calculate the size of the bottom rectangle.
+inline int64_t get_tril_size(int64_t row, int64_t col, int64_t offset) {
+  // If either dimension is 0 then there is no tril
+  if (row == 0 || col == 0) {
+    return 0;
+  }
+  // number of elements in the first row of the tril
+  auto m_first_row = offset > 0 ?
+    std::min<int64_t>(col, 1 + offset) : // upper bounded by col
+    row + offset > 0; // either 0 or 1
+  // number of elements in the last row of the tril, bounded by [0, col]
+  auto m_last_row = std::max<int64_t>(0, std::min<int64_t>(col, row + offset));
+  // number of rows, bounded by [0, row]
+  auto n_row_all = std::max<int64_t>(0, std::min<int64_t>(row, row + offset));
+  auto n_row_trapezoid = (m_last_row - m_first_row + 1);
+
+  // calculate # of elements in the top trapezoid
+  auto tril_size = (m_first_row + m_last_row) * n_row_trapezoid >> 1;
+
+  // calculate # of elements in the bottom rectangle if there is any
+  auto diff_row = n_row_all - n_row_trapezoid;
+  if (diff_row > 0) {
+    tril_size += diff_row * col;
+  }
+
+  return tril_size;
+}
+
+inline void check_args(
+    int64_t row, int64_t col, c10::optional<Layout> layout_opt) {
+  TORCH_CHECK(row >= 0, "row must be non-negative, got", row);
+  TORCH_CHECK(col >= 0, "col must be non-negative, got", col);
+  if (layout_opt.has_value()) {
+    TORCH_CHECK(
+      *layout_opt == at::kStrided,
+      "only support layout=torch.strided, got",
+      *layout_opt)
+  }
+}
+
+using at::check_size_nonnegative;
+
+// assumes maximum value in created tensor is n-1 (e.g., torch.randperm(n))
+inline void check_supported_max_int_with_precision(int64_t n, const Tensor& tensor) {
+  // match defined() to behavior of checks below
+  TORCH_CHECK(at::scalar_tensor(n>0?n-1:n, tensor.options()).defined(),
+              "n is too large for result tensor type: '", tensor.toString(), "'");
+
+  // Ensure sufficient precision for floating point representation.
+  switch (tensor.scalar_type()) {
+    case at::ScalarType::Half:
+      TORCH_CHECK(n <= (int64_t(1) << 11) + 1, "n cannot be greater than 2049 for Half type.");
+      break;
+    case at::ScalarType::Float:
+      TORCH_CHECK(n <= (int64_t(1) << 24) + 1, "n cannot be greater than 2^24+1 for Float type.");
+      break;
+    case at::ScalarType::Double:  // Unlikely to happen, but doesn't hurt to check
+      TORCH_CHECK(n <= (int64_t(1) << 53) + 1, "n cannot be greater than 2^53+1 for Double type.");
+      break;
+    default:
+      break;
+  }
+}
+
+// Called by `empty*` functions when deterministic algorithms are enabled to
+// fill the tensor with NaN if it is floating point or complex type, or fill
+// with max value if it is integer type
+inline Tensor& fill_empty_deterministic_(Tensor& tensor) {
+  if (tensor.is_floating_point() || tensor.is_complex()) {
+    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
+      kBFloat16, kHalf, tensor.scalar_type(), "fill_empty_deterministic_", [&]() {
+        tensor.fill_(std::numeric_limits<scalar_t>::quiet_NaN());
+    });
+  } else {
+    AT_DISPATCH_V2(
+      tensor.scalar_type(), "fill_empty_deterministic_", AT_WRAP([&]() {
+        tensor.fill_(std::numeric_limits<scalar_t>::max());
+    }), kBool, AT_EXPAND(AT_INTEGRAL_TYPES_V2));
+  }
+  return tensor;
+}
+
+// The ZeroTensor allocator ignores whatever allocation is requested and always
+// gives you nullptr
+struct ZeroTensorAllocator final : public at::Allocator {
+  ZeroTensorAllocator(at::Device device) : device_(device) {};
+  ~ZeroTensorAllocator() override = default;
+  static void deleter(void* const pointer) {
+    TORCH_INTERNAL_ASSERT(!pointer);
+  }
+  DataPtr allocate(const size_t /*nbytes*/) override {
+    return {nullptr, nullptr, &deleter, device_};
+  }
+  DeleterFnPtr raw_deleter() const override {
+    return deleter;
+  }
+  void copy_data(void* dest, const void* src, std::size_t count) const final {}
+  at::Device device_;
+};
+
+using binary_fn = void (*)(TensorIterator&);
+
+DECLARE_DISPATCH(binary_fn, complex_stub);
+DECLARE_DISPATCH(binary_fn, polar_stub);
+
+} // namespace at::native
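The two cases described in the comment above are easy to sanity-check numerically. This standalone sketch reimplements the same formula outside of ATen and evaluates one trapezoid-only case and one trapezoid-plus-rectangle case (the sizes are illustrative).

#include <algorithm>
#include <cstdint>
#include <iostream>

// Same arithmetic as get_tril_size, written as a free-standing function.
int64_t tril_size(int64_t row, int64_t col, int64_t offset) {
  if (row == 0 || col == 0) return 0;
  int64_t m_first_row = offset > 0 ? std::min<int64_t>(col, 1 + offset)
                                   : (row + offset > 0);  // 0 or 1
  int64_t m_last_row = std::max<int64_t>(0, std::min<int64_t>(col, row + offset));
  int64_t n_row_all = std::max<int64_t>(0, std::min<int64_t>(row, row + offset));
  int64_t n_row_trapezoid = m_last_row - m_first_row + 1;
  int64_t size = (m_first_row + m_last_row) * n_row_trapezoid >> 1;  // trapezoid area
  int64_t diff_row = n_row_all - n_row_trapezoid;
  if (diff_row > 0) size += diff_row * col;                          // bottom rectangle
  return size;
}

int main() {
  // Case 1 (trapezoid only): 3 x 5 with offset 1 -> rows of 2, 3, 4 ones = 9.
  std::cout << tril_size(3, 5, 1) << '\n';  // 9
  // Case 2 (trapezoid + rectangle): 3 x 3 with offset 1 -> 2 + 3 + 3 = 8.
  std::cout << tril_size(3, 3, 1) << '\n';  // 8
}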
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/TensorShape.h
ADDED
@@ -0,0 +1,105 @@
+#pragma once
+#include <ATen/core/Tensor.h>
+#include <c10/util/irange.h>
+#include <ATen/core/IListRef.h>
+
+namespace at::native {
+
+TORCH_API at::Tensor clone_preserve_strides(const at::Tensor& self);
+
+inline bool cat_should_skip_tensor(const Tensor& t) {
+  return t.numel() == 0 && t.dim() == 1;
+}
+
+// Check to see if the shape of tensors is compatible
+// for being concatenated along a given dimension.
+inline void check_cat_shape_except_dim(const Tensor & first, const Tensor & second, int64_t dimension, int64_t index) {
+  int64_t first_dims = first.dim();
+  int64_t second_dims = second.dim();
+  TORCH_CHECK(first_dims == second_dims, "Tensors must have same number of dimensions: got ",
+              first_dims, " and ", second_dims);
+  for (const auto dim : c10::irange(first_dims)) {
+    if (dim == dimension) {
+      continue;
+    }
+    int64_t first_dim_size = first.sizes()[dim];
+    int64_t second_dim_size = second.sizes()[dim];
+    TORCH_CHECK(first_dim_size == second_dim_size, "Sizes of tensors must match except in dimension ",
+                dimension, ". Expected size ", static_cast<long long>(first_dim_size), " but got size ", static_cast<long long>(second_dim_size), " for tensor number ", index, " in the list.");
+  }
+}
+
+inline void check_cat_no_zero_dim(const MaterializedITensorListRef& tensors) {
+  int64_t i = 0;
+  for(const Tensor& t : tensors) {
+    TORCH_CHECK(t.dim() > 0,
+                "zero-dimensional tensor (at position ", i, ") cannot be concatenated");
+    i++;
+  }
+}
+
+inline int64_t get_num_splits(const Tensor& self, int64_t split_size, int64_t dim) {
+  TORCH_CHECK(self.dim() != 0, "split expects at least a 1-dimensional tensor");
+  TORCH_CHECK(split_size >= 0, "split expects split_size be non-negative, but got split_size=", split_size);
+  int64_t dim_size = self.size(dim);
+  TORCH_CHECK(split_size > 0 || dim_size == 0,
+              "split_size can only be 0 if dimension size is 0, "
+              "but got dimension size of ", dim_size);
+  // if split_size is 0 and dimension size is 0, there is 1 split.
+  int64_t num_splits = 1;
+  if (split_size != 0) {
+    // ensuring num_splits is at least 1 makes consistent the case where split_size > dim_size
+    // (returns a single split). We might want to error here, but keep it for BC.
+    num_splits = std::max<int64_t>((dim_size + split_size - 1) / split_size, 1);
+  }
+  return num_splits;
+}
+
+inline bool have_same_ndims(TensorList tensors) {
+  auto ndim = tensors[0].dim();
+  for (const auto tensor_idx : c10::irange(tensors.size())) {
+    if(tensors[tensor_idx].dim() != ndim) {
+      return false;
+    }
+  }
+  return true;
+}
+
+inline void leading_dimension_matches(TensorList tensors, int64_t dim) {
+  auto tensor_zero_size = tensors[0].sizes();
+  std::vector<c10::SymInt> leading_dim_sizes(tensor_zero_size.begin(), tensor_zero_size.begin() + dim);
+  for (const auto i : c10::irange(tensors.size())) {
+    at::Tensor tensor = tensors[i];
+    for(const auto j : c10::irange(dim)) {
+      TORCH_CHECK(
+        tensor.size(j) == leading_dim_sizes[j],
+        "_chunk_cat expects same sizes of 0,...,dim-1 dimensions for all tensors"
+      );
+    }
+  }
+}
+
+inline int64_t preprocess_chunk_cat_inputs(TensorList tensors, int64_t dim, int64_t num_chunks) {
+  TORCH_CHECK(num_chunks >= 1, "_chunk_cat expects positive num_chunks");
+  TORCH_CHECK(!tensors.empty(),
+              "_chunk_cat expects a non-empty input tensor list");
+  auto expected_dtype = tensors[0].dtype();
+  auto expected_device = tensors[0].device();
+  for(const auto i : c10::irange(tensors.size())) {
+    TORCH_CHECK(tensors[i].numel() > 0, "_chunk_cat expects non-empty tensor");
+    TORCH_CHECK(tensors[i].dtype() == expected_dtype, "_chunk_cat expects all input tensors with the same dtype");
+    TORCH_CHECK(tensors[i].device() == expected_device, "_chunk_cat expects all inputs tensors on the same device");
+  }
+  if (have_same_ndims(tensors)) {
+    dim = maybe_wrap_dim(dim, tensors[0].dim());
+  } else {
+    TORCH_CHECK(dim >= 0, "_chunk_cat expects non-negative dim when input tensors have different ndims")
+    for(const auto i : c10::irange(tensors.size())) {
+      TORCH_CHECK(dim < tensors[i].ndimension(), "_chunk_cat expects dim < ndim for all input tensors");
+    }
+  }
+  leading_dimension_matches(tensors, dim);
+  return dim;
+}
+
+} // namespace at::native
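get_num_splits is just a ceiling division clamped to a minimum of one split; the short standalone sketch below reproduces that arithmetic with a few illustrative values (plain C++, not ATen).

#include <algorithm>
#include <cstdint>
#include <iostream>

// Same arithmetic as get_num_splits: ceil(dim_size / split_size), floored at 1.
int64_t num_splits(int64_t dim_size, int64_t split_size) {
  if (split_size == 0) return 1;  // only legal when dim_size == 0
  return std::max<int64_t>((dim_size + split_size - 1) / split_size, 1);
}

int main() {
  std::cout << num_splits(10, 3) << '\n';  // 4 (chunks of 3, 3, 3, 1)
  std::cout << num_splits(2, 5)  << '\n';  // 1 (split_size larger than the dimension)
  std::cout << num_splits(0, 0)  << '\n';  // 1 (both zero -> a single empty split)
}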
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/im2col_shape_check.h
ADDED
@@ -0,0 +1,232 @@
+#pragma once
+#include <ATen/core/Tensor.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/div_rtn.h>
+
+namespace at::native {
+
+static inline void col2im_shape_check(
+    const Tensor& input,
+    const Tensor& grad_output,
+    int64_t output_height,
+    int64_t output_width,
+    int64_t kernel_height,
+    int64_t kernel_width,
+    int64_t dilation_height,
+    int64_t dilation_width,
+    int64_t pad_height,
+    int64_t pad_width,
+    int64_t stride_height,
+    int64_t stride_width) {
+  TORCH_CHECK(
+      kernel_width > 0 && kernel_height > 0,
+      "kernel size should be greater than zero, but got kernel_height: ",
+      kernel_height,
+      " kernel_width: ",
+      kernel_width);
+  TORCH_CHECK(
+      stride_width > 0 && stride_height > 0,
+      "stride should be greater than zero, but got stride_height: ",
+      stride_height,
+      " stride_width: ",
+      stride_width);
+  TORCH_CHECK(
+      dilation_width > 0 && dilation_height > 0,
+      "dilation should be greater than zero, but got dilation_height: ",
+      dilation_height,
+      " dilation_width: ",
+      dilation_width);
+  TORCH_CHECK(
+      pad_width >= 0 && pad_height >= 0,
+      "padding should be non-negative, but got pad_height: ",
+      pad_height,
+      " pad_width: ",
+      pad_width);
+
+
+  int64_t ndim = input.ndimension();
+  // allow dim=0 only the batch dimension.
+  TORCH_CHECK(
+      (ndim == 2 && input.size(0) != 0 && input.size(1) != 0) ||
+      (ndim == 3 && input.size(1) != 0 && input.size(2) != 0),
+      "Expected 2D or 3D (batch mode) tensor for input with possibly 0 batch size and non-zero dimensions for input, but got: ",
+      input.sizes());
+
+  int64_t batch_dim = (ndim == 3) ? 0 : -1;
+  int64_t n_input_plane = input.size(batch_dim + 1);
+
+  if (n_input_plane % (kernel_width * kernel_height) != 0) {
+    AT_ERROR(
+        "Expected size of input's dimension 1 to be divisible by the "
+        "product of kernel_size, but got input.size(1)=",
+        n_input_plane,
+        " and kernel_size=(",
+        kernel_height,
+        ", ",
+        kernel_width,
+        ").");
+  }
+
+  int64_t input_length = input.size(batch_dim + 2);
+  int64_t n_blocks_height =
+      div_rtn<int64_t>(
+          output_height + 2 * pad_height -
+              dilation_height * (kernel_height - 1) - 1,
+          stride_height) +
+      1;
+  int64_t n_blocks_width = div_rtn<int64_t>(
+                               output_width + 2 * pad_width -
+                                   dilation_width * (kernel_width - 1) - 1,
+                               stride_width) +
+      1;
+
+  if (input_length != (n_blocks_height * n_blocks_width)) {
+    AT_ERROR(
+        "Given output_size=(",
+        output_height,
+        ", ",
+        output_width,
+        "), kernel_size=(",
+        kernel_height,
+        ", ",
+        kernel_width,
+        "), dilation=(",
+        dilation_height,
+        ", ",
+        dilation_width,
+        "), padding=(",
+        pad_height,
+        ", ",
+        pad_width,
+        "), stride=(",
+        stride_height,
+        ", ",
+        stride_width,
+        "), expected size of input's dimension 2 to match the calculated number of ",
+        "sliding blocks ",
+        n_blocks_height,
+        " * ",
+        n_blocks_width,
+        " = ",
+        (n_blocks_height * n_blocks_width),
+        ", but got input.size(2)=",
+        input_length,
+        ".");
+  }
+
+  TORCH_CHECK(
+      n_blocks_height >= 1 && n_blocks_width >= 1,
+      "Given output_size=(", output_height, ", ", output_width, "), ",
+      "kernel_size=(", kernel_height, ", ", kernel_width, "), ",
+      "dilation=(", dilation_height, ", ", dilation_width, "), ",
+      "padding=(", pad_height, ", ", pad_width, "), ",
+      "stride=(", stride_height, ", ", stride_width, "), ",
+      "calculated shape of the array of sliding blocks as ",
+      "(", n_blocks_height, ", ", n_blocks_width, "), ",
+      "which is too small (non-positive)");
+
+  if (output_width < 1 || output_height < 1) {
+    AT_ERROR(
+        "Expected output spatial size to be positive, but got: output_size=(",
+        output_height,
+        ", ",
+        output_width,
+        ").");
+  }
+}
+
+static inline void im2col_shape_check(
+    const Tensor& input,
+    const Tensor& grad_output,
+    int64_t kernel_height,
+    int64_t kernel_width,
+    int64_t dilation_height,
+    int64_t dilation_width,
+    int64_t pad_height,
+    int64_t pad_width,
+    int64_t stride_height,
+    int64_t stride_width) {
+  TORCH_CHECK(
+      kernel_width > 0 && kernel_height > 0,
+      "kernel size should be greater than zero, but got kernel_height: ",
+      kernel_height,
+      " kernel_width: ",
+      kernel_width);
+
+  TORCH_CHECK(
+      dilation_width > 0 && dilation_height > 0,
+      "dilation should be greater than zero, but got dilation_height: ",
+      dilation_height,
+      " dilation_width: ",
+      dilation_width);
+
+  TORCH_CHECK(
+      pad_width >= 0 && pad_height >= 0,
+      "padding should be non-negative, but got pad_height: ",
+      pad_height,
+      " pad_width: ",
+      pad_width);
+
+  TORCH_CHECK(
+      stride_width > 0 && stride_height > 0,
+      "stride should be greater than zero, but got stride_height: ",
+      stride_height,
+      " stride_width: ",
+      stride_width);
+
+  int64_t ndim = input.ndimension();
+
+  // allow dim=0 only the batch dimension.
+  bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
+  TORCH_CHECK(
+      (ndim == 3 && input.size(0) && valid_dims) ||
+      (ndim == 4 && valid_dims && input.size(3) != 0),
+      "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
+      input.sizes());
+
+  int64_t dim_batch = 0;
+
+  if (ndim == 3) {
+    dim_batch = -1;
+  }
+
+  int64_t input_height = input.size(dim_batch + 2);
+  int64_t input_width = input.size(dim_batch + 3);
+  int64_t output_height = div_rtn<int64_t>(
+                              input_height + 2 * pad_height -
+                                  (dilation_height * (kernel_height - 1) + 1),
+                              stride_height) +
+      1;
+  int64_t output_width = div_rtn<int64_t>(
+                             input_width + 2 * pad_width -
+                                 (dilation_width * (kernel_width - 1) + 1),
+                             stride_width) +
+      1;
+
+  if (output_height < 1 || output_width < 1) {
+    AT_ERROR(
+        "Given input with spatial size (",
+        input_height,
+        ", ",
+        input_width,
+        "), kernel_size=(",
+        kernel_height,
+        ", ",
+        kernel_width,
+        "), dilation=(",
+        dilation_height,
+        ", ",
+        dilation_width,
+        "), padding=(",
+        pad_height,
+        ", ",
+        pad_width,
+        "), calculated shape of the array of sliding blocks as (",
+        output_height,
+        ", ",
+        output_width,
+        "), but its components must be at least one.");
+  }
+}
+
+} // namespace at::native
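Both checks rely on the standard convolution output-size expression floor((in + 2*pad - (dilation*(kernel-1) + 1)) / stride) + 1. A minimal standalone sketch with made-up sizes is shown below; it uses plain integer division, which matches div_rtn for the non-negative operands that occur once the parameter checks above have passed.

#include <cstdint>
#include <iostream>

// Number of kernel placements along one spatial dimension.
int64_t conv_out_size(int64_t in, int64_t kernel, int64_t pad,
                      int64_t stride, int64_t dilation) {
  return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
  // e.g. a 32-pixel dimension, 3-wide kernel, padding 1, stride 2, dilation 1 -> 16.
  std::cout << conv_out_size(32, 3, 1, 2, 1) << '\n';  // 16
  // With dilation 2 the effective kernel extent grows to 5, so only 15 placements fit.
  std::cout << conv_out_size(32, 3, 1, 2, 2) << '\n';  // 15
}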
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/vol2col.h
ADDED
@@ -0,0 +1,109 @@
+#pragma once
+
+#include <cstring>
+
+namespace at::native {
+
+template <typename T>
+static void vol2col(
+    const T* data_vol,
+    const int64_t channels,
+    const int64_t depth,
+    const int64_t height,
+    const int64_t width,
+    const int64_t depth_col,
+    const int64_t height_col,
+    const int64_t width_col,
+    const int64_t kT,
+    const int64_t kernel_height,
+    const int64_t kernel_width,
+    const int64_t pT,
+    const int64_t pH,
+    const int64_t pW,
+    const int64_t dT,
+    const int64_t dH,
+    const int64_t dW,
+    const int64_t dilationT,
+    const int64_t dilationH,
+    const int64_t dilationW,
+    T* data_col) {
+  int64_t c, t, h, w;
+  int64_t channels_col = channels * kT * kernel_height * kernel_width;
+  for (c = 0; c < channels_col; ++c) {
+    int64_t w_offset = c % kernel_width;
+    int64_t h_offset = (c / kernel_width) % kernel_height;
+    int64_t t_offset = (c / kernel_width / kernel_height) % kT;
+    int64_t c_vol = c / kT / kernel_height / kernel_width;
+    for (t = 0; t < depth_col; ++t) {
+      int64_t t_pad = t * dT - pT + t_offset * dilationT;
+      for (h = 0; h < height_col; ++h) {
+        int64_t h_pad = h * dH - pH + h_offset * dilationH;
+        for (w = 0; w < width_col; ++w) {
+          int64_t w_pad = w * dW - pW + w_offset * dilationW;
+          if (t_pad >= 0 && t_pad < depth && h_pad >= 0 && h_pad < height &&
+              w_pad >= 0 && w_pad < width)
+            data_col[((c * depth_col + t) * height_col + h) * width_col + w] =
+                data_vol
+                    [((c_vol * depth + t_pad) * height + h_pad) * width +
+                     w_pad];
+          else
+            data_col[((c * depth_col + t) * height_col + h) * width_col + w] =
+                0;
+        }
+      }
+    }
+  }
+}
+
+template <typename T>
+static void col2vol(
+    const T* data_col,
+    const int64_t channels,
+    const int64_t depth,
+    const int64_t height,
+    const int64_t width,
+    const int64_t out_depth,
+    const int64_t out_height,
+    const int64_t out_width,
+    const int64_t kT,
+    const int64_t kernel_height,
+    const int64_t kernel_width,
+    const int64_t pT,
+    const int64_t pH,
+    const int64_t pW,
+    const int64_t dT,
+    const int64_t dH,
+    const int64_t dW,
+    const int64_t dilationT,
+    const int64_t dilationH,
+    const int64_t dilationW,
+    T* data_vol) {
+  memset(data_vol, 0, sizeof(T) * depth * height * width * channels);
+  int64_t depth_col = out_depth;
+  int64_t height_col = out_height;
+  int64_t width_col = out_width;
+  int64_t channels_col = channels * kT * kernel_height * kernel_width;
+  for (int64_t c = 0; c < channels_col; ++c) {
+    int64_t w_offset = c % kernel_width;
+    int64_t h_offset = (c / kernel_width) % kernel_height;
+    int64_t t_offset = (c / kernel_width / kernel_height) % kT;
+    int64_t c_vol = c / kT / kernel_height / kernel_width;
+    for (int64_t t = 0; t < depth_col; ++t) {
+      int64_t t_pad = t * dT - pT + t_offset * dilationT;
+      for (int64_t h = 0; h < height_col; ++h) {
+        int64_t h_pad = h * dH - pH + h_offset * dilationH;
+        for (int64_t w = 0; w < width_col; ++w) {
+          int64_t w_pad = w * dW - pW + w_offset * dilationW;
+          if (t_pad >= 0 && t_pad < depth && h_pad >= 0 && h_pad < height &&
+              w_pad >= 0 && w_pad < width)
+            data_vol
+                [((c_vol * depth + t_pad) * height + h_pad) * width + w_pad] +=
+                data_col
+                    [((c * depth_col + t) * height_col + h) * width_col + w];
+        }
+      }
+    }
+  }
+}
+
+} // namespace at::native
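The core of both routines is the decomposition of the flattened column-channel index c into (c_vol, t_offset, h_offset, w_offset). The standalone sketch below shows just that decomposition for a hypothetical 2x3x3 kernel over 4 input channels (values are illustrative, not ATen code).

#include <cstdint>
#include <iostream>

int main() {
  const int64_t kT = 2, kH = 3, kW = 3, channels = 4;
  const int64_t channels_col = channels * kT * kH * kW;  // 72 column channels
  for (int64_t c : {0, 17, 18, 71}) {
    int64_t w_offset = c % kW;              // position inside the kernel, width
    int64_t h_offset = (c / kW) % kH;       // position inside the kernel, height
    int64_t t_offset = (c / kW / kH) % kT;  // position inside the kernel, depth
    int64_t c_vol = c / kT / kH / kW;       // originating input channel
    std::cout << "c=" << c << " -> (c_vol=" << c_vol << ", t=" << t_offset
              << ", h=" << h_offset << ", w=" << w_offset << ")\n";
  }
  std::cout << "channels_col=" << channels_col << '\n';
}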
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
+
+namespace at {
+
+
+// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
+}
+
+// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
+}
+// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
+}
+
+// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
+inline ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _cdist_backward_out(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward.h
ADDED
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_convolution_double_backward_ops.h>
+
+namespace at {
+
+
+// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
+  }
+}
+
+// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_symint(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  }
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _convolution_mode_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_div_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_div_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Tensor & other);
+TORCH_API void _foreach_div_(at::TensorList self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _is_all_true {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_is_all_true")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_is_all_true(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _make_dep_token_cpu(c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_scaled_dot_product_efficient_attention_ops.h>
+
+namespace at {
+
+
+// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, c10::optional<double> scale=c10::nullopt) {
+    return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_sum_backward_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
+inline at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
+    return at::_ops::_sparse_sum_backward::call(grad, self, dim);
+}
+
+// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_sum_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
+    return at::_ops::_sparse_sum_backward_out::call(grad, self, dim, out);
+}
+// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_sum_backward_outf(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
+    return at::_ops::_sparse_sum_backward_out::call(grad, self, dim, out);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _to_sparse_bsc_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt);
+TORCH_API at::Tensor & _to_sparse_bsc_outf(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _validate_compressed_sparse_indices_cpu(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz);
+TORCH_API void _validate_compressed_sparse_indices_cuda(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_max_pool3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+};
+
+struct TORCH_API adaptive_max_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
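Usage sketch (illustrative, not part of the diff): these natives implement the bitwise-and overloads; a minimal call through the public `at::__and__` / `Tensor::__iand__` API, assuming an integer tensor:

    #include <ATen/ATen.h>

    int main() {
      auto a = at::ones({4}, at::kLong);
      auto b = at::__and__(a, 1);   // Tensor & Scalar overload
      a.__iand__(b);                // in-place Tensor & Tensor overload
      return 0;
    }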
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/asin.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/asin_ops.h>
+
+namespace at {
+
+
+// aten::asin(Tensor self) -> Tensor
+inline at::Tensor asin(const at::Tensor & self) {
+    return at::_ops::asin::call(self);
+}
+
+// aten::asin_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & asin_(at::Tensor & self) {
+    return at::_ops::asin_::call(self);
+}
+
+// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::asin_out::call(self, out);
+}
+// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::asin_out::call(self, out);
+}
+
+}
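Usage sketch (illustrative, not part of the diff) for the three variants declared above:

    #include <ATen/ATen.h>

    int main() {
      auto x = at::rand({3});        // values in [0, 1), inside asin's domain
      auto y = at::asin(x);          // functional variant
      auto out = at::empty_like(x);
      at::asin_out(out, x);          // out= variant
      at::asin_(x);                  // in-place variant
      return 0;
    }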
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor asin(const at::Tensor & self);
+TORCH_API at::Tensor & asin_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_native.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & dequantize_self_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor dequantize_cpu_or_cuda(const at::Tensor & self);
+TORCH_API at::Tensor dequantize_quantized(const at::Tensor & self);
+TORCH_API void dequantize_tensors_out(at::TensorList tensors, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> dequantize_tensors_quantized_cpu(at::TensorList tensors);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & diagonal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
+TORCH_API at::Tensor & diagonal_copy_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API embedding_dense_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymInt, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_dense_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq);
+};
+
+struct TORCH_API embedding_dense_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymInt, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_dense_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h>
+
+namespace at {
+
+
+// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
+inline at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
+    return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
+
+} // namespace cpu
+} // namespace at
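Usage sketch (illustrative, not part of the diff): the public functional `at::frexp` is the usual way to reach the CPU kernel declared above (the routing is an assumption based on the usual torchgen layout; the decomposition itself is standard):

    #include <ATen/ATen.h>

    int main() {
      auto x = at::rand({4});                       // CPU float tensor
      auto [mantissa, exponent] = at::frexp(x);     // x == mantissa * 2^exponent elementwise
      return 0;
    }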
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_meta_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace meta
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
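Usage sketch (illustrative, not part of the diff): the same signatures are exposed through the public `at::hardsigmoid_backward` functional (assumed routing), e.g.:

    #include <ATen/ATen.h>

    int main() {
      auto self = at::randn({4});           // the original hardsigmoid input
      auto grad_output = at::ones({4});
      auto grad_input = at::hardsigmoid_backward(grad_output, self);
      return 0;
    }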
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h
ADDED
@@ -0,0 +1,127 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API index_fill__int_Scalar {
+  using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+};
+
+struct TORCH_API index_fill_int_Scalar {
+  using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+};
+
+struct TORCH_API index_fill__int_Tensor {
+  using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+};
+
+struct TORCH_API index_fill_int_Tensor {
+  using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+};
+
+struct TORCH_API index_fill__Dimname_Scalar {
+  using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+};
+
+struct TORCH_API index_fill__Dimname_Tensor {
+  using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+};
+
+struct TORCH_API index_fill_Dimname_Scalar {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+};
+
+struct TORCH_API index_fill_Dimname_Tensor {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+};
+
+struct TORCH_API index_fill_int_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out);
+};
+
+struct TORCH_API index_fill_int_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out);
+};
+
+}} // namespace at::_ops
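Usage sketch (illustrative, not part of the diff): the int_Scalar overloads above correspond to the familiar `index_fill` / `index_fill_` calls:

    #include <ATen/ATen.h>

    int main() {
      auto self = at::zeros({3, 4});
      auto index = at::arange(2, at::kLong);               // indices 0 and 1
      self.index_fill_(0, index, 1.0);                     // in-place, aten::index_fill_.int_Scalar
      auto filled = at::index_fill(self, 1, index, -1.0);  // functional, aten::index_fill.int_Scalar
      return 0;
    }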
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/kl_div_ops.h>
+
+namespace at {
+
+
+// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
+inline at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) {
+    return at::_ops::kl_div::call(self, target, reduction, log_target);
+}
+
+}
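Usage sketch (illustrative, not part of the diff), assuming `input` holds log-probabilities and `target` holds probabilities (the default `log_target=false` convention):

    #include <ATen/ATen.h>

    int main() {
      auto input  = at::log_softmax(at::randn({2, 5}), /*dim=*/1);
      auto target = at::softmax(at::randn({2, 5}), /*dim=*/1);
      auto loss = at::kl_div(input, target);   // reduction defaults to Mean
      return 0;
    }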
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lerp.h
ADDED
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/lerp_ops.h>
+
+namespace at {
+
+
+// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+    return at::_ops::lerp_Scalar_out::call(self, end, weight, out);
+}
+// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
+    return at::_ops::lerp_Scalar_out::call(self, end, weight, out);
+}
+
+// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+    return at::_ops::lerp_Tensor_out::call(self, end, weight, out);
+}
+// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
+    return at::_ops::lerp_Tensor_out::call(self, end, weight, out);
+}
+
+// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
+inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+    return at::_ops::lerp_Scalar::call(self, end, weight);
+}
+
+// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
+inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+    return at::_ops::lerp_Tensor::call(self, end, weight);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_inv {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv(Tensor A) -> Tensor")
+  static at::Tensor call(const at::Tensor & A);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A);
+};
+
+struct TORCH_API linalg_inv_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & A, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out);
+};
+
+}} // namespace at::_ops
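Usage sketch (illustrative, not part of the diff): callers normally reach these op structs through the public `at::linalg_inv` wrappers (an assumption based on the usual torchgen layout):

    #include <ATen/ATen.h>

    int main() {
      auto A = at::rand({3, 3}) + at::eye(3).mul(3.0);  // diagonally dominant, hence invertible
      auto Ainv = at::linalg_inv(A);
      auto out = at::empty({3, 3});
      at::linalg_inv_out(out, A);                       // out= wrapper of linalg_inv.out
      return 0;
    }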
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_matmul {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matmul(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API linalg_matmul_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor mH(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
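Usage sketch (illustrative, not part of the diff): `mH` is the matrix conjugate-transpose view, also exposed as a Tensor method:

    #include <ATen/ATen.h>

    int main() {
      auto m = at::randn({2, 3}, at::kComplexFloat);
      auto mh = m.mH();   // conjugate transpose over the last two dims -> shape {3, 2}
      return 0;
    }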