Add files using upload-large-folder tool
Note: this view is limited to 50 files because the commit contains too many changes.
- ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ATen.h +37 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h +153 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Backend.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h +343 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h +33 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h +542 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h +323 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Config.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h +27 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h +41 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/DimVector.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Dimname.h +1 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h +34 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h +160 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h +527 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Formatting.h +1 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h +408 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h +15 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Layout.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h +139 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h +109 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h +42 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h +443 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h +1 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h +283 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h +69 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h +187 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Operators.h +1358 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Parallel.h +160 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h +19 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h +54 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h +34 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h +0 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h +4 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h +13 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h +411 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Storage.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Tensor.h +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h +144 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h +137 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h +113 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Utils.h +138 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/code_template.h +243 -0
ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41dec19e41e01e697ba09437d7a19d3b6ced65f1bcf710af3d3ab2eb1aed5f70
+size 50332828
ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e896fd7e1e34ab0d6fed56670fcd8268239dc24e787e38f5b7d2366b0e9e09f
+size 33555612
ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8889a8cf12e2e8c498b8ede4eb6c879a6e49eba3d7eb7bfcf62cbbbac59d5018
+size 33555533
venv/lib/python3.10/site-packages/torch/include/ATen/ATen.h
ADDED
@@ -0,0 +1,37 @@
+#pragma once
+
+#if !defined(_MSC_VER) && __cplusplus < 201703L
+#error C++17 or later compatible compiler is required to use ATen.
+#endif
+
+#include <ATen/Context.h>
+#include <ATen/Device.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/DimVector.h>
+#include <ATen/Dispatch.h>
+#include <ATen/Formatting.h>
+#include <ATen/Functions.h>
+#include <ATen/NamedTensor.h>
+#include <ATen/ScalarOps.h>
+#include <ATen/Tensor.h>
+#include <ATen/TensorGeometry.h>
+#include <ATen/TensorIndexing.h>
+#include <ATen/TensorOperators.h>
+#include <ATen/Version.h>
+#include <ATen/core/ATenGeneral.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Scalar.h>
+#include <ATen/core/UnsafeFromTH.h>
+#include <ATen/core/ivalue.h>
+#include <ATen/core/jit_type.h>
+#include <c10/core/Allocator.h>
+#include <c10/core/InferenceMode.h>
+#include <c10/core/Layout.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Exception.h>
+
+// TODO: try to remove this
+// There is some back story, see https://github.com/pytorch/pytorch/issues/48684
+#include <ATen/NativeFunctions.h>
venv/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h
ADDED
@@ -0,0 +1,153 @@
+#pragma once
+#include <ATen/Config.h>
+#include <c10/core/DeviceType.h>
+#include <c10/core/ScalarType.h>
+#include <c10/util/BFloat16.h>
+#include <c10/util/Float8_e4m3fn.h>
+#include <c10/util/Float8_e4m3fnuz.h>
+#include <c10/util/Float8_e5m2.h>
+#include <c10/util/Float8_e5m2fnuz.h>
+#include <c10/util/Half.h>
+
+// Defines the accumulation type for a scalar type.
+// Example:
+//   using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
+//
+// Accumulation types are an important concept in numeric computing
+// because you frequently want to perform intermediate computations
+// at a higher precision than the input and output precision, to avoid
+// compounding internal rounding errors.  Accumulation is the most
+// well-known intermediate computation (it is of great importance for
+// sum reduction and matrix multiply, for example), but in PyTorch
+// acc_type ends up getting used for all sorts of other intermediate
+// computations, so it perhaps would be more accurately (ahem) called an
+// "accurate" type.  acc_type is especially important for reduced
+// precision operations like float16 and bfloat16, where relatively
+// benign looking inputs can easily end up overflowing/underflowing.
+//
+// acc_type is parametrized by whether or not you are running on CUDA
+// or not, because on CUDA double precision operations are expensive
+// and so by default, we don't actually want to use double as an
+// acc_type on CUDA.  A lot of things are typed out below, but
+// basically, the table is generated by a few rules:
+//
+//  If bool:
+//      Use 'bool' as acc_type.
+//  If floating point:
+//      If CUDA, use 'float' as acc_type (unless scalar_t is double),
+//      otherwise (CPU) use 'double'
+//  If integral:
+//      Use 'int64_t' as acc_type
+//
+// You're not forced to use this template; if you happen to know
+// something specific about your use case, you can specify your own
+// desired behavior.  This template, however, will give you a reasonable
+// default that will work for all dtypes supported in PyTorch.
+
+#if defined(__CUDACC__)
+#include <cuda.h>
+#include <cuda_fp16.h>
+#elif defined(__HIPCC__)
+#include <hip/hip_fp16.h>
+#include <hip/hip_runtime.h>
+#endif
+
+namespace at {
+
+template <typename T, c10::DeviceType D>
+struct AccumulateTypeDevice {};
+
+template <typename T, bool>
+struct AccumulateType {};
+
+template <typename T>
+struct AccumulateType<T, false> {
+  using type = typename AccumulateTypeDevice<T, c10::DeviceType::CPU>::type;
+};
+
+template <typename T>
+struct AccumulateType<T, true> {
+  using type = typename AccumulateTypeDevice<T, c10::DeviceType::CUDA>::type;
+};
+
+template <typename T, c10::DeviceType device>
+using acc_type_device = typename AccumulateTypeDevice<T, device>::type;
+
+template <typename T, bool is_cuda>
+using acc_type = typename AccumulateType<T, is_cuda>::type;
+
+#define ACC_TYPE(t, acc_t, device_type)         \
+  template <>                                   \
+  struct AccumulateTypeDevice<t, device_type> { \
+    using type = acc_t;                         \
+  };
+#define MPS_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::MPS)
+#define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA)
+#define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU)
+
+MPS_ACC_TYPE(BFloat16, float);
+MPS_ACC_TYPE(Half, float);
+MPS_ACC_TYPE(Float8_e5m2, float);
+MPS_ACC_TYPE(Float8_e4m3fn, float);
+MPS_ACC_TYPE(Float8_e5m2fnuz, float);
+MPS_ACC_TYPE(Float8_e4m3fnuz, float);
+MPS_ACC_TYPE(float, float);
+MPS_ACC_TYPE(double, float);
+MPS_ACC_TYPE(int8_t, int64_t);
+MPS_ACC_TYPE(uint8_t, int64_t);
+MPS_ACC_TYPE(char, int64_t);
+MPS_ACC_TYPE(int16_t, int64_t);
+MPS_ACC_TYPE(int32_t, int64_t);
+MPS_ACC_TYPE(int64_t, int64_t);
+MPS_ACC_TYPE(bool, bool);
+MPS_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+MPS_ACC_TYPE(c10::complex<float>, c10::complex<float>);
+MPS_ACC_TYPE(c10::complex<double>, c10::complex<float>);
+
+#if defined(__CUDACC__) || defined(__HIPCC__)
+CUDA_ACC_TYPE(half, float);
+#endif
+CUDA_ACC_TYPE(BFloat16, float);
+CUDA_ACC_TYPE(Half, float);
+CUDA_ACC_TYPE(Float8_e5m2, float);
+CUDA_ACC_TYPE(Float8_e4m3fn, float);
+CUDA_ACC_TYPE(Float8_e5m2fnuz, float);
+CUDA_ACC_TYPE(Float8_e4m3fnuz, float);
+CUDA_ACC_TYPE(float, float);
+CUDA_ACC_TYPE(double, double);
+CUDA_ACC_TYPE(int8_t, int64_t);
+CUDA_ACC_TYPE(uint8_t, int64_t);
+CUDA_ACC_TYPE(char, int64_t);
+CUDA_ACC_TYPE(int16_t, int64_t);
+CUDA_ACC_TYPE(int32_t, int64_t);
+CUDA_ACC_TYPE(int64_t, int64_t);
+CUDA_ACC_TYPE(bool, bool);
+CUDA_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+CUDA_ACC_TYPE(c10::complex<float>, c10::complex<float>);
+CUDA_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+
+CPU_ACC_TYPE(BFloat16, float);
+CPU_ACC_TYPE(Half, float);
+CPU_ACC_TYPE(Float8_e5m2, float);
+CPU_ACC_TYPE(Float8_e4m3fn, float);
+CPU_ACC_TYPE(Float8_e5m2fnuz, float);
+CPU_ACC_TYPE(Float8_e4m3fnuz, float);
+CPU_ACC_TYPE(float, double);
+CPU_ACC_TYPE(double, double);
+CPU_ACC_TYPE(int8_t, int64_t);
+CPU_ACC_TYPE(uint8_t, int64_t);
+CPU_ACC_TYPE(char, int64_t);
+CPU_ACC_TYPE(int16_t, int64_t);
+CPU_ACC_TYPE(int32_t, int64_t);
+CPU_ACC_TYPE(int64_t, int64_t);
+CPU_ACC_TYPE(bool, bool);
+CPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+CPU_ACC_TYPE(c10::complex<float>, c10::complex<double>);
+CPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+
+TORCH_API c10::ScalarType toAccumulateType(
+    c10::ScalarType type,
+    c10::DeviceType device);
+TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);
+
+} // namespace at
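The comment block in this header documents the accumulation-type rules. As a minimal sketch of how the acc_type alias resolves, assuming a libtorch build that provides this header (the static_asserts simply restate rows of the ACC_TYPE table above, and naive_sum is a hypothetical helper, not part of the header):

#include <ATen/AccumulateType.h>
#include <cstdint>
#include <type_traits>

// These assertions restate the CPU/CUDA rows of the table above.
static_assert(std::is_same_v<at::acc_type<c10::Half, /*is_cuda=*/false>, float>,
              "Half accumulates in float on CPU");
static_assert(std::is_same_v<at::acc_type<float, /*is_cuda=*/false>, double>,
              "float accumulates in double on CPU");
static_assert(std::is_same_v<at::acc_type<float, /*is_cuda=*/true>, float>,
              "float stays float on CUDA, where double math is expensive");
static_assert(std::is_same_v<at::acc_type<int32_t, /*is_cuda=*/false>, int64_t>,
              "integral types accumulate in int64_t");

// Hypothetical reduction that accumulates at the wider type before
// casting back to the storage type, as the comment recommends.
template <typename scalar_t>
scalar_t naive_sum(const scalar_t* data, int64_t n) {
  using acc_t = at::acc_type<scalar_t, /*is_cuda=*/false>;
  acc_t acc = acc_t(0);
  for (int64_t i = 0; i < n; ++i) {
    acc += static_cast<acc_t>(data[i]);
  }
  return static_cast<scalar_t>(acc);
}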
venv/lib/python3.10/site-packages/torch/include/ATen/Backend.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/core/Backend.h>
venv/lib/python3.10/site-packages/torch/include/ATen/CPUApplyUtils.h
ADDED
@@ -0,0 +1,343 @@
+#pragma once
+
+#include <ATen/CollapseDims.h>
+#include <ATen/Parallel.h>
+#include <ATen/TensorUtils.h>
+#include <c10/util/irange.h>
+#include <cstring>
+#include <limits>
+
+namespace at {
+
+/*
+ * The basic strategy for apply is as follows:
+ *
+ * 1. Starting with the outermost index, loop until we reach a dimension where
+ * the data is no longer contiguous, i.e. the stride at that dimension is not
+ * equal to the size of the tensor defined by the outer dimensions. Let's call
+ * this outer (contiguous) tensor A. Note that if the Tensor is contiguous, then
+ * A is equal to the entire Tensor. Let's call the inner tensor B.
+ *
+ * 2. We loop through the indices in B, starting at its outermost dimension. For
+ * example, if B is a 2x2 matrix, then we do:
+ *
+ * B[0][0]
+ * B[0][1]
+ * B[1][0]
+ * B[1][1]
+ *
+ * We set the offset into the underlying storage as (storageOffset + stride_B *
+ * index_B), i.e. basically we compute the offset into the storage as we would
+ * normally for a Tensor. But because we are guaranteed the subsequent data is
+ * contiguous in memory, we can simply loop for sizeof(A) iterations and perform
+ * the operation, without having to follow the order described by the strides of
+ * A.
+ *
+ * 3. As an optimization, we merge dimensions of A that are contiguous in
+ * memory. For example, if A is a 3x3x3x3 tensor narrowed from a 3x3x4x3 tensor,
+ * then the first two dimensions can be merged for the purposes of APPLY,
+ * reducing the number of nested loops.
+ */
+
+inline Tensor sort_strides(Tensor& tensor_) {
+  IntArrayRef strides = tensor_.strides();
+  std::vector<int64_t> indices;
+  indices.reserve(tensor_.ndimension());
+  for (const auto i : c10::irange(tensor_.ndimension())) {
+    indices.push_back(i);
+  }
+  std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) {
+    return strides[i1] > strides[i2];
+  });
+  Tensor tensor = tensor_.permute(indices);
+  return tensor;
+}
+
+template <typename T, int N>
+struct strided_tensor_iter_fixed {
+ public:
+  T* data_ = NULL;
+  int64_t dim_ = 0;
+
+  int64_t counter_[N] = {0};
+  int64_t sizes_[N] = {0};
+  int64_t strides_[N] = {0};
+
+  strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete;
+  void operator=(strided_tensor_iter_fixed const& x) = delete;
+  strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
+  strided_tensor_iter_fixed(
+      Tensor& tensor,
+      C10_UNUSED bool sort_strides = false)
+      : data_(tensor.data_ptr<T>()) {
+    std::memset(counter_, 0, sizeof(int64_t) * N);
+    if (tensor.dim() > 0) {
+      std::memcpy(
+          sizes_, tensor.sizes().data(), tensor.dim() * sizeof(int64_t));
+      std::memcpy(
+          strides_, tensor.strides().data(), tensor.dim() * sizeof(int64_t));
+    }
+    dim_ = std::get<1>(collapse_dims(sizes_, strides_, tensor.ndimension()));
+  }
+};
+
+template <typename T>
+struct strided_tensor_iter {
+ private:
+ public:
+  T* data_ = NULL;
+  int64_t dim_;
+
+  std::vector<int64_t> counter_;
+  std::vector<int64_t> sizes_;
+  std::vector<int64_t> strides_;
+
+  strided_tensor_iter(strided_tensor_iter const&) = delete;
+  void operator=(strided_tensor_iter const& x) = delete;
+  strided_tensor_iter(strided_tensor_iter&&) = default;
+  strided_tensor_iter(Tensor& tensor)
+      : data_(tensor.data_ptr<T>()),
+        dim_(tensor.ndimension()),
+        counter_(dim_, 0),
+        sizes_(tensor.sizes().vec()),
+        strides_(tensor.strides().vec()) {
+    dim_ = std::get<1>(collapse_dims(sizes_.data(), strides_.data(), dim_));
+  }
+};
+
+inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
+  if (tensors.empty())
+    return true;
+  int64_t all_numel = tensors[0].numel();
+  for (const auto i : c10::irange(1, tensors.size())) {
+    if (tensors[i].numel() != all_numel)
+      return false;
+  }
+  return true;
+}
+
+inline std::string _all_equal_numel_error(at::ArrayRef<Tensor> tensors) {
+  std::ostringstream oss;
+  oss << "inconsistent tensor size, expected ";
+  for (size_t i = 0; i < tensors.size() - 1; i++) {
+    oss << tensors[i].sizes() << ", ";
+  }
+  oss << "and " << tensors[tensors.size() - 1].sizes()
+      << " to have the same number of elements, but got ";
+  for (size_t i = 0; i < tensors.size() - 1; i++) {
+    oss << tensors[i].numel() << ", ";
+  }
+  oss << "and " << tensors[tensors.size() - 1].numel()
+      << " elements respectively";
+  return oss.str();
+}
+
+inline bool _apply_preamble(ArrayRef<Tensor> tensors) {
+  checkDeviceType("CPU_tensor_apply", tensors, kCPU);
+  checkLayout("CPU_tensor_apply", tensors, kStrided);
+  if (!_all_equal_numel(tensors))
+    AT_ERROR(_all_equal_numel_error(tensors));
+  // An empty tensor has no elements
+  for (auto& t : tensors)
+    if (t.numel() == 0)
+      return false;
+  return true;
+}
+
+inline int64_t _max_dim_tensors(ArrayRef<Tensor> tensors) {
+  int64_t dim = 0;
+  for (auto& t : tensors)
+    dim = std::max(dim, t.ndimension());
+  return dim;
+}
+
+inline void iterate(int64_t /*size*/){};
+
+template <typename Arg, typename... Args>
+inline void iterate(int64_t size, Arg& iter, Args&... iter_tail) {
+  iter.counter_[iter.dim_ - 1] += size;
+  iter.data_ = iter.data_ + size * iter.strides_[iter.dim_ - 1];
+  iterate(size, iter_tail...);
+}
+
+inline bool iterate_continue() {
+  return true;
+};
+
+template <typename Arg, typename... Args>
+inline bool iterate_continue(Arg& iter, Args&... iter_tail) {
+  return iter.counter_[iter.dim_ - 1] < iter.sizes_[iter.dim_ - 1] &&
+      iterate_continue(iter_tail...);
+}
+
+inline int64_t max_iterate_size() {
+  return std::numeric_limits<int64_t>::max();
+};
+
+template <typename Arg, typename... Args>
+inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) {
+  return std::min(
+      (iter.sizes_[iter.dim_ - 1] - iter.counter_[iter.dim_ - 1]),
+      max_iterate_size(iter_tail...));
+}
+
+inline void iterate_overflow(){};
+
+template <typename Arg, typename... Args>
+inline void iterate_overflow(Arg& iter, Args&... iter_tail) {
+  if (iter.counter_[iter.dim_ - 1] == iter.sizes_[iter.dim_ - 1]) {
+    for (int64_t i = iter.dim_ - 1; i > 0; i--) {
+      if (iter.counter_[i] == iter.sizes_[i]) {
+        iter.counter_[i] = 0;
+        iter.counter_[i - 1]++;
+        iter.data_ = iter.data_ - (iter.sizes_[i] * iter.strides_[i]) +
+            iter.strides_[i - 1];
+      }
+    }
+  }
+  iterate_overflow(iter_tail...);
+}
+
+inline void forward(int64_t /*offset*/){};
+
+template <typename Arg, typename... Args>
+inline void forward(int64_t offset, Arg& iter, Args&... iter_tail) {
+  int64_t multi = offset;
+  for (int64_t i = iter.dim_ - 1; i >= 0; i--) {
+    int64_t inc = multi % iter.sizes_[i];
+    multi = multi / iter.sizes_[i];
+    iter.data_ = iter.data_ + inc * iter.strides_[i];
+    iter.counter_[i] += inc;
+  }
+  forward(offset, iter_tail...);
+}
+
+inline int64_t max_dim() {
+  return 0;
+}
+
+template <typename Arg, typename... Args>
+inline int64_t max_dim(Arg& iter, Args&... iter_tail) {
+  return std::max(iter.dim_, max_dim(iter_tail...));
+}
+
+inline void apply_op(){};
+
+template <typename Op, typename... Args>
+inline void apply_op(
+    int64_t numel,
+    int64_t offset,
+    const Op& op,
+    Args... iters) {
+  // For 0-dim tensors
+  if (numel == 1 && max_dim(iters...) == 0) {
+    op(*iters.data_...);
+    return;
+  }
+  if (offset > 0)
+    forward(offset, iters...);
+  // Splitting this into chunks helps the compiler create faster assembly
+  for (int64_t i = 0; i < numel;) {
+    for (; iterate_continue(iters...) && i < numel;) {
+      op(*iters.data_...);
+      iterate(1, iters...);
+      i++;
+    }
+    iterate_overflow(iters...);
+  }
+}
+
+/*
+  Apply a pointwise operator to sequence of tensors
+
+  The calling convention for op is a function/functor that takes the same
+  number of pointers of type scalar as the number of given tensors. For example,
+  to compute a = b * c, op would be of the form:
+  [](scalar* a_val, const scalar* b_val, const scalar* c_val) { a_val[0] =
+  b_val[0] * c_val[0]; };
+*/
+
+template <typename scalar1, typename scalar2, typename Op>
+inline void CPU_tensor_apply2(Tensor tensor1, Tensor tensor2, const Op op) {
+  if (!_apply_preamble({tensor1, tensor2}))
+    return;
+  if (_max_dim_tensors({tensor1, tensor2}) <= 8) {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+        strided_tensor_iter_fixed<scalar2, 8>(tensor2));
+  } else {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter<scalar1>(tensor1),
+        strided_tensor_iter<scalar2>(tensor2));
+  }
+}
+
+template <typename scalar1, typename scalar2, typename scalar3, typename Op>
+inline void CPU_tensor_apply3(
+    Tensor tensor1,
+    Tensor tensor2,
+    Tensor tensor3,
+    const Op op) {
+  if (!_apply_preamble({tensor1, tensor2, tensor3}))
+    return;
+  if (_max_dim_tensors({tensor1, tensor2, tensor3}) <= 8) {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+        strided_tensor_iter_fixed<scalar2, 8>(tensor2),
+        strided_tensor_iter_fixed<scalar3, 8>(tensor3));
+  } else {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter<scalar1>(tensor1),
+        strided_tensor_iter<scalar2>(tensor2),
+        strided_tensor_iter<scalar3>(tensor3));
+  }
+}
+
+template <
+    typename scalar1,
+    typename scalar2,
+    typename scalar3,
+    typename scalar4,
+    typename Op>
+inline void CPU_tensor_apply4(
+    Tensor tensor1,
+    Tensor tensor2,
+    Tensor tensor3,
+    Tensor tensor4,
+    const Op op) {
+  if (!_apply_preamble({tensor1, tensor2, tensor3, tensor4}))
+    return;
+  if (_max_dim_tensors({tensor1, tensor2, tensor3, tensor4}) <= 8) {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+        strided_tensor_iter_fixed<scalar2, 8>(tensor2),
+        strided_tensor_iter_fixed<scalar3, 8>(tensor3),
+        strided_tensor_iter_fixed<scalar4, 8>(tensor4));
+  } else {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter<scalar1>(tensor1),
+        strided_tensor_iter<scalar2>(tensor2),
+        strided_tensor_iter<scalar3>(tensor3),
+        strided_tensor_iter<scalar4>(tensor4));
+  }
+}
+
+} // namespace at
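A minimal usage sketch of the calling convention described in the header's comment, assuming a libtorch build. Note that apply_op dereferences each iterator's data_ pointer, so the functor actually receives element references rather than raw pointers; the scale_into function and its alpha parameter below are illustrative, not part of the header:

#include <ATen/ATen.h>
#include <ATen/CPUApplyUtils.h>

// Pointwise dst = alpha * src over two CPU, strided float tensors with equal
// numel (those preconditions are enforced by _apply_preamble).
void scale_into(at::Tensor dst, at::Tensor src, float alpha) {
  at::CPU_tensor_apply2<float, float>(
      dst, src, [alpha](float& dst_val, float& src_val) {
        dst_val = alpha * src_val;
      });
}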
venv/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h
ADDED
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <c10/core/Allocator.h>
+#include <c10/util/Exception.h>
+
+// This file creates a fake allocator that just throws exceptions if
+// it is actually used.
+
+// state passed to the allocator is the std::function<void(void*)> called
+// when the blob is release by ATen
+
+namespace at {
+
+static cpu_fixed_malloc(void*, ptrdiff_t) {
+  AT_ERROR("attempting to resize a tensor view of an external blob");
+}
+
+static cpu_fixed_realloc(void*, void*, ptrdiff_t) {
+  AT_ERROR("attempting to resize a tensor view of an external blob");
+}
+
+static cpu_fixed_free(void* state, void* allocation) {
+  auto on_release = static_cast<std::function<void(void*)>*>(state);
+  (*on_release)(allocation);
+  delete on_release;
+}
+
+static Allocator CPU_fixed_allocator = {
+    cpu_fixed_malloc,
+    cpu_fixed_realloc,
+    cpu_fixed_free};
+
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h
ADDED
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+//   - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CUDAFunctions_inl.h>
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h
ADDED
@@ -0,0 +1,542 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the     \
+  file will need to be re-compiled every time an operator is changed or added. \
+  Consider including a specific operator from                                  \
+  <ATen/ops/{my_operator}_compositeexplicitautograd_dispatch.h>.               \
+  See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+#include <ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_assert_scalar_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cholesky_solve_helper_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_chunk_cat_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_coalesce_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_conj_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_convolution_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_ctc_loss_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cudnn_rnn_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_cudnn_rnn_flatten_weight_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_efficientzerotensor_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_embedding_bag_dense_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_embedding_bag_forward_only_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_embedding_bag_per_sample_weights_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_empty_affine_quantized_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_euclidean_dist_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foobar_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_acos_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_addcdiv_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_addcmul_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_atan_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_clamp_max_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_clamp_min_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_div_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_erf_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_erfc_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_lerp_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_lgamma_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_log10_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_log1p_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_minimum_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_mul_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_neg_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_norm_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_round_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_sub_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_tan_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_tanh_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_trunc_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_foreach_zero_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_functional_assert_scalar_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fused_moving_avg_obs_fq_helper_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fused_sgd_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_histogramdd_bin_edges_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_histogramdd_from_bin_tensors_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_lazy_clone_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_linalg_check_errors_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_lstm_mps_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_masked_scale_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_masked_softmax_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_mkldnn_reshape_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_mkldnn_transpose_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_native_batch_norm_legit_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_from_padded_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_get_values_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_tensor_from_mask_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_tensor_storage_offsets_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nested_view_from_jagged_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_new_zeros_with_same_feature_meta_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_nnpack_spatial_convolution_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_pdist_forward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_pin_memory_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_print_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_reshape_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sample_dirichlet_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_segment_reduce_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_slow_conv2d_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_addmm_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_coo_tensor_with_dims_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_mask_projection_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_softmax_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_softmax_backward_data_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_sparse_matmul_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_spdiags_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_stack_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_standard_gamma_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_optional_intlist_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_parallel_materialize_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_fused_gru_cell_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_fused_gru_cell_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_sparse_bsc_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_sparse_bsr_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_sparse_csc_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_to_sparse_csr_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_transform_bias_rescale_qkv_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_triton_multi_head_attention_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_triton_scaled_dot_attention_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_unique_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_unique2_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_unsafe_index_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_unsafe_view_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_weight_norm_interface_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/_weight_norm_interface_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/abs_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/add_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/addr_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/affine_grid_generator_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/alias_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/alias_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/all_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/allclose_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/any_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/arange_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/argsort_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/as_strided_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/as_strided_scatter_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/binary_cross_entropy_with_logits_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bincount_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/binomial_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bitwise_left_shift_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/blackman_window_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/block_diag_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/bucketize_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cauchy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/ccol_indices_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/ccol_indices_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/celu_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cholesky_solve_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/clone_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/col_indices_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/complex_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/conj_physical_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/conv_tbc_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/convolution_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/copy_sparse_to_sparse_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/copysign_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_convolution_add_relu_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_convolution_relu_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_convolution_transpose_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_grid_sampler_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cummax_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/cummin_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/dequantize_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/detach_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/diagonal_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/diagonal_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/diagonal_scatter_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/dist_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/div_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/dot_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/embedding_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/embedding_renorm_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/empty_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/empty_like_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/empty_permuted_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/empty_strided_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/expand_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/expand_copy_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/exponential_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/eye_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/fft_fftfreq_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/fill_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/flip_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/floor_divide_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/fmod_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/frexp_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/from_file_compositeexplicitautograd_dispatch.h>
+#include <ATen/ops/full_compositeexplicitautograd_dispatch.h>
|
318 |
+
#include <ATen/ops/full_like_compositeexplicitautograd_dispatch.h>
|
319 |
+
#include <ATen/ops/geometric_compositeexplicitautograd_dispatch.h>
|
320 |
+
#include <ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h>
|
321 |
+
#include <ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h>
|
322 |
+
#include <ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h>
|
323 |
+
#include <ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h>
|
324 |
+
#include <ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h>
|
325 |
+
#include <ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h>
|
326 |
+
#include <ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h>
|
327 |
+
#include <ATen/ops/hann_window_compositeexplicitautograd_dispatch.h>
|
328 |
+
#include <ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h>
|
329 |
+
#include <ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h>
|
330 |
+
#include <ATen/ops/index_fill_compositeexplicitautograd_dispatch.h>
|
331 |
+
#include <ATen/ops/index_put_compositeexplicitautograd_dispatch.h>
|
332 |
+
#include <ATen/ops/indices_compositeexplicitautograd_dispatch.h>
|
333 |
+
#include <ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h>
|
334 |
+
#include <ATen/ops/int_repr_compositeexplicitautograd_dispatch.h>
|
335 |
+
#include <ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h>
|
336 |
+
#include <ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h>
|
337 |
+
#include <ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h>
|
338 |
+
#include <ATen/ops/isinf_compositeexplicitautograd_dispatch.h>
|
339 |
+
#include <ATen/ops/isnan_compositeexplicitautograd_dispatch.h>
|
340 |
+
#include <ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h>
|
341 |
+
#include <ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h>
|
342 |
+
#include <ATen/ops/lift_compositeexplicitautograd_dispatch.h>
|
343 |
+
#include <ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h>
|
344 |
+
#include <ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h>
|
345 |
+
#include <ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h>
|
346 |
+
#include <ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h>
|
347 |
+
#include <ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h>
|
348 |
+
#include <ATen/ops/linear_compositeexplicitautograd_dispatch.h>
|
349 |
+
#include <ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h>
|
350 |
+
#include <ATen/ops/linspace_compositeexplicitautograd_dispatch.h>
|
351 |
+
#include <ATen/ops/log_normal_compositeexplicitautograd_dispatch.h>
|
352 |
+
#include <ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h>
|
353 |
+
#include <ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h>
|
354 |
+
#include <ATen/ops/logical_and_compositeexplicitautograd_dispatch.h>
|
355 |
+
#include <ATen/ops/logical_not_compositeexplicitautograd_dispatch.h>
|
356 |
+
#include <ATen/ops/logical_or_compositeexplicitautograd_dispatch.h>
|
357 |
+
#include <ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h>
|
358 |
+
#include <ATen/ops/logspace_compositeexplicitautograd_dispatch.h>
|
359 |
+
#include <ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h>
|
360 |
+
#include <ATen/ops/lshift_compositeexplicitautograd_dispatch.h>
|
361 |
+
#include <ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h>
|
362 |
+
#include <ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h>
|
363 |
+
#include <ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h>
|
364 |
+
#include <ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h>
|
365 |
+
#include <ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h>
|
366 |
+
#include <ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
367 |
+
#include <ATen/ops/mean_compositeexplicitautograd_dispatch.h>
|
368 |
+
#include <ATen/ops/median_compositeexplicitautograd_dispatch.h>
|
369 |
+
#include <ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h>
|
370 |
+
#include <ATen/ops/miopen_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
371 |
+
#include <ATen/ops/miopen_convolution_compositeexplicitautograd_dispatch.h>
|
372 |
+
#include <ATen/ops/miopen_convolution_transpose_compositeexplicitautograd_dispatch.h>
|
373 |
+
#include <ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h>
|
374 |
+
#include <ATen/ops/miopen_rnn_compositeexplicitautograd_dispatch.h>
|
375 |
+
#include <ATen/ops/miopen_rnn_backward_compositeexplicitautograd_dispatch.h>
|
376 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
377 |
+
#include <ATen/ops/mkldnn_convolution_compositeexplicitautograd_dispatch.h>
|
378 |
+
#include <ATen/ops/mkldnn_linear_compositeexplicitautograd_dispatch.h>
|
379 |
+
#include <ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h>
|
380 |
+
#include <ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h>
|
381 |
+
#include <ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h>
|
382 |
+
#include <ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h>
|
383 |
+
#include <ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h>
|
384 |
+
#include <ATen/ops/mkldnn_max_pool3d_compositeexplicitautograd_dispatch.h>
|
385 |
+
#include <ATen/ops/mkldnn_max_pool3d_backward_compositeexplicitautograd_dispatch.h>
|
386 |
+
#include <ATen/ops/mkldnn_reorder_conv2d_weight_compositeexplicitautograd_dispatch.h>
|
387 |
+
#include <ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h>
|
388 |
+
#include <ATen/ops/mkldnn_rnn_layer_compositeexplicitautograd_dispatch.h>
|
389 |
+
#include <ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h>
|
390 |
+
#include <ATen/ops/mode_compositeexplicitautograd_dispatch.h>
|
391 |
+
#include <ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h>
|
392 |
+
#include <ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h>
|
393 |
+
#include <ATen/ops/mul_compositeexplicitautograd_dispatch.h>
|
394 |
+
#include <ATen/ops/mv_compositeexplicitautograd_dispatch.h>
|
395 |
+
#include <ATen/ops/mvlgamma_compositeexplicitautograd_dispatch.h>
|
396 |
+
#include <ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h>
|
397 |
+
#include <ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h>
|
398 |
+
#include <ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h>
|
399 |
+
#include <ATen/ops/native_dropout_compositeexplicitautograd_dispatch.h>
|
400 |
+
#include <ATen/ops/native_dropout_backward_compositeexplicitautograd_dispatch.h>
|
401 |
+
#include <ATen/ops/native_group_norm_compositeexplicitautograd_dispatch.h>
|
402 |
+
#include <ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h>
|
403 |
+
#include <ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h>
|
404 |
+
#include <ATen/ops/native_layer_norm_backward_compositeexplicitautograd_dispatch.h>
|
405 |
+
#include <ATen/ops/native_norm_compositeexplicitautograd_dispatch.h>
|
406 |
+
#include <ATen/ops/new_empty_compositeexplicitautograd_dispatch.h>
|
407 |
+
#include <ATen/ops/new_empty_strided_compositeexplicitautograd_dispatch.h>
|
408 |
+
#include <ATen/ops/new_full_compositeexplicitautograd_dispatch.h>
|
409 |
+
#include <ATen/ops/new_ones_compositeexplicitautograd_dispatch.h>
|
410 |
+
#include <ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h>
|
411 |
+
#include <ATen/ops/norm_compositeexplicitautograd_dispatch.h>
|
412 |
+
#include <ATen/ops/normal_compositeexplicitautograd_dispatch.h>
|
413 |
+
#include <ATen/ops/ones_compositeexplicitautograd_dispatch.h>
|
414 |
+
#include <ATen/ops/ones_like_compositeexplicitautograd_dispatch.h>
|
415 |
+
#include <ATen/ops/permute_compositeexplicitautograd_dispatch.h>
|
416 |
+
#include <ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h>
|
417 |
+
#include <ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h>
|
418 |
+
#include <ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h>
|
419 |
+
#include <ATen/ops/poisson_compositeexplicitautograd_dispatch.h>
|
420 |
+
#include <ATen/ops/polar_compositeexplicitautograd_dispatch.h>
|
421 |
+
#include <ATen/ops/polygamma_compositeexplicitautograd_dispatch.h>
|
422 |
+
#include <ATen/ops/prod_compositeexplicitautograd_dispatch.h>
|
423 |
+
#include <ATen/ops/put_compositeexplicitautograd_dispatch.h>
|
424 |
+
#include <ATen/ops/q_per_channel_scales_compositeexplicitautograd_dispatch.h>
|
425 |
+
#include <ATen/ops/q_per_channel_zero_points_compositeexplicitautograd_dispatch.h>
|
426 |
+
#include <ATen/ops/quantize_per_channel_compositeexplicitautograd_dispatch.h>
|
427 |
+
#include <ATen/ops/quantize_per_tensor_compositeexplicitautograd_dispatch.h>
|
428 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h>
|
429 |
+
#include <ATen/ops/quantized_batch_norm_compositeexplicitautograd_dispatch.h>
|
430 |
+
#include <ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h>
|
431 |
+
#include <ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h>
|
432 |
+
#include <ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h>
|
433 |
+
#include <ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h>
|
434 |
+
#include <ATen/ops/rand_compositeexplicitautograd_dispatch.h>
|
435 |
+
#include <ATen/ops/rand_like_compositeexplicitautograd_dispatch.h>
|
436 |
+
#include <ATen/ops/randint_compositeexplicitautograd_dispatch.h>
|
437 |
+
#include <ATen/ops/randint_like_compositeexplicitautograd_dispatch.h>
|
438 |
+
#include <ATen/ops/randn_compositeexplicitautograd_dispatch.h>
|
439 |
+
#include <ATen/ops/randn_like_compositeexplicitautograd_dispatch.h>
|
440 |
+
#include <ATen/ops/random_compositeexplicitautograd_dispatch.h>
|
441 |
+
#include <ATen/ops/randperm_compositeexplicitautograd_dispatch.h>
|
442 |
+
#include <ATen/ops/range_compositeexplicitautograd_dispatch.h>
|
443 |
+
#include <ATen/ops/relu_compositeexplicitautograd_dispatch.h>
|
444 |
+
#include <ATen/ops/remainder_compositeexplicitautograd_dispatch.h>
|
445 |
+
#include <ATen/ops/repeat_compositeexplicitautograd_dispatch.h>
|
446 |
+
#include <ATen/ops/repeat_interleave_compositeexplicitautograd_dispatch.h>
|
447 |
+
#include <ATen/ops/resize_compositeexplicitautograd_dispatch.h>
|
448 |
+
#include <ATen/ops/resize_as_compositeexplicitautograd_dispatch.h>
|
449 |
+
#include <ATen/ops/resize_as_sparse_compositeexplicitautograd_dispatch.h>
|
450 |
+
#include <ATen/ops/roll_compositeexplicitautograd_dispatch.h>
|
451 |
+
#include <ATen/ops/rot90_compositeexplicitautograd_dispatch.h>
|
452 |
+
#include <ATen/ops/row_indices_compositeexplicitautograd_dispatch.h>
|
453 |
+
#include <ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h>
|
454 |
+
#include <ATen/ops/rrelu_with_noise_backward_compositeexplicitautograd_dispatch.h>
|
455 |
+
#include <ATen/ops/rshift_compositeexplicitautograd_dispatch.h>
|
456 |
+
#include <ATen/ops/rsub_compositeexplicitautograd_dispatch.h>
|
457 |
+
#include <ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h>
|
458 |
+
#include <ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h>
|
459 |
+
#include <ATen/ops/select_compositeexplicitautograd_dispatch.h>
|
460 |
+
#include <ATen/ops/select_backward_compositeexplicitautograd_dispatch.h>
|
461 |
+
#include <ATen/ops/select_copy_compositeexplicitautograd_dispatch.h>
|
462 |
+
#include <ATen/ops/select_scatter_compositeexplicitautograd_dispatch.h>
|
463 |
+
#include <ATen/ops/set_compositeexplicitautograd_dispatch.h>
|
464 |
+
#include <ATen/ops/slice_compositeexplicitautograd_dispatch.h>
|
465 |
+
#include <ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h>
|
466 |
+
#include <ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h>
|
467 |
+
#include <ATen/ops/slice_inverse_compositeexplicitautograd_dispatch.h>
|
468 |
+
#include <ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h>
|
469 |
+
#include <ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h>
|
470 |
+
#include <ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h>
|
471 |
+
#include <ATen/ops/smooth_l1_loss_backward_compositeexplicitautograd_dispatch.h>
|
472 |
+
#include <ATen/ops/soft_margin_loss_compositeexplicitautograd_dispatch.h>
|
473 |
+
#include <ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h>
|
474 |
+
#include <ATen/ops/softmax_compositeexplicitautograd_dispatch.h>
|
475 |
+
#include <ATen/ops/sort_compositeexplicitautograd_dispatch.h>
|
476 |
+
#include <ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h>
|
477 |
+
#include <ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h>
|
478 |
+
#include <ATen/ops/sparse_mask_compositeexplicitautograd_dispatch.h>
|
479 |
+
#include <ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h>
|
480 |
+
#include <ATen/ops/sparse_resize_and_clear_compositeexplicitautograd_dispatch.h>
|
481 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
|
482 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
|
483 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
|
484 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
|
485 |
+
#include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h>
|
486 |
+
#include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautograd_dispatch.h>
|
487 |
+
#include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautograd_dispatch.h>
|
488 |
+
#include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h>
|
489 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
|
490 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
|
491 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
|
492 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
|
493 |
+
#include <ATen/ops/special_xlog1py_compositeexplicitautograd_dispatch.h>
|
494 |
+
#include <ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h>
|
495 |
+
#include <ATen/ops/split_compositeexplicitautograd_dispatch.h>
|
496 |
+
#include <ATen/ops/split_copy_compositeexplicitautograd_dispatch.h>
|
497 |
+
#include <ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h>
|
498 |
+
#include <ATen/ops/split_with_sizes_copy_compositeexplicitautograd_dispatch.h>
|
499 |
+
#include <ATen/ops/squeeze_compositeexplicitautograd_dispatch.h>
|
500 |
+
#include <ATen/ops/squeeze_copy_compositeexplicitautograd_dispatch.h>
|
501 |
+
#include <ATen/ops/stack_compositeexplicitautograd_dispatch.h>
|
502 |
+
#include <ATen/ops/std_mean_compositeexplicitautograd_dispatch.h>
|
503 |
+
#include <ATen/ops/sub_compositeexplicitautograd_dispatch.h>
|
504 |
+
#include <ATen/ops/sum_compositeexplicitautograd_dispatch.h>
|
505 |
+
#include <ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h>
|
506 |
+
#include <ATen/ops/sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
|
507 |
+
#include <ATen/ops/t_compositeexplicitautograd_dispatch.h>
|
508 |
+
#include <ATen/ops/t_copy_compositeexplicitautograd_dispatch.h>
|
509 |
+
#include <ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h>
|
510 |
+
#include <ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h>
|
511 |
+
#include <ATen/ops/trace_compositeexplicitautograd_dispatch.h>
|
512 |
+
#include <ATen/ops/transpose_compositeexplicitautograd_dispatch.h>
|
513 |
+
#include <ATen/ops/transpose_copy_compositeexplicitautograd_dispatch.h>
|
514 |
+
#include <ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h>
|
515 |
+
#include <ATen/ops/triu_indices_compositeexplicitautograd_dispatch.h>
|
516 |
+
#include <ATen/ops/unbind_compositeexplicitautograd_dispatch.h>
|
517 |
+
#include <ATen/ops/unbind_copy_compositeexplicitautograd_dispatch.h>
|
518 |
+
#include <ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h>
|
519 |
+
#include <ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h>
|
520 |
+
#include <ATen/ops/uniform_compositeexplicitautograd_dispatch.h>
|
521 |
+
#include <ATen/ops/unique_consecutive_compositeexplicitautograd_dispatch.h>
|
522 |
+
#include <ATen/ops/unique_dim_compositeexplicitautograd_dispatch.h>
|
523 |
+
#include <ATen/ops/unique_dim_consecutive_compositeexplicitautograd_dispatch.h>
|
524 |
+
#include <ATen/ops/unsafe_split_compositeexplicitautograd_dispatch.h>
|
525 |
+
#include <ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h>
|
526 |
+
#include <ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h>
|
527 |
+
#include <ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h>
|
528 |
+
#include <ATen/ops/values_compositeexplicitautograd_dispatch.h>
|
529 |
+
#include <ATen/ops/values_copy_compositeexplicitautograd_dispatch.h>
|
530 |
+
#include <ATen/ops/var_mean_compositeexplicitautograd_dispatch.h>
|
531 |
+
#include <ATen/ops/vdot_compositeexplicitautograd_dispatch.h>
|
532 |
+
#include <ATen/ops/view_compositeexplicitautograd_dispatch.h>
|
533 |
+
#include <ATen/ops/view_as_complex_copy_compositeexplicitautograd_dispatch.h>
|
534 |
+
#include <ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h>
|
535 |
+
#include <ATen/ops/view_copy_compositeexplicitautograd_dispatch.h>
|
536 |
+
#include <ATen/ops/xlogy_compositeexplicitautograd_dispatch.h>
|
537 |
+
#include <ATen/ops/zero_compositeexplicitautograd_dispatch.h>
|
538 |
+
#include <ATen/ops/zeros_compositeexplicitautograd_dispatch.h>
|
539 |
+
#include <ATen/ops/zeros_like_compositeexplicitautograd_dispatch.h>
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h
ADDED
@@ -0,0 +1,323 @@
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from \
<ATen/ops/{my_operator}_compositeexplicitautogradnonfunctional_dispatch.h>. \
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

#include <ATen/ops/_addmm_activation_compositeexplicitautogradnonfunctional_dispatch.h>
|
20 |
+
#include <ATen/ops/_conj_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
21 |
+
#include <ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h>
|
22 |
+
#include <ATen/ops/_convert_indices_from_csr_to_coo_compositeexplicitautogradnonfunctional_dispatch.h>
|
23 |
+
#include <ATen/ops/_fw_primal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
24 |
+
#include <ATen/ops/_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
25 |
+
#include <ATen/ops/_linalg_det_compositeexplicitautogradnonfunctional_dispatch.h>
|
26 |
+
#include <ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h>
|
27 |
+
#include <ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h>
|
28 |
+
#include <ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h>
|
29 |
+
#include <ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h>
|
30 |
+
#include <ATen/ops/_log_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
|
31 |
+
#include <ATen/ops/_log_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
|
32 |
+
#include <ATen/ops/_make_dual_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
33 |
+
#include <ATen/ops/_neg_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
34 |
+
#include <ATen/ops/_nested_get_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
35 |
+
#include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
36 |
+
#include <ATen/ops/_nested_view_from_jagged_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
37 |
+
#include <ATen/ops/_reshape_alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
38 |
+
#include <ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
|
39 |
+
#include <ATen/ops/_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
|
40 |
+
#include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
41 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
42 |
+
#include <ATen/ops/_trilinear_compositeexplicitautogradnonfunctional_dispatch.h>
|
43 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
|
44 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
45 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
|
46 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
47 |
+
#include <ATen/ops/_upsample_nearest_exact1d_compositeexplicitautogradnonfunctional_dispatch.h>
|
48 |
+
#include <ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
49 |
+
#include <ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
50 |
+
#include <ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
51 |
+
#include <ATen/ops/_upsample_nearest_exact3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
52 |
+
#include <ATen/ops/_upsample_nearest_exact3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
53 |
+
#include <ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
54 |
+
#include <ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h>
|
55 |
+
#include <ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h>
|
56 |
+
#include <ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
57 |
+
#include <ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
58 |
+
#include <ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
59 |
+
#include <ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
60 |
+
#include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>
|
61 |
+
#include <ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h>
|
62 |
+
#include <ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h>
|
63 |
+
#include <ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h>
|
64 |
+
#include <ATen/ops/addmv_compositeexplicitautogradnonfunctional_dispatch.h>
|
65 |
+
#include <ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
66 |
+
#include <ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h>
|
67 |
+
#include <ATen/ops/amax_compositeexplicitautogradnonfunctional_dispatch.h>
|
68 |
+
#include <ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h>
|
69 |
+
#include <ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h>
|
70 |
+
#include <ATen/ops/any_compositeexplicitautogradnonfunctional_dispatch.h>
|
71 |
+
#include <ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h>
|
72 |
+
#include <ATen/ops/argmin_compositeexplicitautogradnonfunctional_dispatch.h>
|
73 |
+
#include <ATen/ops/as_strided_compositeexplicitautogradnonfunctional_dispatch.h>
|
74 |
+
#include <ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
75 |
+
#include <ATen/ops/as_strided_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
|
76 |
+
#include <ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h>
|
77 |
+
#include <ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h>
|
78 |
+
#include <ATen/ops/atan_compositeexplicitautogradnonfunctional_dispatch.h>
|
79 |
+
#include <ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h>
|
80 |
+
#include <ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h>
|
81 |
+
#include <ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
82 |
+
#include <ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
83 |
+
#include <ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
84 |
+
#include <ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
85 |
+
#include <ATen/ops/baddbmm_compositeexplicitautogradnonfunctional_dispatch.h>
|
86 |
+
#include <ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h>
|
87 |
+
#include <ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h>
|
88 |
+
#include <ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h>
|
89 |
+
#include <ATen/ops/bitwise_not_compositeexplicitautogradnonfunctional_dispatch.h>
|
90 |
+
#include <ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h>
|
91 |
+
#include <ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h>
|
92 |
+
#include <ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h>
|
93 |
+
#include <ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h>
|
94 |
+
#include <ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h>
|
95 |
+
#include <ATen/ops/ccol_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
96 |
+
#include <ATen/ops/ceil_compositeexplicitautogradnonfunctional_dispatch.h>
|
97 |
+
#include <ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h>
|
98 |
+
#include <ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h>
|
99 |
+
#include <ATen/ops/clamp_min_compositeexplicitautogradnonfunctional_dispatch.h>
|
100 |
+
#include <ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
101 |
+
#include <ATen/ops/copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
102 |
+
#include <ATen/ops/copysign_compositeexplicitautogradnonfunctional_dispatch.h>
|
103 |
+
#include <ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h>
|
104 |
+
#include <ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h>
|
105 |
+
#include <ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
106 |
+
#include <ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h>
|
107 |
+
#include <ATen/ops/cumsum_compositeexplicitautogradnonfunctional_dispatch.h>
|
108 |
+
#include <ATen/ops/detach_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
109 |
+
#include <ATen/ops/diag_embed_compositeexplicitautogradnonfunctional_dispatch.h>
|
110 |
+
#include <ATen/ops/diagonal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
111 |
+
#include <ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
|
112 |
+
#include <ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h>
|
113 |
+
#include <ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h>
|
114 |
+
#include <ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h>
|
115 |
+
#include <ATen/ops/elu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
116 |
+
#include <ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h>
|
117 |
+
#include <ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h>
|
118 |
+
#include <ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h>
|
119 |
+
#include <ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h>
|
120 |
+
#include <ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h>
|
121 |
+
#include <ATen/ops/exp2_compositeexplicitautogradnonfunctional_dispatch.h>
|
122 |
+
#include <ATen/ops/expand_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
123 |
+
#include <ATen/ops/expm1_compositeexplicitautogradnonfunctional_dispatch.h>
|
124 |
+
#include <ATen/ops/floor_compositeexplicitautogradnonfunctional_dispatch.h>
|
125 |
+
#include <ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h>
|
126 |
+
#include <ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h>
|
127 |
+
#include <ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h>
|
128 |
+
#include <ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h>
|
129 |
+
#include <ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
130 |
+
#include <ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
131 |
+
#include <ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
132 |
+
#include <ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h>
|
133 |
+
#include <ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h>
|
134 |
+
#include <ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h>
|
135 |
+
#include <ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h>
|
136 |
+
#include <ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
137 |
+
#include <ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h>
|
138 |
+
#include <ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h>
|
139 |
+
#include <ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h>
|
140 |
+
#include <ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
141 |
+
#include <ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
|
142 |
+
#include <ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
143 |
+
#include <ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h>
|
144 |
+
#include <ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h>
|
145 |
+
#include <ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h>
|
146 |
+
#include <ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h>
|
147 |
+
#include <ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h>
|
148 |
+
#include <ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h>
|
149 |
+
#include <ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h>
|
150 |
+
#include <ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
151 |
+
#include <ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
|
152 |
+
#include <ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
153 |
+
#include <ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h>
|
154 |
+
#include <ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h>
|
155 |
+
#include <ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h>
|
156 |
+
#include <ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h>
|
157 |
+
#include <ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h>
|
158 |
+
#include <ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h>
|
159 |
+
#include <ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
160 |
+
#include <ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h>
|
161 |
+
#include <ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h>
|
162 |
+
#include <ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
163 |
+
#include <ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h>
|
164 |
+
#include <ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h>
|
165 |
+
#include <ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h>
|
166 |
+
#include <ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
|
167 |
+
#include <ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h>
|
168 |
+
#include <ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h>
|
169 |
+
#include <ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
|
170 |
+
#include <ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h>
|
171 |
+
#include <ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h>
|
172 |
+
#include <ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h>
|
173 |
+
#include <ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h>
|
174 |
+
#include <ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h>
|
175 |
+
#include <ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h>
|
176 |
+
#include <ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h>
|
177 |
+
#include <ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h>
|
178 |
+
#include <ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h>
|
179 |
+
#include <ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h>
|
180 |
+
#include <ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
181 |
+
#include <ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h>
|
182 |
+
#include <ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h>
|
183 |
+
#include <ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h>
|
184 |
+
#include <ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h>
|
185 |
+
#include <ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h>
|
186 |
+
#include <ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
187 |
+
#include <ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h>
|
188 |
+
#include <ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h>
|
189 |
+
#include <ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h>
|
190 |
+
#include <ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h>
|
191 |
+
#include <ATen/ops/mish_compositeexplicitautogradnonfunctional_dispatch.h>
|
192 |
+
#include <ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h>
|
193 |
+
#include <ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h>
|
194 |
+
#include <ATen/ops/mul_compositeexplicitautogradnonfunctional_dispatch.h>
|
195 |
+
#include <ATen/ops/narrow_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
196 |
+
#include <ATen/ops/ne_compositeexplicitautogradnonfunctional_dispatch.h>
|
197 |
+
#include <ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h>
|
198 |
+
#include <ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h>
|
199 |
+
#include <ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h>
|
200 |
+
#include <ATen/ops/nll_loss_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
201 |
+
#include <ATen/ops/nll_loss_forward_compositeexplicitautogradnonfunctional_dispatch.h>
|
202 |
+
#include <ATen/ops/norm_compositeexplicitautogradnonfunctional_dispatch.h>
|
203 |
+
#include <ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
204 |
+
#include <ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h>
|
205 |
+
#include <ATen/ops/pixel_unshuffle_compositeexplicitautogradnonfunctional_dispatch.h>
|
206 |
+
#include <ATen/ops/polygamma_compositeexplicitautogradnonfunctional_dispatch.h>
|
207 |
+
#include <ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h>
|
208 |
+
#include <ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h>
|
209 |
+
#include <ATen/ops/reciprocal_compositeexplicitautogradnonfunctional_dispatch.h>
|
210 |
+
#include <ATen/ops/reflection_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
|
211 |
+
#include <ATen/ops/reflection_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
212 |
+
#include <ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
213 |
+
#include <ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
214 |
+
#include <ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h>
|
215 |
+
#include <ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h>
|
216 |
+
#include <ATen/ops/replication_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
|
217 |
+
#include <ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
218 |
+
#include <ATen/ops/replication_pad2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
219 |
+
#include <ATen/ops/replication_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
220 |
+
#include <ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h>
|
221 |
+
#include <ATen/ops/row_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
222 |
+
#include <ATen/ops/rsqrt_compositeexplicitautogradnonfunctional_dispatch.h>
|
223 |
+
#include <ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h>
|
224 |
+
#include <ATen/ops/scatter_add_compositeexplicitautogradnonfunctional_dispatch.h>
|
225 |
+
#include <ATen/ops/scatter_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
|
226 |
+
#include <ATen/ops/select_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
227 |
+
#include <ATen/ops/select_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
228 |
+
#include <ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
|
229 |
+
#include <ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h>
|
230 |
+
#include <ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
|
231 |
+
#include <ATen/ops/sigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
232 |
+
#include <ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h>
|
233 |
+
#include <ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h>
|
234 |
+
#include <ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h>
|
235 |
+
#include <ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
236 |
+
#include <ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h>
|
237 |
+
#include <ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h>
|
238 |
+
#include <ATen/ops/sinh_compositeexplicitautogradnonfunctional_dispatch.h>
|
239 |
+
#include <ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
240 |
+
#include <ATen/ops/slice_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
|
241 |
+
#include <ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
242 |
+
#include <ATen/ops/smooth_l1_loss_compositeexplicitautogradnonfunctional_dispatch.h>
|
243 |
+
#include <ATen/ops/softplus_compositeexplicitautogradnonfunctional_dispatch.h>
|
244 |
+
#include <ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
245 |
+
#include <ATen/ops/softshrink_compositeexplicitautogradnonfunctional_dispatch.h>
|
246 |
+
#include <ATen/ops/softshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
247 |
+
#include <ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h>
|
248 |
+
#include <ATen/ops/special_airy_ai_compositeexplicitautogradnonfunctional_dispatch.h>
|
249 |
+
#include <ATen/ops/special_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
|
250 |
+
#include <ATen/ops/special_bessel_j1_compositeexplicitautogradnonfunctional_dispatch.h>
|
251 |
+
#include <ATen/ops/special_bessel_y0_compositeexplicitautogradnonfunctional_dispatch.h>
|
252 |
+
#include <ATen/ops/special_bessel_y1_compositeexplicitautogradnonfunctional_dispatch.h>
|
253 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
|
254 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
|
255 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
|
256 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
|
257 |
+
#include <ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h>
|
258 |
+
#include <ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h>
|
259 |
+
#include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h>
|
260 |
+
#include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautogradnonfunctional_dispatch.h>
|
261 |
+
#include <ATen/ops/special_i0e_compositeexplicitautogradnonfunctional_dispatch.h>
|
262 |
+
#include <ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h>
|
263 |
+
#include <ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h>
|
264 |
+
#include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautogradnonfunctional_dispatch.h>
|
265 |
+
#include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautogradnonfunctional_dispatch.h>
|
266 |
+
#include <ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h>
|
267 |
+
#include <ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h>
|
268 |
+
#include <ATen/ops/special_modified_bessel_i1_compositeexplicitautogradnonfunctional_dispatch.h>
|
269 |
+
#include <ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
|
270 |
+
#include <ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
|
271 |
+
#include <ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h>
|
272 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
|
273 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
|
274 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
|
275 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
|
276 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
|
277 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
|
278 |
+
#include <ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
|
279 |
+
#include <ATen/ops/special_xlog1py_compositeexplicitautogradnonfunctional_dispatch.h>
|
280 |
+
#include <ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h>
|
281 |
+
#include <ATen/ops/split_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
282 |
+
#include <ATen/ops/split_with_sizes_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
283 |
+
#include <ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h>
|
284 |
+
#include <ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
285 |
+
#include <ATen/ops/sub_compositeexplicitautogradnonfunctional_dispatch.h>
|
286 |
+
#include <ATen/ops/sum_compositeexplicitautogradnonfunctional_dispatch.h>
|
287 |
+
#include <ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
288 |
+
#include <ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h>
|
289 |
+
#include <ATen/ops/tanh_compositeexplicitautogradnonfunctional_dispatch.h>
|
290 |
+
#include <ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
291 |
+
#include <ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h>
|
292 |
+
#include <ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
293 |
+
#include <ATen/ops/topk_compositeexplicitautogradnonfunctional_dispatch.h>
|
294 |
+
#include <ATen/ops/transpose_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
295 |
+
#include <ATen/ops/triangular_solve_compositeexplicitautogradnonfunctional_dispatch.h>
|
296 |
+
#include <ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h>
|
297 |
+
#include <ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h>
|
298 |
+
#include <ATen/ops/trunc_compositeexplicitautogradnonfunctional_dispatch.h>
|
299 |
+
#include <ATen/ops/unbind_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
300 |
+
#include <ATen/ops/unfold_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
301 |
+
#include <ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
302 |
+
#include <ATen/ops/upsample_bicubic2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
303 |
+
#include <ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
304 |
+
#include <ATen/ops/upsample_bilinear2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
305 |
+
#include <ATen/ops/upsample_bilinear2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
306 |
+
#include <ATen/ops/upsample_linear1d_compositeexplicitautogradnonfunctional_dispatch.h>
|
307 |
+
#include <ATen/ops/upsample_linear1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
308 |
+
#include <ATen/ops/upsample_nearest1d_compositeexplicitautogradnonfunctional_dispatch.h>
|
309 |
+
#include <ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
310 |
+
#include <ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h>
|
311 |
+
#include <ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
312 |
+
#include <ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
313 |
+
#include <ATen/ops/upsample_nearest3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
314 |
+
#include <ATen/ops/upsample_trilinear3d_compositeexplicitautogradnonfunctional_dispatch.h>
|
315 |
+
#include <ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
|
316 |
+
#include <ATen/ops/values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
317 |
+
#include <ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
318 |
+
#include <ATen/ops/view_as_real_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
319 |
+
#include <ATen/ops/view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
|
320 |
+
#include <ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h>
venv/lib/python3.10/site-packages/torch/include/ATen/Config.h
ADDED
@@ -0,0 +1,22 @@
#pragma once

// Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's
// obvious if you forgot to include Config.h
// c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
//
// DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h

#define AT_MKLDNN_ENABLED() 1
#define AT_MKLDNN_ACL_ENABLED() 0
#define AT_MKL_ENABLED() 1
#define AT_MKL_SEQUENTIAL() 0
#define AT_POCKETFFT_ENABLED() 0
#define AT_NNPACK_ENABLED() 1
#define CAFFE2_STATIC_LINK_CUDA() 0
#define AT_BUILD_WITH_BLAS() 1
#define AT_BUILD_WITH_LAPACK() 1
#define AT_PARALLEL_OPENMP 1
#define AT_PARALLEL_NATIVE 0
#define AT_PARALLEL_NATIVE_TBB 0
#define AT_BLAS_F2C() 0
#define AT_BLAS_USE_CBLAS_DOT() 0
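The comment in Config.h describes the intended usage pattern: these macros are always defined (to 0 or 1), so consumers branch on them with #if rather than #ifdef, which turns a forgotten include of Config.h into a compile error instead of a silently-false test. A minimal sketch of that pattern (the helper function below is hypothetical, not part of this header):

#include <ATen/Config.h>

// Hypothetical helper: reports whether this ATen build was compiled with MKL.
// Using #if (not #ifdef) means forgetting to include Config.h fails to compile.
inline bool builtWithMkl() {
#if AT_MKL_ENABLED()
  return true;
#else
  return false;
#endif
}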
venv/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h
ADDED
@@ -0,0 +1,27 @@
#pragma once

#include <c10/core/DeviceType.h>
#include <c10/macros/Macros.h>

#include <ATen/detail/MTIAHooksInterface.h>
#include <optional>

// This file defines the top level Accelerator concept for PyTorch.
// A device is an accelerator per the definition here if:
// - It is mutually exclusive with all other accelerators
// - It performs asynchronous compute via a Stream/Event system
// - It provides a set of common APIs as defined by AcceleratorHooksInterface
//
// As of today, accelerator devices are (in no particular order):
// CUDA, MTIA, PrivateUse1
// We want to add once all the proper APIs are supported and tested:
// HIP, MPS, XPU

namespace at {

// Ensures that only one accelerator is available (at
// compile time if possible) and return it.
// When checked is true, the returned optional always has a value.
TORCH_API std::optional<c10::DeviceType> getAccelerator(bool checked = false);

} // namespace at
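As a rough illustration of how the getAccelerator declaration above is meant to be consumed (a sketch, not code from this repository; it assumes the usual c10::DeviceTypeName helper for printing a DeviceType):

#include <ATen/DeviceAccelerator.h>
#include <iostream>

int main() {
  // With checked = false the optional may be empty when no accelerator is present.
  if (auto acc = at::getAccelerator(/*checked=*/false)) {
    std::cout << "accelerator: " << c10::DeviceTypeName(*acc) << "\n";
  } else {
    std::cout << "no accelerator available\n";
  }
  return 0;
}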
venv/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h
ADDED
@@ -0,0 +1,41 @@
#pragma once

#include <ATen/core/IListRef.h>
#include <ATen/core/Tensor.h>
#include <c10/core/DeviceGuard.h>
#include <c10/core/ScalarType.h> // TensorList whyyyyy

namespace at {

// Are you here because you're wondering why DeviceGuard(tensor) no
// longer works? For code organization reasons, we have temporarily(?)
// removed this constructor from DeviceGuard. The new way to
// spell it is:
//
// OptionalDeviceGuard guard(device_of(tensor));

/// Return the Device of a Tensor, if the Tensor is defined.
inline c10::optional<Device> device_of(const Tensor& t) {
  if (t.defined()) {
    return c10::make_optional(t.device());
  } else {
    return c10::nullopt;
  }
}

inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
  return t.has_value() ? device_of(t.value()) : c10::nullopt;
}

/// Return the Device of a TensorList, if the list is non-empty and
/// the first Tensor is defined. (This function implicitly assumes
/// that all tensors in the list have the same device.)
inline c10::optional<Device> device_of(ITensorListRef t) {
  if (!t.empty()) {
    return device_of(t.front());
  } else {
    return c10::nullopt;
  }
}

} // namespace at
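The header comment above spells out the replacement idiom for the removed DeviceGuard(tensor) constructor. A small hedged sketch of that idiom (the helper function below is hypothetical, only device_of and the c10 guard type come from the headers):

#include <ATen/ATen.h>
#include <ATen/DeviceGuard.h>

// Switch the current device to the tensor's device for the scope of the call;
// the guard is a no-op when the tensor is undefined (empty optional).
at::Tensor add_one_on_same_device(const at::Tensor& t) {
  c10::OptionalDeviceGuard guard(at::device_of(t));
  return t + 1;
}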
venv/lib/python3.10/site-packages/torch/include/ATen/DimVector.h
ADDED
@@ -0,0 +1,2 @@
#pragma once
#include <ATen/core/DimVector.h>
venv/lib/python3.10/site-packages/torch/include/ATen/Dimname.h
ADDED
@@ -0,0 +1 @@
+#include <ATen/core/Dimname.h>
venv/lib/python3.10/site-packages/torch/include/ATen/DynamicLibrary.h
ADDED
@@ -0,0 +1,34 @@
+#pragma once
+
+#include <ATen/Utils.h>
+#include <c10/macros/Export.h>
+#include <c10/util/Exception.h>
+
+namespace c10 {
+
+class DynamicLibraryError : public Error {
+  using Error::Error;
+};
+
+} // namespace c10
+
+namespace at {
+
+struct DynamicLibrary {
+  AT_DISALLOW_COPY_AND_ASSIGN(DynamicLibrary);
+
+  TORCH_API DynamicLibrary(
+      const char* name,
+      const char* alt_name = nullptr,
+      bool leak_handle = false);
+
+  TORCH_API void* sym(const char* name);
+
+  TORCH_API ~DynamicLibrary();
+
+ private:
+  bool leak_handle;
+  void* handle = nullptr;
+};
+
+} // namespace at
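A hedged sketch, not part of the diff, of loading a shared library and resolving a symbol through at::DynamicLibrary. The library name is illustrative.

```cpp
// Assumes only the DynamicLibrary constructor and sym() declared above.
#include <ATen/DynamicLibrary.h>

double call_cos(double x) {
  at::DynamicLibrary libm("libm.so.6"); // expected to throw on failure
  using cos_fn = double (*)(double);
  auto cos_ptr = reinterpret_cast<cos_fn>(libm.sym("cos"));
  return cos_ptr(x); // handle is released when libm goes out of scope
}
```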
venv/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h
ADDED
@@ -0,0 +1,160 @@
+#pragma once
+#include <ATen/core/TensorBase.h>
+
+namespace at::detail {
+
+inline void check_size_nonnegative(ArrayRef<int64_t> size) {
+  for (const auto& x : size) {
+    TORCH_CHECK(
+        x >= 0,
+        "Trying to create tensor with negative dimension ",
+        x,
+        ": ",
+        size);
+  }
+}
+
+inline void check_size_nonnegative(ArrayRef<c10::SymInt> size) {
+  for (const auto& x : size) {
+    TORCH_CHECK(
+        x.expect_size(__FILE__, __LINE__),
+        "Trying to create tensor with negative dimension ",
+        x,
+        ": ",
+        size);
+  }
+}
+
+TORCH_API size_t computeStorageNbytesContiguous(
+    IntArrayRef sizes,
+    size_t itemsize,
+    size_t storage_offset = 0);
+TORCH_API SymInt computeStorageNbytesContiguous(
+    SymIntArrayRef sizes,
+    const SymInt& itemsize,
+    const SymInt& storage_offset = 0);
+TORCH_API size_t computeStorageNbytes(
+    IntArrayRef sizes,
+    IntArrayRef strides,
+    size_t itemsize,
+    size_t storage_offset = 0);
+TORCH_API SymInt computeStorageNbytes(
+    SymIntArrayRef sizes,
+    SymIntArrayRef strides,
+    const SymInt& itemsize,
+    const SymInt& storage_offset = 0);
+
+TORCH_API TensorBase empty_generic(
+    IntArrayRef size,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type,
+    c10::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_strided_generic(
+    IntArrayRef size,
+    IntArrayRef stride,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type);
+
+TORCH_API TensorBase empty_strided_symint_generic(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type);
+
+TORCH_API TensorBase empty_cpu(
+    IntArrayRef size,
+    ScalarType dtype,
+    bool pin_memory = false,
+    c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
+
+TORCH_API TensorBase empty_cpu(
+    IntArrayRef size,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Layout> layout_opt,
+    c10::optional<Device> device_opt,
+    c10::optional<bool> pin_memory_opt,
+    c10::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);
+
+TORCH_API TensorBase empty_strided_cpu(
+    IntArrayRef size,
+    IntArrayRef stride,
+    ScalarType dtype,
+    bool pin_memory = false);
+
+TORCH_API TensorBase empty_strided_cpu(
+    IntArrayRef size,
+    IntArrayRef stride,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Layout> layout_opt,
+    c10::optional<Device> device_opt,
+    c10::optional<bool> pin_memory_opt);
+
+TORCH_API TensorBase empty_strided_cpu(
+    IntArrayRef size,
+    IntArrayRef stride,
+    const TensorOptions& options);
+
+TORCH_API TensorBase empty_meta(
+    IntArrayRef size,
+    ScalarType dtype,
+    c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
+
+TORCH_API TensorBase empty_meta(
+    IntArrayRef size,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Layout> layout_opt,
+    c10::optional<Device> device_opt,
+    c10::optional<bool> pin_memory_opt,
+    c10::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_symint_meta(
+    SymIntArrayRef size,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Layout> layout_opt,
+    c10::optional<Device> device_opt,
+    c10::optional<bool> pin_memory_opt,
+    c10::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
+
+TORCH_API TensorBase
+empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);
+
+TORCH_API TensorBase empty_strided_meta(
+    IntArrayRef size,
+    IntArrayRef stride,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Layout> layout_opt,
+    c10::optional<Device> device_opt,
+    c10::optional<bool> pin_memory_opt);
+
+TORCH_API TensorBase empty_strided_meta(
+    IntArrayRef size,
+    IntArrayRef stride,
+    const TensorOptions& options);
+
+TORCH_API TensorBase empty_strided_symint_meta(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    ScalarType dtype);
+
+TORCH_API TensorBase empty_strided_symint_meta(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Layout> layout_opt,
+    c10::optional<Device> device_opt,
+    c10::optional<bool> pin_memory_opt);
+
+TORCH_API TensorBase empty_strided_symint_meta(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    const TensorOptions& options);
+
+} // namespace at::detail
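A hedged sketch, not part of the diff, of using the CPU factory and the storage-size helper declared above:

```cpp
// Assumes only the empty_cpu() and computeStorageNbytesContiguous()
// declarations shown in EmptyTensor.h.
#include <ATen/EmptyTensor.h>
#include <iostream>

int main() {
  // Uninitialized 2x3 float tensor on CPU.
  at::TensorBase t = at::detail::empty_cpu({2, 3}, at::kFloat);
  // Contiguous storage size for the same shape and dtype.
  size_t nbytes = at::detail::computeStorageNbytesContiguous(
      {2, 3}, /*itemsize=*/sizeof(float));
  std::cout << t.numel() << " elements, " << nbytes << " bytes\n"; // 6, 24
  return 0;
}
```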
venv/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h
ADDED
@@ -0,0 +1,527 @@
+#pragma once
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/view.h>
+#include <ATen/ops/view_copy.h>
+#endif
+
+#include <ATen/Tensor.h>
+#include <ATen/core/DimVector.h>
+#include <c10/util/Exception.h>
+#include <c10/util/MaybeOwned.h>
+#include <c10/util/irange.h>
+
+#include <functional>
+#include <sstream>
+#include <tuple>
+#include <utility>
+
+namespace at {
+
+TORCH_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
+TORCH_API std::vector<SymInt> infer_size_symint(
+    SymIntArrayRef a,
+    SymIntArrayRef b);
+TORCH_API DimVector infer_size_dimvector(IntArrayRef a, IntArrayRef b);
+TORCH_API SymDimVector
+infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b);
+
+// Named type instead of a pair/tuple so that we can be sure to
+// construct the vectors in place and get NRVO.
+template <typename Container>
+struct InferExpandGeometryResult {
+  Container sizes;
+  Container strides;
+  explicit InferExpandGeometryResult(size_t ndim)
+      : sizes(ndim), strides(ndim) {}
+  explicit InferExpandGeometryResult(IntArrayRef sizes_, size_t ndim)
+      : sizes(sizes_.begin(), sizes_.end()), strides(ndim) {}
+};
+
+TORCH_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
+inferExpandGeometry(
+    IntArrayRef tensor_sizes,
+    IntArrayRef tensor_strides,
+    IntArrayRef sizes);
+
+TORCH_API InferExpandGeometryResult<DimVector> inferExpandGeometry_dimvector(
+    IntArrayRef tensor_sizes,
+    IntArrayRef tensor_strides,
+    IntArrayRef sizes);
+
+TORCH_API std::vector<int64_t> infer_dense_strides(
+    IntArrayRef tensor_sizes,
+    IntArrayRef tensor_strides);
+
+// True if input shapes are expandable
+// NOTE: infer_size did a similar check, please keep them sync if change is
+// needed
+inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) {
+  size_t ndim1 = shape1.size();
+  size_t ndim2 = shape2.size();
+  size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2;
+
+  for (int64_t i = static_cast<int64_t>(ndim) - 1; i >= 0; --i) {
+    if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 ||
+        shape2[ndim2] == 1) {
+      continue;
+    }
+    return false;
+  }
+  return true;
+}
+
+// avoid copy-construction of Tensor by using a reference_wrapper.
+inline void check_defined(
+    std::initializer_list<std::reference_wrapper<const Tensor>> tensors,
+    const char* api_name) {
+  for (auto& t : tensors) {
+    if (!t.get().defined()) {
+      AT_ERROR(api_name, "(...) called with an undefined Tensor");
+    }
+  }
+}
+
+// NOTE [ ExpandUtils Borrowing ]
+//
+// Functions in ExpandUtils return `c10::MaybeOwned<Tensor>` because
+// expansion may not actually be needed, in which case we can improve
+// efficiency by returning
+// `c10::MaybeOwned<Tensor>::borrowed(to_expand)`. However, this means
+// that you need to be careful: the returned `c10::MaybeOwned<Tensor>`
+// must not outlive the original `Tensor` object that `to_expand`
+// referred to! The deleted rvalue reference overloads of these
+// functions help with this by preventing trivial use of a temporary
+// resulting from a function call, but it is still possible to make a
+// mistake.
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand) {
+  if (tensor.sym_sizes().equals(to_expand.sym_sizes())) {
+    return c10::MaybeOwned<Tensor>::borrowed(to_expand);
+  }
+  return c10::MaybeOwned<Tensor>::owned(
+      to_expand.expand_symint(tensor.sym_sizes()));
+}
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand) = delete;
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand,
+    const char* api_name) {
+  check_defined({tensor, to_expand}, api_name);
+  return expand_inplace(tensor, to_expand);
+}
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand,
+    const char* api_name) = delete;
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    const Tensor& to_expand2) {
+  if (tensor.sizes().equals(to_expand1.sizes()) &&
+      tensor.sizes().equals((to_expand2.sizes()))) {
+    return std::make_tuple(
+        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand2));
+  }
+
+  return std::make_tuple(
+      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(tensor.sizes())),
+      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(tensor.sizes())));
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand1,
+    const Tensor& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    Tensor&& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(const Tensor& tensor, Tensor&& to_expand1, Tensor&& to_expand2) =
+    delete;
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) {
+  check_defined({tensor, to_expand1, to_expand2}, api_name);
+  return expand_inplace(tensor, to_expand1, to_expand2);
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+
+// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation.
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(const Tensor& to_expand1, const Tensor& to_expand2) {
+  auto s1 = to_expand1.sym_sizes();
+  auto s2 = to_expand2.sym_sizes();
+  if (s1.equals(s2)) {
+    return std::make_tuple(
+        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand2));
+  }
+
+  auto expanded_size = infer_size_symdimvector(s1, s2);
+  return std::make_tuple(
+      c10::MaybeOwned<Tensor>::owned(to_expand1.expand_symint(expanded_size)),
+      c10::MaybeOwned<Tensor>::owned(to_expand2.expand_symint(expanded_size)));
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(Tensor&& to_expand1, const Tensor& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(const Tensor& to_expand1, Tensor&& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2) = delete;
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) {
+  check_defined({to_expand1, to_expand2}, api_name);
+  return expand_outplace(to_expand1, to_expand2);
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3) {
+  if (to_expand1.sizes().equals(to_expand2.sizes()) &&
+      to_expand1.sizes().equals(to_expand3.sizes())) {
+    return std::make_tuple(
+        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand2),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand3));
+  }
+
+  auto expanded_size12 =
+      infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes());
+  auto expanded_size =
+      infer_size_dimvector(expanded_size12, to_expand3.sizes());
+  return std::make_tuple(
+      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(expanded_size)),
+      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(expanded_size)),
+      c10::MaybeOwned<Tensor>::owned(to_expand3.expand(expanded_size)));
+}
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    Tensor&& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2, Tensor&& to_expand3) =
+    delete;
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) {
+  check_defined({to_expand1, to_expand2, to_expand3}, api_name);
+  return expand_outplace(to_expand1, to_expand2, to_expand3);
+}
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    const Tensor& to_expand,
+    IntArrayRef sizes) {
+  if (to_expand.sizes().equals(sizes)) {
+    return c10::MaybeOwned<Tensor>::borrowed(to_expand);
+  }
+
+  return c10::MaybeOwned<Tensor>::owned(to_expand.expand(sizes));
+}
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    Tensor&& to_expand,
+    IntArrayRef sizes) = delete;
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    const Tensor& to_expand,
+    IntArrayRef sizes,
+    const char* api_name) {
+  check_defined({to_expand}, api_name);
+  return expand_size(to_expand, sizes);
+}
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    Tensor&& to_expand,
+    IntArrayRef sizes,
+    const char* api_name) = delete;
+
+inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
+  // expands a list of Tensors; ignores undefined (null) tensors
+  bool first = true;
+  DimVector sizes;
+  for (const auto i : c10::irange(to_expand.size())) {
+    if (!to_expand[i].defined()) {
+      continue;
+    } else if (first) {
+      sizes = to_expand[i].sizes();
+      first = false;
+    } else {
+      sizes = infer_size_dimvector(sizes, to_expand[i].sizes());
+    }
+  }
+
+  std::vector<Tensor> result(to_expand.size());
+  for (const auto i : c10::irange(to_expand.size())) {
+    if (!to_expand[i].defined()) {
+      continue;
+    } else if (to_expand[i].sizes().equals(sizes)) {
+      result[i] = to_expand[i];
+    } else {
+      result[i] = to_expand[i].expand(sizes);
+    }
+  }
+  return result;
+}
+
+template <typename T>
+inline Tensor _sum_to(
+    Tensor tensor,
+    const c10::ArrayRef<T> shape,
+    bool always_return_non_view = false) {
+  if (shape.size() == 0) {
+    return tensor.sum();
+  }
+
+  auto sizes = at::symint::sizes<T>(tensor);
+  c10::SmallVector<int64_t, 8> reduce_dims;
+  const int64_t leading_dims = sizes.size() - shape.size();
+  for (const auto i : c10::irange(leading_dims)) {
+    reduce_dims.push_back(i);
+  }
+  for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
+    if (shape[i - leading_dims] == 1 && sizes[i] != 1) {
+      reduce_dims.push_back(i);
+    }
+  }
+
+  if (!reduce_dims.empty()) {
+    tensor = tensor.sum(reduce_dims, /*keepdim=*/true);
+  }
+
+  if (always_return_non_view) {
+    // This is only actually used by the functionalization pass.
+    // We want to be able to guarantee that this function doesn't return a view
+    // of the input.
+    return leading_dims > 0 ? at::symint::view_copy<T>(tensor, shape)
+                            : tensor.clone();
+  } else {
+    return leading_dims > 0 ? at::symint::view<T>(tensor, shape) : tensor;
+  }
+}
+
+inline Tensor sum_to(
+    Tensor tensor,
+    const c10::SymIntArrayRef shape,
+    bool always_return_non_view = false) {
+  return _sum_to(std::move(tensor), shape, always_return_non_view);
+}
+
+// Sums `tensor` repeatedly to produce a tensor of shape `shape`.
+// Precondition: is_expandable_to(shape, tensor.sizes()) must be true
+inline Tensor sum_to(
+    Tensor tensor,
+    const IntArrayRef shape,
+    bool always_return_non_view = false) {
+  return _sum_to(std::move(tensor), shape, always_return_non_view);
+}
+
+static inline bool is_expandable_to(
+    SymIntArrayRef shape,
+    c10::SymIntArrayRef desired) {
+  size_t ndim = shape.size();
+  size_t target_dim = desired.size();
+  if (ndim > target_dim) {
+    return false;
+  }
+  for (const auto i : c10::irange(ndim)) {
+    const auto& size = shape[ndim - i - 1];
+    const auto& target = desired[target_dim - i - 1];
+    if (size != target && size != 1) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
+  auto sym_shape = c10::SymIntArrayRef(
+      reinterpret_cast<const c10::SymInt*>(shape.data()), shape.size());
+  auto sym_desired = c10::SymIntArrayRef(
+      reinterpret_cast<const c10::SymInt*>(desired.data()), desired.size());
+  return is_expandable_to(sym_shape, sym_desired);
+}
+
+} // namespace at
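A hedged sketch, not part of the diff, of broadcasting two tensors with expand_outplace while respecting NOTE [ ExpandUtils Borrowing ]: the inputs are named lvalues, so the returned MaybeOwned handles cannot dangle.

```cpp
// Assumes only the expand_outplace() overloads declared above.
#include <ATen/ExpandUtils.h>
#include <ATen/Functions.h>

at::Tensor broadcast_add(const at::Tensor& a, const at::Tensor& b) {
  auto expanded = at::expand_outplace(a, b);     // tuple of c10::MaybeOwned<Tensor>
  const at::Tensor& ea = *std::get<0>(expanded); // borrowed if no expansion was needed
  const at::Tensor& eb = *std::get<1>(expanded);
  return ea + eb;
}
```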
venv/lib/python3.10/site-packages/torch/include/ATen/Formatting.h
ADDED
@@ -0,0 +1 @@
+#include <ATen/core/Formatting.h>
venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalTensorWrapper.h
ADDED
@@ -0,0 +1,408 @@
+
+#pragma once
+
+#include <ATen/ArrayRef.h>
+#include <ATen/FunctionalStorageImpl.h>
+#include <ATen/core/IListRef.h>
+#include <ATen/core/List.h>
+#include <ATen/core/boxing/BoxedKernel.h>
+#include <ATen/core/boxing/impl/boxing.h>
+#include <ATen/core/dispatch/Dispatcher.h>
+
+#include <c10/core/DispatchKey.h>
+
+namespace at {
+
+// Note [Functionalization Pass In Core]
+// The Functionalization pass is used to remove aliasing from a pytorch program.
+//
+// This is useful for backends that don't support aliasing, like XLA and Vulkan.
+// It's also necessary in order to remove mutation from a program, which is
+// needed in Functorch.
+//
+// Consider this program:
+// a = torch.ones(...)
+// b = a.view(...)
+// b.add_(1)
+//
+// In this program, b is meant to alias with a due to the use of view(). At the
+// end of the program, both a and b are full of 2's. However, backends that
+// don't support aliasing aren't able to correctly implement the view()
+// operator. Instead, they can opt into the Functionalization pass, which will
+// sit between the user and the backend, and provide the necessary aliasing
+// logic.
+//
+// The functionalization pass will turn the above program into a slightly
+// different program that has the same semantics, transparently to the user,
+// that backends like XLA/Vulkan are able to implement a = torch.ones(...) b =
+// a.view_copy(...) # view() replaced with view_copy(). Backends like
+// XLA/Vulkan can implement this! b.add_(1) a.add_(1) # Our functionalization
+// pass machinery knows that a and b are aliased - it applies b's mutation to a
+// too.
+//
+// So, how does the functionalization pass keep track of which tensors are
+// aliased? The pass works by wrapping EVERY tensor in the program inside of a
+// FunctionalTensorWrapper, which knows about its alias'd tensors.
+//
+// See Note [Functionalization: Alias Removal] for details on the aliasing
+// machinery. See Note [Functionalization: Mutation Removal] for details on
+// mutation removal.
+struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl {
+  explicit FunctionalTensorWrapper(const Tensor& value);
+  // Additional constructor to create a FunctionalTensorWrapper directly from an
+  // underlying tensor that was created from a view. For example, the code b =
+  // a.view1() will generate a constructor call to FunctionalTensorWrapper(b, a,
+  // view1_meta)
+  explicit FunctionalTensorWrapper(
+      const Tensor& view_value,
+      const FunctionalTensorWrapper* base,
+      const functionalization::ViewMeta& meta);
+
+  // Get the underlying, actual tensor, that doesn't know anything about
+  // functionalization.
+  const Tensor& value() const {
+    return value_;
+  };
+  // The concept of "level" is only ever important to functorch; it's exposed
+  // here as more of a hook for functorch to use.
+  int64_t level() const {
+    return level_;
+  };
+  void set_level(int64_t level) {
+    level_ = level;
+  }
+  bool has_metadata_mutation() const {
+    return has_metadata_mutation_;
+  };
+
+  // Denotes a mutation that's hidden from autograd,
+  // e.g. for the purposes of passing a tensor to a triton kernel
+  void mark_mutation_hidden_from_autograd() {
+    mutation_hidden_from_autograd_counter_++;
+  }
+  void mark_mutation_during_no_grad_or_inference_mode() {
+    mutation_during_no_grad_or_inference_mode_++;
+  }
+  // Are all the mutations happening to the tensor hidden from autograd
+  bool are_all_mutations_hidden_from_autograd() const {
+    return mutation_hidden_from_autograd_counter_ == mutation_counter_;
+  }
+  // Did all mutations happen under no_grad or inference_mode
+  // (We also need to ignore mutations fully hidden from autograd here)
+  bool are_all_mutations_under_no_grad_or_inference_mode() const {
+    return mutation_hidden_from_autograd_counter_ +
+        mutation_during_no_grad_or_inference_mode_ ==
+        mutation_counter_;
+  }
+
+  // Sync's the underlying tensor with its alias, if it's out of date. This
+  // involves two steps: 1) Apply any pending updates/mutations to the alias 2)
+  // Replay the views (if any) to regenerate the current tensor off of the
+  // updated alias.
+  void sync_();
+  // Performs step (1) of the sync. This is its own public API because it's
+  // needed by view_inplace ops like transpose_. See Note [Functionalization
+  // Pass - Inplace View Ops]
+  void regenerate_from_base();
+  // Performs step (2) of the sync. This is its own public API because it's
+  // needed by functorch. functorch wants to make sure that all input tensors to
+  // a functionalized program have been properly synced so it can properly
+  // propagate mutations to inputs. It can't just call sync_(), because the
+  // FunctionalTensorWrapper will look like it has no aliases and sync_ will be
+  // a noop. We use the reference count on storage_ to determine if the wrapper
+  // is aliased, and by the time functorch is ready to propagate updates to
+  // inputs, any intermediate views of the input created by the program will
+  // have been deallocated. This function also returns whether or not the base
+  // actually had any updates to apply.
+  bool apply_updates();
+  // Takes the current state of value_ and snapshots it, sending it as a pending
+  // update to the alias.
+  void commit_update();
+  // When any tensor is mutated, the tensor increments its alias's "generation".
+  // Separately, each tensor maintains its own "generation" counter, which is
+  // used to determine if it's up-to-date with its alias. The act of syncing a
+  // tensor will set a tensor's generation equal to its alias's generation.
+  bool is_up_to_date() const;
+  // Freezes the storage of this tensor, preventing subsequent mutations
+  void freeze_storage() const;
+  // Every FunctionalTensorWrapper contains a vector<ViewMeta> objects
+  // describing the series of view ops that ran to generate the current tensor
+  // from the base tensor. This method is used by inplace-view ops like
+  // transpose_. It appends a ViewMeta to the existing stack, and refreshes the
+  // tensor by replaying the views off of the alias.
+  void mutate_view_meta(const at::functionalization::ViewMeta& meta);
+
+  // Custom implementation of self.set_(src)
+  void set__impl(const FunctionalTensorWrapper* other);
+
+  // Returns whether the current tensor's data was ever mutated
+  bool has_data_mutation();
+  //
+  // Returns whether the current FunctionalTensorWrapper
+  // experienced a set_() call.
+  bool was_storage_changed() {
+    return was_storage_changed_;
+  }
+
+  // The functionalization pass can be used to remove mutations.
+  // It does so by replacing any mutation op with it's corresponding
+  // out-of-place op, followed by a call to replace_(). e.g:
+  //
+  // a.add_(1)
+  //
+  // will turn into:
+  //
+  // tmp = a.add(1)
+  // a.replace_(tmp)
+  //
+  // replace_() swaps out the wrapped tensor, value_, with tmp.
+  void replace_(const Tensor& other);
+
+  bool is_multi_output_view() {
+    return is_multi_output_view_;
+  }
+
+  // See Note[resize_() in functionalization pass]
+  void maybe_replace_storage(const Tensor& other);
+
+  // Replaces the storage with a new functional storage,
+  // and clears the view_metas_ stack.
+  // WARNING: Calling this function will sever the aliasing relationship between
+  // the current FunctionalTensorWrapper and any of its outstanding aliases.
+  // Please only call if you know what you're doing.
+  void _unsafe_reset_storage();
+
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const override;
+
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override;
+
+  ~FunctionalTensorWrapper() override = default;
+
+  // FunctionalTensorWrapper overrides all custom size/stride function,
+  // so that if the inner tensor has a custom implementation
+  // we make sure to call that implementation.
+  at::IntArrayRef sizes_custom() const override;
+  at::IntArrayRef strides_custom() const override;
+  int64_t dim_custom() const override;
+  int64_t numel_custom() const override;
+  bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
+  c10::SymIntArrayRef sym_sizes_custom() const override;
+  c10::SymInt sym_size_custom(int64_t d) const override;
+  c10::SymIntArrayRef sym_strides_custom() const override;
+  c10::SymInt sym_storage_offset_custom() const override;
+  c10::Device device_custom() const override;
+
+ private:
+  const char* tensorimpl_type_name() const override;
+  void set_constructor_metadata();
+  functionalization::FunctionalStorageImpl* functional_storage_impl() const;
+
+  // This is used to re-implement shallow_copy_and_detach for
+  // FunctionalTensorWrapper. The implementation is identical, but we just need
+  // to return a subclass instead of a plain TensorImpl.
+  // TODO: maybe it's possible to arrange for that to happen automatically
+  // without an override here?
+  template <typename VariableVersion>
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
+      VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const;
+
+  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
+  void copy_tensor_metadata_and_refresh(
+      const FunctionalTensorWrapper* src_impl,
+      FunctionalTensorWrapper* dest_impl,
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const;
+
+  // Note that value is not taken by reference: internally, the wrapper will
+  // change the value tensor that it points to over time.
+  Tensor value_;
+  int64_t level_{};
+  // These two counters are used for identifying
+  // whether all the mutations on a given tensor are hidden from autograd or
+  // not. If we have an input mutation that is hidden from autograd, then once
+  // we convert the input mutation to a copy_() we know it will be safe to hide
+  // the copy_() from autograd as well.
+  uint64_t mutation_counter_ = 0;
+  uint64_t mutation_hidden_from_autograd_counter_ = 0;
+  uint64_t mutation_during_no_grad_or_inference_mode_ = 0;
+  bool has_metadata_mutation_ = false;
+  bool is_multi_output_view_ = false;
+  // Did the tensor experience a set_() call.
+  bool was_storage_changed_ = false;
+
+  size_t generation_ = 0;
+  std::vector<at::functionalization::ViewMeta> view_metas_;
+
+ protected:
+  static void copy_tensor_metadata(
+      const FunctionalTensorWrapper* src_impl,
+      FunctionalTensorWrapper* dest_impl,
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change);
+};
+
+// Utility functions for the functionalization pass.
+
+namespace functionalization {
+namespace impl {
+
+TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
+    const Tensor& tensor) {
+  auto functional_impl =
+      static_cast<FunctionalTensorWrapper*>(tensor.unsafeGetTensorImpl());
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_impl != nullptr);
+  return functional_impl;
+}
+
+TORCH_API bool isFunctionalTensor(const at::Tensor& tensor);
+TORCH_API bool isFunctionalTensor(const c10::optional<Tensor>& t);
+TORCH_API bool isFunctionalTensor(
+    const c10::List<c10::optional<Tensor>>& t_list);
+TORCH_API bool isFunctionalTensor(ITensorListRef list);
+
+TORCH_API Tensor to_functional_tensor(const Tensor& tensor);
+TORCH_API c10::optional<Tensor> to_functional_tensor(
+    const c10::optional<Tensor>& tensor);
+TORCH_API c10::List<c10::optional<Tensor>> to_functional_tensor(
+    const c10::List<c10::optional<Tensor>>& t_list);
+TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list);
+
+TORCH_API void freeze_functional_tensor(const Tensor& tensor);
+
+TORCH_API Tensor
+from_functional_tensor(const Tensor& tensor, bool assert_functional = true);
+TORCH_API c10::optional<Tensor> from_functional_tensor(
+    const c10::optional<Tensor>& t,
+    bool assert_functional = true);
+TORCH_API c10::List<c10::optional<Tensor>> from_functional_tensor(
+    const c10::List<c10::optional<Tensor>>& t_list);
+TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list);
+
+TORCH_API void sync(const at::Tensor& t);
+TORCH_API void sync(const c10::optional<Tensor>& t);
+TORCH_API void sync(const c10::List<c10::optional<Tensor>>& t_list);
+TORCH_API void sync(ITensorListRef t_list);
+
+TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other);
+TORCH_API void replace_(
+    const ITensorListRef functional_tensor,
+    ITensorListRef other);
+
+TORCH_API void commit_update(const Tensor& functional_tensor);
+TORCH_API void commit_update(ITensorListRef functional_tensor);
+
+TORCH_API void unsafe_reset_storage(const Tensor& functional_tensor);
+
+TORCH_API void mark_mutation_hidden_from_autograd(
+    const Tensor& functional_tensor);
+
+TORCH_API bool are_all_mutations_hidden_from_autograd(
+    const Tensor& functional_tensor);
+
+TORCH_API bool are_all_mutations_under_no_grad_or_inference_mode(
+    const Tensor& functional_tensor);
+
+// These two methods are XLA-specific logic and are no-ops
+// for the normal functionalization flow.
+TORCH_API void propagate_xla_data(
+    const Tensor& functional_tensor,
+    const Tensor& other);
+TORCH_API void propagate_xla_data(
+    const ITensorListRef functional_tensor,
+    ITensorListRef other);
+
+Tensor create_functional_tensor_with_view_meta(
+    const Tensor& view_to_wrap,
+    const Tensor& base,
+    functionalization::ViewMeta meta,
+    int64_t out_idx = 0);
+std::vector<Tensor> create_functional_tensor_with_view_meta(
+    ITensorListRef view_to_wrap,
+    const Tensor& base,
+    const functionalization::ViewMeta& meta);
+
+void mutate_view_meta(
+    const Tensor& self,
+    const functionalization::ViewMeta& meta);
+
+void set_sizes_strides_offset(const Tensor& out, const Tensor& meta_out);
+void set_sizes_strides_offset(
+    const std::vector<Tensor>& outs,
+    const std::vector<Tensor>& meta_outs);
+
+// ~~~~~ TLS used in functionalization ~~~~~
+
+TORCH_API bool getFunctionalizationReapplyViewsTLS();
+TORCH_API void setFunctionalizationReapplyViewsTLS(bool reapply_views);
+
+class TORCH_API FunctionalizationReapplyViewsGuard {
+ public:
+  FunctionalizationReapplyViewsGuard(bool reapply_views)
+      : prev_(getFunctionalizationReapplyViewsTLS()) {
+    setFunctionalizationReapplyViewsTLS(reapply_views);
+  }
+
+  ~FunctionalizationReapplyViewsGuard() {
+    setFunctionalizationReapplyViewsTLS(prev_);
+  }
+
+  FunctionalizationReapplyViewsGuard(
+      const FunctionalizationReapplyViewsGuard&) = delete;
+  FunctionalizationReapplyViewsGuard operator=(
+      const FunctionalizationReapplyViewsGuard&) = delete;
+  FunctionalizationReapplyViewsGuard(FunctionalizationReapplyViewsGuard&&) =
+      delete;
+  FunctionalizationReapplyViewsGuard operator=(
+      FunctionalizationReapplyViewsGuard&&) = delete;
+
+ private:
+  bool prev_;
+};
+
+} // namespace impl
+
+// Helper function to call an out-of-place composite aten kernel that may use
+// mutations / views internally, and functionalize them.
+TORCH_API void functionalize_op_helper(
+    const c10::OperatorHandle& op,
+    torch::jit::Stack* stack);
+
+template <class Op, bool symint, class ReturnType, class... ParameterTypes>
+struct _functionalize_aten_op final {};
+
+template <class Op, bool symint, class ReturnType, class... ParameterTypes>
+struct _functionalize_aten_op<Op, symint, ReturnType(ParameterTypes...)> final {
+  static ReturnType call(
+      typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) {
+    using FuncType = ReturnType(
+        typename c10::maybe_keep_symint<symint, ParameterTypes>::type...);
+    auto op = c10::Dispatcher::singleton()
+                  .findSchemaOrThrow(
+                      (const char*)Op::name, (const char*)Op::overload_name)
+                  .typed<FuncType>();
+
+    return c10::impl::BoxedKernelWrapper<FuncType>::call(
+        c10::BoxedKernel::makeFromFunction<functionalize_op_helper>(),
+        op,
+        // BoxedKernelWrapper knows to ignore this keyset argument,
+        // because functionalize_op_helper doesn't take in a DispatchKeySet
+        c10::DispatchKeySet(),
+        args...);
+  }
+};
+
+template <class Op>
+using functionalize_aten_op =
+    _functionalize_aten_op<Op, false, typename Op::schema>;
+
+template <class Op>
+using functionalize_aten_op_symint =
+    _functionalize_aten_op<Op, true, typename Op::schema>;
+
+} // namespace functionalization
+} // namespace at
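A hedged sketch, not part of the diff, writing out by hand the program rewrite described in Note [Functionalization Pass In Core], at the ATen level:

```cpp
// Assumes only the standard at::ones/view/add_/view_copy operators;
// the pass itself performs this rewrite automatically.
#include <ATen/ATen.h>

void aliasing_program() {
  at::Tensor a = at::ones({4});
  at::Tensor b = a.view({2, 2}); // b aliases a
  b.add_(1);                     // mutates a through the alias
}

void functionalized_equivalent() {
  at::Tensor a = at::ones({4});
  at::Tensor b = at::view_copy(a, {2, 2}); // view() replaced by view_copy(): no aliasing
  b = b.add(1);                            // out-of-place update
  a = a.add(1);                            // mutation propagated to the base by hand
}
```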
venv/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h
ADDED
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <c10/core/TensorOptions.h>
+
+namespace at {
+
+// Represents the initial TensorOptions, before the "defaults" are ever changed.
+// This is designed to be used in library code, where the explicit devices,
+// dtypes, etc. are known. NOTE: this is not a stable API.
+inline TensorOptions initialTensorOptions() {
+  return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad(
+      false);
+}
+
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/Layout.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/core/Layout.h>
venv/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h
ADDED
@@ -0,0 +1,139 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/core/Allocator.h>
|
4 |
+
#include <c10/util/string_view.h>
|
5 |
+
|
6 |
+
namespace at {
|
7 |
+
|
8 |
+
enum MappedAllocatorModes {
|
9 |
+
ALLOCATOR_MAPPED_SHARED = 1,
|
10 |
+
ALLOCATOR_MAPPED_SHAREDMEM = 2,
|
11 |
+
ALLOCATOR_MAPPED_EXCLUSIVE = 4,
|
12 |
+
ALLOCATOR_MAPPED_NOCREATE = 8,
|
13 |
+
ALLOCATOR_MAPPED_KEEPFD = 16,
|
14 |
+
ALLOCATOR_MAPPED_FROMFD = 32,
|
15 |
+
ALLOCATOR_MAPPED_UNLINK = 64
|
16 |
+
};
|
17 |
+
|
18 |
+
// Sentinel value/type to help distinguish the file descriptor constructor from
|
19 |
// the non-file descriptor constructor
enum WithFd { WITH_FD };

TORCH_API std::string NewProcessWideShmHandle();

class TORCH_API MapAllocator {
 public:
  MapAllocator(c10::string_view filename, int flags, size_t size);
  MapAllocator(
      WithFd,
      c10::string_view filename,
      int fd,
      int flags,
      size_t size);
  MapAllocator(const MapAllocator&) = delete;
  MapAllocator& operator=(const MapAllocator&) = delete;
  MapAllocator(MapAllocator&&) = delete;
  MapAllocator& operator=(MapAllocator&&) = delete;

  const char* filename() const {
    return filename_.c_str();
  }
  int fd() const {
#ifdef _WIN32
    TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows");
#else
    return fd_;
#endif
  }
  ptrdiff_t size() const {
    return size_;
  }
  // Return a pointer to the actual data for this allocator
  // (in the case of the refcounted allocator, this is offset
  // from the base pointer.)
  virtual void* data() const {
    return base_ptr_;
  }

  static MapAllocator* fromDataPtr(const at::DataPtr&);
  static at::DataPtr makeDataPtr(
      c10::string_view filename,
      int flags,
      size_t size,
      size_t* actual_size_out);
  static at::DataPtr makeDataPtr(
      WithFd,
      const char* filename,
      int fd,
      int flags,
      size_t size,
      size_t* actual_size_out);

  // Closes the data. Helps us avoid destructor shenanigans
  virtual void close();

  // This is very dangerous. You have to redefine this destructor for each
  // subclass
  virtual ~MapAllocator();

 protected:
  bool closed_ = false;
  std::string filename_;
  int flags_ = 0;
  ptrdiff_t size_; /* mapped size */
#ifdef _WIN32
  void* handle_;
  void* event_;
  std::string eventname_;
#else
  int fd_ = -1;
#endif
  void* base_ptr_ = nullptr;
};

// Base-from-member idiom
struct TORCH_API RefcountedMapAllocatorArgCheck {
  RefcountedMapAllocatorArgCheck(int flags);
};

class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
                                         public MapAllocator {
 public:
  RefcountedMapAllocator(const char* filename, int flags, size_t size);
  RefcountedMapAllocator(
      WithFd,
      const char* filename,
      int fd,
      int flags,
      size_t size);

  static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
  static at::DataPtr makeDataPtr(
      const char* filename,
      int flags,
      size_t size,
      size_t* actual_size_out);
  static at::DataPtr makeDataPtr(
      WithFd,
      const char* filename,
      int fd,
      int flags,
      size_t size,
      size_t* actual_size_out);

  void* data() const override;

  void incref();
  int decref();
  void close() override;

  ~RefcountedMapAllocator() override {
    RefcountedMapAllocator::close();
  }

 protected:
  void checkFlags();
  void initializeAlloc();
};

} // namespace at
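Illustrative note (not part of the committed files): a minimal sketch of how a caller might obtain a file-backed allocation through MapAllocator::makeDataPtr and inspect it again via fromDataPtr. The path and size are made up for the example, and the flag is assumed to be one of the ALLOCATOR_MAPPED_* constants declared earlier in this header.

// Sketch only -- hypothetical path and size, flag assumed from this header.
#include <ATen/MapAllocator.h>
#include <iostream>

void map_allocator_sketch() {
  size_t actual_size = 0;
  at::DataPtr ptr = at::MapAllocator::makeDataPtr(
      "/tmp/example_shm_region",      // hypothetical shared-memory handle name
      at::ALLOCATOR_MAPPED_SHAREDMEM, // flag declared earlier in this header
      4096,
      &actual_size);

  // fromDataPtr recovers the allocator so its metadata can be read back.
  if (auto* alloc = at::MapAllocator::fromDataPtr(ptr)) {
    std::cout << alloc->filename() << " mapped " << alloc->size() << " bytes\n";
  }
}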
venv/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h
ADDED
@@ -0,0 +1,109 @@
#pragma once
#include <ATen/Utils.h>
#include <c10/util/ArrayRef.h>

#include <vector>

namespace at {
/// MatrixRef - Like an ArrayRef, but with an extra recorded strides so that
/// we can easily view it as a multidimensional array.
///
/// Like ArrayRef, this class does not own the underlying data, it is expected
/// to be used in situations where the data resides in some other buffer.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
///
/// For now, 2D only (so the copies are actually cheap, without having
/// to write a SmallVector class) and contiguous only (so we can
/// return non-strided ArrayRef on index).
///
/// P.S. dimension 0 indexes rows, dimension 1 indexes columns
template <typename T>
class MatrixRef {
 public:
  typedef size_t size_type;

 private:
  /// Underlying ArrayRef
  ArrayRef<T> arr;

  /// Stride of dim 0 (outer dimension)
  size_type stride0;

  // Stride of dim 1 is assumed to be 1

 public:
  /// Construct an empty Matrixref.
  /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {}

  /// Construct an MatrixRef from an ArrayRef and outer stride.
  /*implicit*/ MatrixRef(ArrayRef<T> arr, size_type stride0)
      : arr(arr), stride0(stride0) {
    TORCH_CHECK(
        arr.size() % stride0 == 0,
        "MatrixRef: ArrayRef size ",
        arr.size(),
        " not divisible by stride ",
        stride0)
  }

  /// @}
  /// @name Simple Operations
  /// @{

  /// empty - Check if the matrix is empty.
  bool empty() const {
    return arr.empty();
  }

  const T* data() const {
    return arr.data();
  }

  /// size - Get size a dimension
  size_t size(size_t dim) const {
    if (dim == 0) {
      return arr.size() / stride0;
    } else if (dim == 1) {
      return stride0;
    } else {
      TORCH_CHECK(
          0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1");
    }
  }

  size_t numel() const {
    return arr.size();
  }

  /// equals - Check for element-wise equality.
  bool equals(MatrixRef RHS) const {
    return stride0 == RHS.stride0 && arr.equals(RHS.arr);
  }

  /// @}
  /// @name Operator Overloads
  /// @{
  ArrayRef<T> operator[](size_t Index) const {
    return arr.slice(Index * stride0, stride0);
  }

  /// Disallow accidental assignment from a temporary.
  ///
  /// The declaration here is extra complicated so that "arrayRef = {}"
  /// continues to select the move assignment operator.
  template <typename U>
  std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
      U&& Temporary) = delete;

  /// Disallow accidental assignment from a temporary.
  ///
  /// The declaration here is extra complicated so that "arrayRef = {}"
  /// continues to select the move assignment operator.
  template <typename U>
  std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
      std::initializer_list<U>) = delete;
};

} // end namespace at
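Illustrative note (not part of the committed files): a minimal sketch of viewing a flat buffer as a 2-D MatrixRef and indexing one row; the data values are made up.

// Sketch only -- wraps a local array, no ownership is taken.
#include <ATen/MatrixRef.h>
#include <cstdint>
#include <iostream>

void matrix_ref_sketch() {
  // 6 contiguous elements viewed as a 2 x 3 matrix (stride0 == 3 columns).
  int64_t data[6] = {1, 2, 3, 4, 5, 6};
  at::MatrixRef<int64_t> m(c10::ArrayRef<int64_t>(data, 6), /*stride0=*/3);

  std::cout << m.size(0) << " rows, " << m.size(1) << " cols\n"; // 2 rows, 3 cols
  c10::ArrayRef<int64_t> row1 = m[1]; // {4, 5, 6}, a non-owning contiguous slice
  std::cout << row1[0] << "\n";       // prints 4
}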
venv/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h
ADDED
@@ -0,0 +1,42 @@
#pragma once

#include <c10/macros/Export.h>

namespace c10 {
struct TensorImpl;
}

namespace at {
class TensorBase;

// MemOverlap: Whether or not there is memory overlap
//
// No: Absolutely no memory overlap
// Yes: Absolutely yes memory overlap
// TooHard: There might be memory overlap, but it was too expensive to compute.
//
// NB: Please update the python test for these if you renumber them.
enum class MemOverlap { No, Yes, TooHard };

enum class MemOverlapStatus { Full, Partial, No, TooHard };

TORCH_API MemOverlap has_internal_overlap(const TensorBase& t);
TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t);

TORCH_API void assert_no_internal_overlap(const TensorBase& t);
TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t);

TORCH_API MemOverlapStatus
get_overlap_status(const TensorBase& a, const TensorBase& b);
TORCH_API MemOverlapStatus
get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b);

TORCH_API void assert_no_partial_overlap(
    const TensorBase& a,
    const TensorBase& b);
void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b);

TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b);
TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b);

} // namespace at
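Illustrative note (not part of the committed files): a minimal sketch of how an out-of-place kernel might call these checks before writing to its output; the tensors are stand-ins and the copy is only a placeholder for real kernel work.

// Sketch only -- argument tensors are assumed to come from elsewhere.
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>

void overlap_check_sketch(const at::Tensor& self, at::Tensor& out) {
  // Refuse outputs whose own elements alias each other (e.g. expand() views).
  at::assert_no_internal_overlap(out);
  // Refuse partial aliasing between output and input; full aliasing (in-place)
  // and no aliasing both pass this check.
  at::assert_no_partial_overlap(out, self);

  out.copy_(self); // placeholder for the actual kernel body
}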
venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h
ADDED
@@ -0,0 +1,29 @@
#include <ATen/core/TensorBody.h>

// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
// Code introduced to avoid cyclic dependency in static dispatch is no longer
// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
// to Operators.cpp for supporting multiple backends with multiple kernels.
//
// Note [Avoiding Include Cycles In Static Dispatch]
// In order to avoid #include cycles in the static dispatch build, we've carefully split out
// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
//
// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
//   directly inlined into TensorBody.h.
// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
//   which include functions that have defaultable optional<Tensor> arguments.
//   That requires knowing the full Tensor class definition.
//
// We break the cycle by doing the following:
// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
// - CPUFunctions_inl.h includes everything else
// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
//   and then it includes CPUFunctions_inl.h.
// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
// - This also means that static dispatch build, CPUFunctions.h only needs to
//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
#include <ATen/MetaFunctions_inl.h>
venv/lib/python3.10/site-packages/torch/include/ATen/MethodOperators.h
ADDED
@@ -0,0 +1,443 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// @generated by torchgen/gen.py from MethodOperators.h
|
4 |
+
|
5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if your change would be better placed in \
|
9 |
+
another file, or if a more specific header might achieve the same goal. \
|
10 |
+
See NOTE: [Tensor vs. TensorBase]
|
11 |
+
#endif
|
12 |
+
|
13 |
+
// Forward declarations of any types needed in the operator signatures.
|
14 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
15 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
16 |
+
#include <ATen/core/ATen_fwd.h>
|
17 |
+
|
18 |
+
#include <ATen/ops/_addmm_activation_ops.h>
|
19 |
+
#include <ATen/ops/_autocast_to_full_precision_ops.h>
|
20 |
+
#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
|
21 |
+
#include <ATen/ops/_backward_ops.h>
|
22 |
+
#include <ATen/ops/_coalesced_ops.h>
|
23 |
+
#include <ATen/ops/_conj_ops.h>
|
24 |
+
#include <ATen/ops/_conj_physical_ops.h>
|
25 |
+
#include <ATen/ops/_dimI_ops.h>
|
26 |
+
#include <ATen/ops/_dimV_ops.h>
|
27 |
+
#include <ATen/ops/_fw_primal_ops.h>
|
28 |
+
#include <ATen/ops/_indices_ops.h>
|
29 |
+
#include <ATen/ops/_is_all_true_ops.h>
|
30 |
+
#include <ATen/ops/_is_any_true_ops.h>
|
31 |
+
#include <ATen/ops/_is_zerotensor_ops.h>
|
32 |
+
#include <ATen/ops/_lazy_clone_ops.h>
|
33 |
+
#include <ATen/ops/_neg_view_ops.h>
|
34 |
+
#include <ATen/ops/_nested_tensor_size_ops.h>
|
35 |
+
#include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
|
36 |
+
#include <ATen/ops/_nested_tensor_strides_ops.h>
|
37 |
+
#include <ATen/ops/_nnz_ops.h>
|
38 |
+
#include <ATen/ops/_reshape_alias_ops.h>
|
39 |
+
#include <ATen/ops/_sparse_mask_projection_ops.h>
|
40 |
+
#include <ATen/ops/_to_dense_ops.h>
|
41 |
+
#include <ATen/ops/_to_sparse_bsc_ops.h>
|
42 |
+
#include <ATen/ops/_to_sparse_bsr_ops.h>
|
43 |
+
#include <ATen/ops/_to_sparse_csc_ops.h>
|
44 |
+
#include <ATen/ops/_to_sparse_csr_ops.h>
|
45 |
+
#include <ATen/ops/_to_sparse_ops.h>
|
46 |
+
#include <ATen/ops/_values_ops.h>
|
47 |
+
#include <ATen/ops/_version_ops.h>
|
48 |
+
#include <ATen/ops/abs_ops.h>
|
49 |
+
#include <ATen/ops/absolute_ops.h>
|
50 |
+
#include <ATen/ops/acos_ops.h>
|
51 |
+
#include <ATen/ops/acosh_ops.h>
|
52 |
+
#include <ATen/ops/add_ops.h>
|
53 |
+
#include <ATen/ops/addbmm_ops.h>
|
54 |
+
#include <ATen/ops/addcdiv_ops.h>
|
55 |
+
#include <ATen/ops/addcmul_ops.h>
|
56 |
+
#include <ATen/ops/addmm_ops.h>
|
57 |
+
#include <ATen/ops/addmv_ops.h>
|
58 |
+
#include <ATen/ops/addr_ops.h>
|
59 |
+
#include <ATen/ops/adjoint_ops.h>
|
60 |
+
#include <ATen/ops/alias_ops.h>
|
61 |
+
#include <ATen/ops/align_as_ops.h>
|
62 |
+
#include <ATen/ops/align_to_ops.h>
|
63 |
+
#include <ATen/ops/all_ops.h>
|
64 |
+
#include <ATen/ops/allclose_ops.h>
|
65 |
+
#include <ATen/ops/amax_ops.h>
|
66 |
+
#include <ATen/ops/amin_ops.h>
|
67 |
+
#include <ATen/ops/aminmax_ops.h>
|
68 |
+
#include <ATen/ops/and_ops.h>
|
69 |
+
#include <ATen/ops/angle_ops.h>
|
70 |
+
#include <ATen/ops/any_ops.h>
|
71 |
+
#include <ATen/ops/arccos_ops.h>
|
72 |
+
#include <ATen/ops/arccosh_ops.h>
|
73 |
+
#include <ATen/ops/arcsin_ops.h>
|
74 |
+
#include <ATen/ops/arcsinh_ops.h>
|
75 |
+
#include <ATen/ops/arctan2_ops.h>
|
76 |
+
#include <ATen/ops/arctan_ops.h>
|
77 |
+
#include <ATen/ops/arctanh_ops.h>
|
78 |
+
#include <ATen/ops/argmax_ops.h>
|
79 |
+
#include <ATen/ops/argmin_ops.h>
|
80 |
+
#include <ATen/ops/argsort_ops.h>
|
81 |
+
#include <ATen/ops/argwhere_ops.h>
|
82 |
+
#include <ATen/ops/as_strided_ops.h>
|
83 |
+
#include <ATen/ops/as_strided_scatter_ops.h>
|
84 |
+
#include <ATen/ops/asin_ops.h>
|
85 |
+
#include <ATen/ops/asinh_ops.h>
|
86 |
+
#include <ATen/ops/atan2_ops.h>
|
87 |
+
#include <ATen/ops/atan_ops.h>
|
88 |
+
#include <ATen/ops/atanh_ops.h>
|
89 |
+
#include <ATen/ops/baddbmm_ops.h>
|
90 |
+
#include <ATen/ops/bernoulli_ops.h>
|
91 |
+
#include <ATen/ops/bincount_ops.h>
|
92 |
+
#include <ATen/ops/bitwise_and_ops.h>
|
93 |
+
#include <ATen/ops/bitwise_left_shift_ops.h>
|
94 |
+
#include <ATen/ops/bitwise_not_ops.h>
|
95 |
+
#include <ATen/ops/bitwise_or_ops.h>
|
96 |
+
#include <ATen/ops/bitwise_right_shift_ops.h>
|
97 |
+
#include <ATen/ops/bitwise_xor_ops.h>
|
98 |
+
#include <ATen/ops/bmm_ops.h>
|
99 |
+
#include <ATen/ops/broadcast_to_ops.h>
|
100 |
+
#include <ATen/ops/cauchy_ops.h>
|
101 |
+
#include <ATen/ops/ccol_indices_ops.h>
|
102 |
+
#include <ATen/ops/ceil_ops.h>
|
103 |
+
#include <ATen/ops/chalf_ops.h>
|
104 |
+
#include <ATen/ops/cholesky_inverse_ops.h>
|
105 |
+
#include <ATen/ops/cholesky_ops.h>
|
106 |
+
#include <ATen/ops/cholesky_solve_ops.h>
|
107 |
+
#include <ATen/ops/chunk_ops.h>
|
108 |
+
#include <ATen/ops/clamp_max_ops.h>
|
109 |
+
#include <ATen/ops/clamp_min_ops.h>
|
110 |
+
#include <ATen/ops/clamp_ops.h>
|
111 |
+
#include <ATen/ops/clip_ops.h>
|
112 |
+
#include <ATen/ops/clone_ops.h>
|
113 |
+
#include <ATen/ops/coalesce_ops.h>
|
114 |
+
#include <ATen/ops/col_indices_ops.h>
|
115 |
+
#include <ATen/ops/conj_ops.h>
|
116 |
+
#include <ATen/ops/conj_physical_ops.h>
|
117 |
+
#include <ATen/ops/contiguous_ops.h>
|
118 |
+
#include <ATen/ops/copy_ops.h>
|
119 |
+
#include <ATen/ops/copysign_ops.h>
|
120 |
+
#include <ATen/ops/corrcoef_ops.h>
|
121 |
+
#include <ATen/ops/cos_ops.h>
|
122 |
+
#include <ATen/ops/cosh_ops.h>
|
123 |
+
#include <ATen/ops/count_nonzero_ops.h>
|
124 |
+
#include <ATen/ops/cov_ops.h>
|
125 |
+
#include <ATen/ops/cross_ops.h>
|
126 |
+
#include <ATen/ops/crow_indices_ops.h>
|
127 |
+
#include <ATen/ops/cummax_ops.h>
|
128 |
+
#include <ATen/ops/cummin_ops.h>
|
129 |
+
#include <ATen/ops/cumprod_ops.h>
|
130 |
+
#include <ATen/ops/cumsum_ops.h>
|
131 |
+
#include <ATen/ops/data_ops.h>
|
132 |
+
#include <ATen/ops/deg2rad_ops.h>
|
133 |
+
#include <ATen/ops/dense_dim_ops.h>
|
134 |
+
#include <ATen/ops/dequantize_ops.h>
|
135 |
+
#include <ATen/ops/det_ops.h>
|
136 |
+
#include <ATen/ops/detach_ops.h>
|
137 |
+
#include <ATen/ops/diag_embed_ops.h>
|
138 |
+
#include <ATen/ops/diag_ops.h>
|
139 |
+
#include <ATen/ops/diagflat_ops.h>
|
140 |
+
#include <ATen/ops/diagonal_ops.h>
|
141 |
+
#include <ATen/ops/diagonal_scatter_ops.h>
|
142 |
+
#include <ATen/ops/diff_ops.h>
|
143 |
+
#include <ATen/ops/digamma_ops.h>
|
144 |
+
#include <ATen/ops/dist_ops.h>
|
145 |
+
#include <ATen/ops/div_ops.h>
|
146 |
+
#include <ATen/ops/divide_ops.h>
|
147 |
+
#include <ATen/ops/dot_ops.h>
|
148 |
+
#include <ATen/ops/dsplit_ops.h>
|
149 |
+
#include <ATen/ops/eq_ops.h>
|
150 |
+
#include <ATen/ops/equal_ops.h>
|
151 |
+
#include <ATen/ops/erf_ops.h>
|
152 |
+
#include <ATen/ops/erfc_ops.h>
|
153 |
+
#include <ATen/ops/erfinv_ops.h>
|
154 |
+
#include <ATen/ops/exp2_ops.h>
|
155 |
+
#include <ATen/ops/exp_ops.h>
|
156 |
+
#include <ATen/ops/expand_as_ops.h>
|
157 |
+
#include <ATen/ops/expand_ops.h>
|
158 |
+
#include <ATen/ops/expm1_ops.h>
|
159 |
+
#include <ATen/ops/exponential_ops.h>
|
160 |
+
#include <ATen/ops/fill_diagonal_ops.h>
|
161 |
+
#include <ATen/ops/fill_ops.h>
|
162 |
+
#include <ATen/ops/fix_ops.h>
|
163 |
+
#include <ATen/ops/flatten_ops.h>
|
164 |
+
#include <ATen/ops/flip_ops.h>
|
165 |
+
#include <ATen/ops/fliplr_ops.h>
|
166 |
+
#include <ATen/ops/flipud_ops.h>
|
167 |
+
#include <ATen/ops/float_power_ops.h>
|
168 |
+
#include <ATen/ops/floor_divide_ops.h>
|
169 |
+
#include <ATen/ops/floor_ops.h>
|
170 |
+
#include <ATen/ops/fmax_ops.h>
|
171 |
+
#include <ATen/ops/fmin_ops.h>
|
172 |
+
#include <ATen/ops/fmod_ops.h>
|
173 |
+
#include <ATen/ops/frac_ops.h>
|
174 |
+
#include <ATen/ops/frexp_ops.h>
|
175 |
+
#include <ATen/ops/gather_ops.h>
|
176 |
+
#include <ATen/ops/gcd_ops.h>
|
177 |
+
#include <ATen/ops/ge_ops.h>
|
178 |
+
#include <ATen/ops/geometric_ops.h>
|
179 |
+
#include <ATen/ops/geqrf_ops.h>
|
180 |
+
#include <ATen/ops/ger_ops.h>
|
181 |
+
#include <ATen/ops/greater_equal_ops.h>
|
182 |
+
#include <ATen/ops/greater_ops.h>
|
183 |
+
#include <ATen/ops/gt_ops.h>
|
184 |
+
#include <ATen/ops/hardshrink_backward_ops.h>
|
185 |
+
#include <ATen/ops/hardshrink_ops.h>
|
186 |
+
#include <ATen/ops/heaviside_ops.h>
|
187 |
+
#include <ATen/ops/histc_ops.h>
|
188 |
+
#include <ATen/ops/histogram_ops.h>
|
189 |
+
#include <ATen/ops/hsplit_ops.h>
|
190 |
+
#include <ATen/ops/hypot_ops.h>
|
191 |
+
#include <ATen/ops/i0_ops.h>
|
192 |
+
#include <ATen/ops/igamma_ops.h>
|
193 |
+
#include <ATen/ops/igammac_ops.h>
|
194 |
+
#include <ATen/ops/index_add_ops.h>
|
195 |
+
#include <ATen/ops/index_copy_ops.h>
|
196 |
+
#include <ATen/ops/index_fill_ops.h>
|
197 |
+
#include <ATen/ops/index_ops.h>
|
198 |
+
#include <ATen/ops/index_put_ops.h>
|
199 |
+
#include <ATen/ops/index_reduce_ops.h>
|
200 |
+
#include <ATen/ops/index_select_ops.h>
|
201 |
+
#include <ATen/ops/indices_ops.h>
|
202 |
+
#include <ATen/ops/inner_ops.h>
|
203 |
+
#include <ATen/ops/int_repr_ops.h>
|
204 |
+
#include <ATen/ops/inverse_ops.h>
|
205 |
+
#include <ATen/ops/is_coalesced_ops.h>
|
206 |
+
#include <ATen/ops/is_complex_ops.h>
|
207 |
+
#include <ATen/ops/is_conj_ops.h>
|
208 |
+
#include <ATen/ops/is_distributed_ops.h>
|
209 |
+
#include <ATen/ops/is_floating_point_ops.h>
|
210 |
+
#include <ATen/ops/is_inference_ops.h>
|
211 |
+
#include <ATen/ops/is_leaf_ops.h>
|
212 |
+
#include <ATen/ops/is_neg_ops.h>
|
213 |
+
#include <ATen/ops/is_nonzero_ops.h>
|
214 |
+
#include <ATen/ops/is_pinned_ops.h>
|
215 |
+
#include <ATen/ops/is_same_size_ops.h>
|
216 |
+
#include <ATen/ops/is_set_to_ops.h>
|
217 |
+
#include <ATen/ops/is_signed_ops.h>
|
218 |
+
#include <ATen/ops/isclose_ops.h>
|
219 |
+
#include <ATen/ops/isfinite_ops.h>
|
220 |
+
#include <ATen/ops/isinf_ops.h>
|
221 |
+
#include <ATen/ops/isnan_ops.h>
|
222 |
+
#include <ATen/ops/isneginf_ops.h>
|
223 |
+
#include <ATen/ops/isposinf_ops.h>
|
224 |
+
#include <ATen/ops/isreal_ops.h>
|
225 |
+
#include <ATen/ops/istft_ops.h>
|
226 |
+
#include <ATen/ops/item_ops.h>
|
227 |
+
#include <ATen/ops/kron_ops.h>
|
228 |
+
#include <ATen/ops/kthvalue_ops.h>
|
229 |
+
#include <ATen/ops/lcm_ops.h>
|
230 |
+
#include <ATen/ops/ldexp_ops.h>
|
231 |
+
#include <ATen/ops/le_ops.h>
|
232 |
+
#include <ATen/ops/lerp_ops.h>
|
233 |
+
#include <ATen/ops/less_equal_ops.h>
|
234 |
+
#include <ATen/ops/less_ops.h>
|
235 |
+
#include <ATen/ops/lgamma_ops.h>
|
236 |
+
#include <ATen/ops/log10_ops.h>
|
237 |
+
#include <ATen/ops/log1p_ops.h>
|
238 |
+
#include <ATen/ops/log2_ops.h>
|
239 |
+
#include <ATen/ops/log_normal_ops.h>
|
240 |
+
#include <ATen/ops/log_ops.h>
|
241 |
+
#include <ATen/ops/log_softmax_ops.h>
|
242 |
+
#include <ATen/ops/logaddexp2_ops.h>
|
243 |
+
#include <ATen/ops/logaddexp_ops.h>
|
244 |
+
#include <ATen/ops/logcumsumexp_ops.h>
|
245 |
+
#include <ATen/ops/logdet_ops.h>
|
246 |
+
#include <ATen/ops/logical_and_ops.h>
|
247 |
+
#include <ATen/ops/logical_not_ops.h>
|
248 |
+
#include <ATen/ops/logical_or_ops.h>
|
249 |
+
#include <ATen/ops/logical_xor_ops.h>
|
250 |
+
#include <ATen/ops/logit_ops.h>
|
251 |
+
#include <ATen/ops/logsumexp_ops.h>
|
252 |
+
#include <ATen/ops/lshift_ops.h>
|
253 |
+
#include <ATen/ops/lt_ops.h>
|
254 |
+
#include <ATen/ops/lu_solve_ops.h>
|
255 |
+
#include <ATen/ops/mH_ops.h>
|
256 |
+
#include <ATen/ops/mT_ops.h>
|
257 |
+
#include <ATen/ops/masked_fill_ops.h>
|
258 |
+
#include <ATen/ops/masked_scatter_ops.h>
|
259 |
+
#include <ATen/ops/masked_select_ops.h>
|
260 |
+
#include <ATen/ops/matmul_ops.h>
|
261 |
+
#include <ATen/ops/matrix_H_ops.h>
|
262 |
+
#include <ATen/ops/matrix_exp_ops.h>
|
263 |
+
#include <ATen/ops/matrix_power_ops.h>
|
264 |
+
#include <ATen/ops/max_ops.h>
|
265 |
+
#include <ATen/ops/maximum_ops.h>
|
266 |
+
#include <ATen/ops/mean_ops.h>
|
267 |
+
#include <ATen/ops/median_ops.h>
|
268 |
+
#include <ATen/ops/min_ops.h>
|
269 |
+
#include <ATen/ops/minimum_ops.h>
|
270 |
+
#include <ATen/ops/mm_ops.h>
|
271 |
+
#include <ATen/ops/mode_ops.h>
|
272 |
+
#include <ATen/ops/moveaxis_ops.h>
|
273 |
+
#include <ATen/ops/movedim_ops.h>
|
274 |
+
#include <ATen/ops/msort_ops.h>
|
275 |
+
#include <ATen/ops/mul_ops.h>
|
276 |
+
#include <ATen/ops/multinomial_ops.h>
|
277 |
+
#include <ATen/ops/multiply_ops.h>
|
278 |
+
#include <ATen/ops/mv_ops.h>
|
279 |
+
#include <ATen/ops/mvlgamma_ops.h>
|
280 |
+
#include <ATen/ops/nan_to_num_ops.h>
|
281 |
+
#include <ATen/ops/nanmean_ops.h>
|
282 |
+
#include <ATen/ops/nanmedian_ops.h>
|
283 |
+
#include <ATen/ops/nanquantile_ops.h>
|
284 |
+
#include <ATen/ops/nansum_ops.h>
|
285 |
+
#include <ATen/ops/narrow_copy_ops.h>
|
286 |
+
#include <ATen/ops/narrow_ops.h>
|
287 |
+
#include <ATen/ops/ne_ops.h>
|
288 |
+
#include <ATen/ops/neg_ops.h>
|
289 |
+
#include <ATen/ops/negative_ops.h>
|
290 |
+
#include <ATen/ops/new_empty_ops.h>
|
291 |
+
#include <ATen/ops/new_empty_strided_ops.h>
|
292 |
+
#include <ATen/ops/new_full_ops.h>
|
293 |
+
#include <ATen/ops/new_ones_ops.h>
|
294 |
+
#include <ATen/ops/new_zeros_ops.h>
|
295 |
+
#include <ATen/ops/nextafter_ops.h>
|
296 |
+
#include <ATen/ops/nonzero_numpy_ops.h>
|
297 |
+
#include <ATen/ops/nonzero_ops.h>
|
298 |
+
#include <ATen/ops/nonzero_static_ops.h>
|
299 |
+
#include <ATen/ops/norm_ops.h>
|
300 |
+
#include <ATen/ops/normal_ops.h>
|
301 |
+
#include <ATen/ops/not_equal_ops.h>
|
302 |
+
#include <ATen/ops/numpy_T_ops.h>
|
303 |
+
#include <ATen/ops/or_ops.h>
|
304 |
+
#include <ATen/ops/orgqr_ops.h>
|
305 |
+
#include <ATen/ops/ormqr_ops.h>
|
306 |
+
#include <ATen/ops/outer_ops.h>
|
307 |
+
#include <ATen/ops/output_nr_ops.h>
|
308 |
+
#include <ATen/ops/permute_ops.h>
|
309 |
+
#include <ATen/ops/pin_memory_ops.h>
|
310 |
+
#include <ATen/ops/pinverse_ops.h>
|
311 |
+
#include <ATen/ops/polygamma_ops.h>
|
312 |
+
#include <ATen/ops/positive_ops.h>
|
313 |
+
#include <ATen/ops/pow_ops.h>
|
314 |
+
#include <ATen/ops/prelu_ops.h>
|
315 |
+
#include <ATen/ops/prod_ops.h>
|
316 |
+
#include <ATen/ops/put_ops.h>
|
317 |
+
#include <ATen/ops/q_per_channel_axis_ops.h>
|
318 |
+
#include <ATen/ops/q_per_channel_scales_ops.h>
|
319 |
+
#include <ATen/ops/q_per_channel_zero_points_ops.h>
|
320 |
+
#include <ATen/ops/q_scale_ops.h>
|
321 |
+
#include <ATen/ops/q_zero_point_ops.h>
|
322 |
+
#include <ATen/ops/qr_ops.h>
|
323 |
+
#include <ATen/ops/qscheme_ops.h>
|
324 |
+
#include <ATen/ops/quantile_ops.h>
|
325 |
+
#include <ATen/ops/rad2deg_ops.h>
|
326 |
+
#include <ATen/ops/random_ops.h>
|
327 |
+
#include <ATen/ops/ravel_ops.h>
|
328 |
+
#include <ATen/ops/reciprocal_ops.h>
|
329 |
+
#include <ATen/ops/record_stream_ops.h>
|
330 |
+
#include <ATen/ops/refine_names_ops.h>
|
331 |
+
#include <ATen/ops/relu_ops.h>
|
332 |
+
#include <ATen/ops/remainder_ops.h>
|
333 |
+
#include <ATen/ops/rename_ops.h>
|
334 |
+
#include <ATen/ops/renorm_ops.h>
|
335 |
+
#include <ATen/ops/repeat_interleave_ops.h>
|
336 |
+
#include <ATen/ops/repeat_ops.h>
|
337 |
+
#include <ATen/ops/requires_grad_ops.h>
|
338 |
+
#include <ATen/ops/reshape_as_ops.h>
|
339 |
+
#include <ATen/ops/reshape_ops.h>
|
340 |
+
#include <ATen/ops/resize_as_ops.h>
|
341 |
+
#include <ATen/ops/resize_as_sparse_ops.h>
|
342 |
+
#include <ATen/ops/resize_ops.h>
|
343 |
+
#include <ATen/ops/resolve_conj_ops.h>
|
344 |
+
#include <ATen/ops/resolve_neg_ops.h>
|
345 |
+
#include <ATen/ops/retain_grad_ops.h>
|
346 |
+
#include <ATen/ops/retains_grad_ops.h>
|
347 |
+
#include <ATen/ops/roll_ops.h>
|
348 |
+
#include <ATen/ops/rot90_ops.h>
|
349 |
+
#include <ATen/ops/round_ops.h>
|
350 |
+
#include <ATen/ops/row_indices_ops.h>
|
351 |
+
#include <ATen/ops/rshift_ops.h>
|
352 |
+
#include <ATen/ops/rsqrt_ops.h>
|
353 |
+
#include <ATen/ops/scatter_add_ops.h>
|
354 |
+
#include <ATen/ops/scatter_ops.h>
|
355 |
+
#include <ATen/ops/scatter_reduce_ops.h>
|
356 |
+
#include <ATen/ops/select_ops.h>
|
357 |
+
#include <ATen/ops/select_scatter_ops.h>
|
358 |
+
#include <ATen/ops/set_data_ops.h>
|
359 |
+
#include <ATen/ops/set_ops.h>
|
360 |
+
#include <ATen/ops/sgn_ops.h>
|
361 |
+
#include <ATen/ops/sigmoid_ops.h>
|
362 |
+
#include <ATen/ops/sign_ops.h>
|
363 |
+
#include <ATen/ops/signbit_ops.h>
|
364 |
+
#include <ATen/ops/sin_ops.h>
|
365 |
+
#include <ATen/ops/sinc_ops.h>
|
366 |
+
#include <ATen/ops/sinh_ops.h>
|
367 |
+
#include <ATen/ops/size_ops.h>
|
368 |
+
#include <ATen/ops/slice_inverse_ops.h>
|
369 |
+
#include <ATen/ops/slice_ops.h>
|
370 |
+
#include <ATen/ops/slice_scatter_ops.h>
|
371 |
+
#include <ATen/ops/slogdet_ops.h>
|
372 |
+
#include <ATen/ops/smm_ops.h>
|
373 |
+
#include <ATen/ops/softmax_ops.h>
|
374 |
+
#include <ATen/ops/sort_ops.h>
|
375 |
+
#include <ATen/ops/sparse_dim_ops.h>
|
376 |
+
#include <ATen/ops/sparse_mask_ops.h>
|
377 |
+
#include <ATen/ops/sparse_resize_and_clear_ops.h>
|
378 |
+
#include <ATen/ops/sparse_resize_ops.h>
|
379 |
+
#include <ATen/ops/split_ops.h>
|
380 |
+
#include <ATen/ops/split_with_sizes_ops.h>
|
381 |
+
#include <ATen/ops/sqrt_ops.h>
|
382 |
+
#include <ATen/ops/square_ops.h>
|
383 |
+
#include <ATen/ops/squeeze_ops.h>
|
384 |
+
#include <ATen/ops/sspaddmm_ops.h>
|
385 |
+
#include <ATen/ops/std_ops.h>
|
386 |
+
#include <ATen/ops/stft_ops.h>
|
387 |
+
#include <ATen/ops/stride_ops.h>
|
388 |
+
#include <ATen/ops/sub_ops.h>
|
389 |
+
#include <ATen/ops/subtract_ops.h>
|
390 |
+
#include <ATen/ops/sum_ops.h>
|
391 |
+
#include <ATen/ops/sum_to_size_ops.h>
|
392 |
+
#include <ATen/ops/svd_ops.h>
|
393 |
+
#include <ATen/ops/swapaxes_ops.h>
|
394 |
+
#include <ATen/ops/swapdims_ops.h>
|
395 |
+
#include <ATen/ops/t_ops.h>
|
396 |
+
#include <ATen/ops/take_along_dim_ops.h>
|
397 |
+
#include <ATen/ops/take_ops.h>
|
398 |
+
#include <ATen/ops/tan_ops.h>
|
399 |
+
#include <ATen/ops/tanh_ops.h>
|
400 |
+
#include <ATen/ops/tensor_split_ops.h>
|
401 |
+
#include <ATen/ops/tile_ops.h>
|
402 |
+
#include <ATen/ops/to_dense_ops.h>
|
403 |
+
#include <ATen/ops/to_mkldnn_ops.h>
|
404 |
+
#include <ATen/ops/to_ops.h>
|
405 |
+
#include <ATen/ops/to_padded_tensor_ops.h>
|
406 |
+
#include <ATen/ops/to_sparse_bsc_ops.h>
|
407 |
+
#include <ATen/ops/to_sparse_bsr_ops.h>
|
408 |
+
#include <ATen/ops/to_sparse_csc_ops.h>
|
409 |
+
#include <ATen/ops/to_sparse_csr_ops.h>
|
410 |
+
#include <ATen/ops/to_sparse_ops.h>
|
411 |
+
#include <ATen/ops/topk_ops.h>
|
412 |
+
#include <ATen/ops/trace_ops.h>
|
413 |
+
#include <ATen/ops/transpose_ops.h>
|
414 |
+
#include <ATen/ops/triangular_solve_ops.h>
|
415 |
+
#include <ATen/ops/tril_ops.h>
|
416 |
+
#include <ATen/ops/triu_ops.h>
|
417 |
+
#include <ATen/ops/true_divide_ops.h>
|
418 |
+
#include <ATen/ops/trunc_ops.h>
|
419 |
+
#include <ATen/ops/type_as_ops.h>
|
420 |
+
#include <ATen/ops/unbind_ops.h>
|
421 |
+
#include <ATen/ops/unflatten_ops.h>
|
422 |
+
#include <ATen/ops/unfold_ops.h>
|
423 |
+
#include <ATen/ops/uniform_ops.h>
|
424 |
+
#include <ATen/ops/unsafe_chunk_ops.h>
|
425 |
+
#include <ATen/ops/unsafe_split_ops.h>
|
426 |
+
#include <ATen/ops/unsafe_split_with_sizes_ops.h>
|
427 |
+
#include <ATen/ops/unsqueeze_ops.h>
|
428 |
+
#include <ATen/ops/values_ops.h>
|
429 |
+
#include <ATen/ops/var_ops.h>
|
430 |
+
#include <ATen/ops/vdot_ops.h>
|
431 |
+
#include <ATen/ops/view_as_ops.h>
|
432 |
+
#include <ATen/ops/view_ops.h>
|
433 |
+
#include <ATen/ops/vsplit_ops.h>
|
434 |
+
#include <ATen/ops/where_ops.h>
|
435 |
+
#include <ATen/ops/xlogy_ops.h>
|
436 |
+
#include <ATen/ops/xor_ops.h>
|
437 |
+
#include <ATen/ops/zero_ops.h>
|
438 |
+
|
439 |
+
namespace at {
|
440 |
+
namespace _ops {
|
441 |
+
|
442 |
+
} // namespace _ops
|
443 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensor.h
ADDED
@@ -0,0 +1 @@
#include <ATen/core/NamedTensor.h>
venv/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h
ADDED
@@ -0,0 +1,283 @@
#pragma once
#include <ATen/MemoryOverlap.h>
#include <ATen/Tensor.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/irange.h>

namespace at::native {
struct NestedTensorImpl;
inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt);
int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor);

struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
  explicit NestedTensorImpl(
      Storage storage,
      c10::DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);

  explicit NestedTensorImpl(
      const at::Tensor& buffer,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);
  // assume contiguous, `nested_strides` and `offsets`
  // can be infered from `nested_sizes`
  explicit NestedTensorImpl(
      const at::Tensor& buffer,
      const at::Tensor& nested_sizes);

  // This constructor is used creating view tensors from nested tensors
  explicit NestedTensorImpl(
      c10::TensorImpl::ImplType impl_type,
      const at::Tensor& base_tensor,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);

  // TODO: don't expose private implementation details like this; in
  // particular, resizing this tensor will mess up our dim() and
  // callers cannot fix it.
  const Tensor& get_nested_sizes() const {
    return nested_sizes_;
  }
  // TODO: don't expose private implementation details like this
  const Tensor& get_nested_strides() const {
    return nested_strides_;
  }
  const Tensor& get_storage_offsets() const {
    return storage_offsets_;
  }
  // Returns nullopt if the ith dimension is irregular. The ith dimension
  // of a NestedTensor is regular if the unbound tensors match in
  // size at the (i-1)th dimension.
  c10::optional<int64_t> opt_size(int64_t d) const;

  int64_t size(int64_t d) const {
    c10::optional<int64_t> optional_size = this->opt_size(d);
    TORCH_CHECK(
        optional_size.has_value(),
        "Given dimension ",
        d,
        " is irregular and does not have a size.");
    return *optional_size;
  }
  /**
   * Return a view of the nested tensor as a 1 dimensional contiguous tensor.
   *
   * The buffer tensor created by this function shares the same storage_impl as
   * the original nested tensor, and therefore can be seen as a view.
   *
   * @return A newly constructed view tensor
   */
  at::Tensor get_buffer() const {
    TORCH_CHECK(
        nested_tensor_impl_is_contiguous(this),
        "NestedTensor must be contiguous to get buffer.");
    return get_unsafe_storage_as_tensor();
  }
  /**
   * If possible use get_buffer() instead. This function returns the storage
   * as a tensor directly, which is not safe to use in general. If using this
   * function, The caller must ensure to account for nested_sizes,
   * nested_strides and storage_offsets.
   *
   * @return A newly constructed view tensor
   */
  at::Tensor get_unsafe_storage_as_tensor() const {
    auto buffer_key_set_ = generate_buffer_key_set();
    const auto buffer_size = get_buffer_size();
    auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
        c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
    buffer_tensor_impl->set_sizes_contiguous(
        c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
    return Tensor(buffer_tensor_impl);
  }

  size_t get_buffer_size() const {
    return storage_.nbytes() / data_type_.itemsize();
  }

 protected:
  const char* tensorimpl_type_name() const override;

  // TODO: numel_custom and is_contiguous_custom can be profitably overridden
  // with real implementations
  int64_t numel_custom() const override;
  c10::SymInt sym_numel_custom() const override;
  bool is_contiguous_custom(MemoryFormat) const override;
  int64_t size_custom(int64_t d) const override {
    return this->size(d);
  }
  c10::SymInt sym_size_custom(int64_t d) const override {
    return c10::SymInt{this->size(d)};
  }
  IntArrayRef sizes_custom() const override;
  c10::SymIntArrayRef sym_sizes_custom() const override;
  IntArrayRef strides_custom() const override;
  c10::SymIntArrayRef sym_strides_custom() const override;

  // this one is real
  int64_t dim_custom() const override;

  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override;

  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override;

  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    copy_tensor_metadata(
        /*src_impl=*/impl.get(),
        /*dest_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
  }

 private:
  // Must be called after any changes to our dim() to sync the state
  // to TensorImpl.
  void refresh_dim();

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::Tensor nested_sizes_, nested_strides_;
  // The starting positions of the underlying tensors in contiguous buffer
  // i.e. the buffer memory offsets to get the underlying tensors
  // The reason to keep this metadata is that, without strong enough constraint
  // it cannot be derived from `nested_sizes_`
  // and `nested_strides_`:
  // 1. when buffer has blanks, e.g. [tensor1, blank, tensor2]
  //    this can happen e.g. after slicing a nested tensor
  // 2. when multiple tensors share a same memory
  // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2]
  // Some strong enough constraints are:
  // 1. every underlying tensor is contiguous in memory
  //    && nesting in ascending order
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::Tensor storage_offsets_;
  // NOTE: -1 here means the size is missing
  // Optional to allow it to be computed lazily from nested.
  // TODO: maybe we can remove this metadata since
  // we can compute it from `nested_sizes_`
  mutable c10::optional<std::vector<int64_t>> opt_sizes_;

  template <typename VariableVersion>
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
      VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const;

  /**
   * Generates a non-nested key_set from a nested tensor.
   *
   * For many nested tensor kernel implementations a buffer tensor
   * is generated and redispatched to a non-nested kernel this function
   * generates the key set used by that buffer tensor
   *
   * @return Appropriate key set for non-nested tensor
   */
  inline c10::DispatchKeySet generate_buffer_key_set() const {
    auto buffer_key_set = this->key_set();
    const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset);
    // Remove nested tensor specific keys
    buffer_key_set = buffer_key_set -
        c10::DispatchKeySet{
            c10::DispatchKey::NestedTensor,
            c10::DispatchKey::AutogradNestedTensor};

    // Add dense tensor specific keys
    buffer_key_set =
        buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense};
    buffer_key_set = Autograd
        ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set
        : buffer_key_set;

    return buffer_key_set;
  }
};

inline NestedTensorImpl* get_nested_tensor_impl_or_null(
    const at::Tensor& tensor) {
  if (tensor.is_nested()) {
    return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
  }
  return nullptr;
}

inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) {
  TORCH_CHECK(
      tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor.");
  return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
}

inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
  int64_t ntensors = nt->size(0);
  if (ntensors == 0) {
    return true;
  }
  const Tensor &sizemat = nt->get_nested_sizes(),
               &stridemat = nt->get_nested_strides();
  int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr<int64_t>();
  int64_t orig_dim = sizemat.size(1);
  // nesting scalars
  if (orig_dim == 0) {
    // each scalar must be contiguous
    // if there is blank memory between underlying scalars
    for (int64_t i = 0; i < ntensors; i++) {
      if (offsets_ptr[i] != i) {
        return false;
      }
    }
  }
  // nesting tensors
  else {
    // if any underlying tensor is non-contiguous
    const int64_t *sizemat_ptr = sizemat.data_ptr<int64_t>(),
                  *stridemat_ptr = stridemat.data_ptr<int64_t>();
    for (int64_t i = 0; i < ntensors; i++) {
      if (stridemat_ptr[orig_dim - 1] != 1) {
        return false;
      }
      int64_t product = sizemat_ptr[orig_dim - 1];
      for (int64_t j = orig_dim - 2; j >= 0; j--) {
        if (stridemat_ptr[j] != product) {
          return false;
        }
        product *= sizemat_ptr[j];
      }
      sizemat_ptr += orig_dim;
      stridemat_ptr += orig_dim;
    }
    // if there is blank memory between underlying tensors
    if (offsets_ptr[0] != 0) {
      return false;
    }
    sizemat_ptr = sizemat.data_ptr<int64_t>();
    stridemat_ptr = stridemat.data_ptr<int64_t>();
    for (int64_t i = 1; i < ntensors; i++) {
      if (offsets_ptr[i] !=
          offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) {
        return false;
      }
      sizemat_ptr += orig_dim;
      stridemat_ptr += orig_dim;
    }
  }
  // everything is fine
  return true;
}

inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) {
  return get_nested_tensor_impl(tensor)->get_nested_sizes();
}

} // namespace at::native
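Illustrative note (not part of the committed files): a minimal sketch of querying the impl behind a nested tensor; it assumes `nt` is already a nested tensor built elsewhere.

// Sketch only -- `nt` is assumed nested; otherwise get_nested_tensor_impl throws.
#include <ATen/NestedTensorImpl.h>

void nested_impl_sketch(const at::Tensor& nt) {
  auto* impl = at::native::get_nested_tensor_impl(nt);

  // Dimension 0 (the number of nested components) is always regular.
  int64_t ntensors = impl->size(0);

  // Other dimensions may be irregular, so probe them with opt_size().
  c10::optional<int64_t> d1 = impl->opt_size(1);
  bool regular_dim1 = d1.has_value();

  // The flat storage can only be exposed as a 1-D buffer when the layout is contiguous.
  bool contiguous = at::native::nested_tensor_impl_is_contiguous(impl);
  (void)ntensors; (void)regular_dim1; (void)contiguous;
}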
venv/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h
ADDED
@@ -0,0 +1,69 @@
#pragma once

#include <c10/core/ScalarType.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Exception.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e4m3fnuz.h>
#include <c10/util/Float8_e5m2.h>
#include <c10/util/Float8_e5m2fnuz.h>
#include <c10/util/Half.h>

namespace at {

// For FP16 or BFloat16 inputs, ops should perform internal math in FP32.
template <typename scalar_t>
struct OpMathType {
  using type = scalar_t;
};
template <>
struct OpMathType<at::Half> {
  using type = float;
};
template <>
struct OpMathType<at::BFloat16> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e5m2> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e4m3fn> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e5m2fnuz> {
  using type = float;
};
template <>
struct OpMathType<at::Float8_e4m3fnuz> {
  using type = float;
};
template <>
struct OpMathType<c10::complex<Half>> {
  using type = c10::complex<float>;
};

template <typename T>
using opmath_type = typename OpMathType<T>::type;

namespace {

inline c10::ScalarType toOpMathType(const c10::ScalarType type) {
  switch (type) {
#define DEFINE_CASE(scalar_t, TypeNum) \
  case ScalarType::TypeNum:            \
    return CppTypeToScalarType<at::opmath_type<scalar_t>>::value;

    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
#undef DEFINE_CASE

    default:
      TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
  }
}

} // namespace

} // namespace at
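Illustrative note (not part of the committed files): a minimal sketch of the accumulate-in-opmath pattern this trait enables; `naive_sum` is a made-up helper, not an ATen function.

// Sketch only -- accumulates low-precision inputs in their op-math type
// (float for Half/BFloat16) and rounds back down once at the end.
#include <ATen/OpMathType.h>
#include <c10/util/Half.h>
#include <cstdint>

template <typename scalar_t>
scalar_t naive_sum(const scalar_t* data, int64_t n) {
  using opmath_t = at::opmath_type<scalar_t>;
  opmath_t acc = opmath_t(0);
  for (int64_t i = 0; i < n; ++i) {
    acc += static_cast<opmath_t>(data[i]);
  }
  return static_cast<scalar_t>(acc);
}

// naive_sum<at::Half> accumulates in float, avoiding fp16 rounding at every step.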
venv/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h
ADDED
@@ -0,0 +1,187 @@
#pragma once

#include <c10/core/MemoryFormat.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/Exception.h>

namespace at {

// An "Opaque" TensorImpl -- there are no strides and (for now)
// even data() is not supported (thus no pointer arithmetic).

// NOTE: We could allow data() in the future, but would have to ensure pointer
// arithmetic code is properly guarded.
//
// NOTE: This does not support resize_ (and other metadata-changing ops) because
// of `shallow_copy_and_detach`. We would need to define an interface to
// "shallow copy" in order to add support.

template <typename OpaqueHandle>
struct TORCH_API OpaqueTensorImpl : public TensorImpl {
  // public constructor for now...
  OpaqueTensorImpl(
      at::DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      c10::Device device,
      OpaqueHandle opaque_handle,
      c10::IntArrayRef sizes,
      bool is_non_overlapping_and_dense = true)
      : TensorImpl(key_set, data_type, device),
        opaque_handle_(std::move(opaque_handle)) {
    set_storage_access_should_throw();
    set_custom_sizes_strides(SizesStridesPolicy::CustomStrides);
    sizes_and_strides_.set_sizes(sizes);
    refresh_numel();
    // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
    is_non_overlapping_and_dense_ = is_non_overlapping_and_dense;
  }

  // Destructor doesn't call release_resources because it's
  // unnecessary; don't forget to change that if needed!
  void release_resources() override {
    TensorImpl::release_resources();
    opaque_handle_ = {};
  }

  void set_size(int64_t dim, int64_t new_size) override {
    AT_ERROR("opaque tensors do not have set_size");
  }

  void set_stride(int64_t dim, int64_t new_stride) override {
    AT_ERROR("opaque tensors do not have set_stride");
  }

  void set_storage_offset(int64_t storage_offset) override {
    AT_ERROR("opaque tensors do not have set_storage_offset");
  }

#ifdef DEBUG
  bool has_storage() const override {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        !storage_, "OpaqueTensorImpl assumes that storage_ is never set");
    return false;
  }
#endif

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
        key_set(),
        dtype(),
        device(),
        opaque_handle_,
        sizes_and_strides_.sizes_arrayref());
    copy_tensor_metadata(
        /*src_opaque_impl=*/this,
        /*dest_opaque_impl=*/impl.get(),
        /*version_counter=*/version_counter,
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    return impl;
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
        key_set(),
        dtype(),
        device(),
        opaque_handle_,
        sizes_and_strides_.sizes_arrayref());
    copy_tensor_metadata(
        /*src_opaque_impl=*/this,
        /*dest_opaque_impl=*/impl.get(),
        /*version_counter=*/std::move(version_counter),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    return impl;
  }

  /**
   * Shallow-copies data from another TensorImpl into this TensorImpl.
   *
   * For why this function doesn't check this TensorImpl's
   * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
   */
  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
    auto opaque_impl =
        static_cast<const OpaqueTensorImpl<OpaqueHandle>*>(impl.get());
    copy_tensor_metadata(
        /*src_impl=*/opaque_impl,
        /*dest_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
    refresh_numel();
  }

  const OpaqueHandle& opaque_handle() const {
    return opaque_handle_;
  }

  OpaqueHandle& unsafe_opaque_handle() {
    return opaque_handle_;
  }

 protected:
  /**
   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
   * storage_offset) from one TensorImpl to another TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
   * [ TensorImpl Shallow-Copying ].
   */
  static void copy_tensor_metadata(
      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(
        src_opaque_impl,
        dest_opaque_impl,
        version_counter,
        allow_tensor_metadata_change);

    // OpaqueTensorImpl-specific fields.
    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
  }

  static void copy_tensor_metadata(
      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(
        src_opaque_impl,
        dest_opaque_impl,
        std::move(version_counter),
        allow_tensor_metadata_change);

    // OpaqueTensorImpl-specific fields.
    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
  }

 private:
  const char* tensorimpl_type_name() const override {
    return "OpaqueTensorImpl";
  }

  OpaqueHandle opaque_handle_;
};

} // namespace at
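Illustrative note (not part of the committed files): a minimal sketch of wrapping a backend-specific handle in an OpaqueTensorImpl, roughly the pattern out-of-tree backends use; the handle type, dispatch key, and dtype here are placeholders, not an endorsed recipe.

// Sketch only -- FakeDeviceBuffer, the PrivateUse1 key, and the float dtype
// are placeholder choices for illustration.
#include <ATen/ATen.h>
#include <ATen/OpaqueTensorImpl.h>
#include <memory>

struct FakeDeviceBuffer {
  void* raw = nullptr; // stand-in for a vendor API object
};

at::Tensor wrap_fake_buffer_sketch(std::shared_ptr<FakeDeviceBuffer> handle,
                                   c10::IntArrayRef sizes) {
  return at::detail::make_tensor<
      at::OpaqueTensorImpl<std::shared_ptr<FakeDeviceBuffer>>>(
      c10::DispatchKeySet(c10::DispatchKey::PrivateUse1), // placeholder backend key
      caffe2::TypeMeta::Make<float>(),
      c10::Device(c10::DeviceType::PrivateUse1, 0),
      std::move(handle),
      sizes);
}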
venv/lib/python3.10/site-packages/torch/include/ATen/Operators.h
ADDED
@@ -0,0 +1,1358 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// @generated by torchgen/gen.py from Operators.h
|
4 |
+
|
5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if your change would be better placed in \
|
9 |
+
another file, or if a more specific header might achieve the same goal. \
|
10 |
+
See NOTE: [Tensor vs. TensorBase]
|
11 |
+
#endif
|
12 |
+
|
13 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
14 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
15 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
16 |
+
Consider including a specific operator from <ATen/ops/{my_operator}_ops.h> \
|
17 |
+
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
18 |
+
#endif
|
19 |
+
|
20 |
+
#include <c10/core/SymInt.h>
|
21 |
+
#include <c10/core/SymIntArrayRef.h>
|
22 |
+
#include <c10/core/Scalar.h>
|
23 |
+
#include <c10/core/TensorOptions.h>
|
24 |
+
#include <c10/core/QScheme.h>
|
25 |
+
#include <c10/util/OptionalArrayRef.h>
|
26 |
+
#include <tuple>
|
27 |
+
#include <vector>
|
28 |
+
|
29 |
+
#include <ATen/ops/_adaptive_avg_pool2d_ops.h>
|
30 |
+
#include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
|
31 |
+
#include <ATen/ops/_adaptive_avg_pool3d_ops.h>
|
32 |
+
#include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
|
33 |
+
#include <ATen/ops/_add_batch_dim_ops.h>
|
34 |
+
#include <ATen/ops/_add_relu_ops.h>
|
35 |
+
#include <ATen/ops/_addmm_activation_ops.h>
|
36 |
+
#include <ATen/ops/_aminmax_ops.h>
|
37 |
+
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
|
38 |
+
#include <ATen/ops/_amp_update_scale_ops.h>
|
39 |
+
#include <ATen/ops/_assert_async_ops.h>
|
40 |
+
#include <ATen/ops/_assert_scalar_ops.h>
|
41 |
+
#include <ATen/ops/_assert_tensor_metadata_ops.h>
|
42 |
+
#include <ATen/ops/_autocast_to_full_precision_ops.h>
|
43 |
+
#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
|
44 |
+
#include <ATen/ops/_backward_ops.h>
|
45 |
+
#include <ATen/ops/_batch_norm_impl_index_ops.h>
|
46 |
+
#include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
|
47 |
+
#include <ATen/ops/_cast_Byte_ops.h>
|
48 |
+
#include <ATen/ops/_cast_Char_ops.h>
|
49 |
+
#include <ATen/ops/_cast_Double_ops.h>
|
50 |
+
#include <ATen/ops/_cast_Float_ops.h>
|
51 |
+
#include <ATen/ops/_cast_Half_ops.h>
|
52 |
+
#include <ATen/ops/_cast_Int_ops.h>
|
53 |
+
#include <ATen/ops/_cast_Long_ops.h>
|
54 |
+
#include <ATen/ops/_cast_Short_ops.h>
|
55 |
+
#include <ATen/ops/_cdist_backward_ops.h>
|
56 |
+
#include <ATen/ops/_cdist_forward_ops.h>
|
57 |
+
#include <ATen/ops/_cholesky_solve_helper_ops.h>
|
58 |
+
#include <ATen/ops/_choose_qparams_per_tensor_ops.h>
|
59 |
+
#include <ATen/ops/_chunk_cat_ops.h>
|
60 |
+
#include <ATen/ops/_coalesce_ops.h>
|
61 |
+
#include <ATen/ops/_coalesced_ops.h>
|
62 |
+
#include <ATen/ops/_compute_linear_combination_ops.h>
|
63 |
+
#include <ATen/ops/_conj_ops.h>
|
64 |
+
#include <ATen/ops/_conj_copy_ops.h>
|
65 |
+
#include <ATen/ops/_conj_physical_ops.h>
|
66 |
+
#include <ATen/ops/_conv_depthwise2d_ops.h>
|
67 |
+
#include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
|
68 |
+
#include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
|
69 |
+
#include <ATen/ops/_convert_weight_to_int4pack_ops.h>
|
70 |
+
#include <ATen/ops/_convolution_ops.h>
|
71 |
+
#include <ATen/ops/_convolution_double_backward_ops.h>
|
72 |
+
#include <ATen/ops/_convolution_mode_ops.h>
|
73 |
+
#include <ATen/ops/_copy_from_ops.h>
|
74 |
+
#include <ATen/ops/_copy_from_and_resize_ops.h>
|
75 |
+
#include <ATen/ops/_cslt_compress_ops.h>
|
76 |
+
#include <ATen/ops/_cslt_sparse_mm_ops.h>
|
77 |
+
#include <ATen/ops/_cslt_sparse_mm_search_ops.h>
|
78 |
+
#include <ATen/ops/_ctc_loss_ops.h>
|
79 |
+
#include <ATen/ops/_ctc_loss_backward_ops.h>
|
80 |
+
#include <ATen/ops/_cudnn_ctc_loss_ops.h>
|
81 |
+
#include <ATen/ops/_cudnn_init_dropout_state_ops.h>
|
82 |
+
#include <ATen/ops/_cudnn_rnn_ops.h>
|
83 |
+
#include <ATen/ops/_cudnn_rnn_backward_ops.h>
|
84 |
+
#include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
|
85 |
+
#include <ATen/ops/_cufft_clear_plan_cache_ops.h>
|
86 |
+
#include <ATen/ops/_cufft_get_plan_cache_max_size_ops.h>
|
87 |
+
#include <ATen/ops/_cufft_get_plan_cache_size_ops.h>
|
88 |
+
#include <ATen/ops/_cufft_set_plan_cache_max_size_ops.h>
|
89 |
+
#include <ATen/ops/_cummax_helper_ops.h>
|
90 |
+
#include <ATen/ops/_cummin_helper_ops.h>
|
91 |
+
#include <ATen/ops/_debug_has_internal_overlap_ops.h>
|
92 |
+
#include <ATen/ops/_dimI_ops.h>
|
93 |
+
#include <ATen/ops/_dimV_ops.h>
|
94 |
+
#include <ATen/ops/_dim_arange_ops.h>
|
95 |
+
#include <ATen/ops/_dirichlet_grad_ops.h>
|
96 |
+
#include <ATen/ops/_efficient_attention_backward_ops.h>
|
97 |
+
#include <ATen/ops/_efficient_attention_forward_ops.h>
|
98 |
+
#include <ATen/ops/_efficientzerotensor_ops.h>
|
99 |
+
#include <ATen/ops/_embedding_bag_ops.h>
|
100 |
+
#include <ATen/ops/_embedding_bag_backward_ops.h>
|
101 |
+
#include <ATen/ops/_embedding_bag_dense_backward_ops.h>
|
102 |
+
#include <ATen/ops/_embedding_bag_forward_only_ops.h>
|
103 |
+
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h>
|
104 |
+
#include <ATen/ops/_embedding_bag_sparse_backward_ops.h>
|
105 |
+
#include <ATen/ops/_empty_affine_quantized_ops.h>
|
106 |
+
#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
|
107 |
+
#include <ATen/ops/_euclidean_dist_ops.h>
|
108 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h>
|
109 |
+
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_ops.h>
|
110 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_ops.h>
|
111 |
+
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h>
|
112 |
+
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h>
|
113 |
+
#include <ATen/ops/_fft_c2c_ops.h>
|
114 |
+
#include <ATen/ops/_fft_c2r_ops.h>
|
115 |
+
#include <ATen/ops/_fft_r2c_ops.h>
|
116 |
+
#include <ATen/ops/_fill_mem_eff_dropout_mask_ops.h>
|
117 |
+
#include <ATen/ops/_flash_attention_backward_ops.h>
|
118 |
+
#include <ATen/ops/_flash_attention_forward_ops.h>
|
119 |
+
#include <ATen/ops/_foobar_ops.h>
|
120 |
+
#include <ATen/ops/_foreach_abs_ops.h>
|
121 |
+
#include <ATen/ops/_foreach_acos_ops.h>
|
122 |
+
#include <ATen/ops/_foreach_add_ops.h>
|
123 |
+
#include <ATen/ops/_foreach_addcdiv_ops.h>
|
124 |
+
#include <ATen/ops/_foreach_addcmul_ops.h>
|
125 |
+
#include <ATen/ops/_foreach_asin_ops.h>
|
126 |
+
#include <ATen/ops/_foreach_atan_ops.h>
|
127 |
+
#include <ATen/ops/_foreach_ceil_ops.h>
|
128 |
+
#include <ATen/ops/_foreach_clamp_max_ops.h>
|
129 |
+
#include <ATen/ops/_foreach_clamp_min_ops.h>
|
130 |
+
#include <ATen/ops/_foreach_copy_ops.h>
|
131 |
+
#include <ATen/ops/_foreach_cos_ops.h>
|
132 |
+
#include <ATen/ops/_foreach_cosh_ops.h>
|
133 |
+
#include <ATen/ops/_foreach_div_ops.h>
|
134 |
+
#include <ATen/ops/_foreach_erf_ops.h>
|
135 |
+
#include <ATen/ops/_foreach_erfc_ops.h>
|
136 |
+
#include <ATen/ops/_foreach_exp_ops.h>
|
137 |
+
#include <ATen/ops/_foreach_expm1_ops.h>
|
138 |
+
#include <ATen/ops/_foreach_floor_ops.h>
|
139 |
+
#include <ATen/ops/_foreach_frac_ops.h>
|
140 |
+
#include <ATen/ops/_foreach_lerp_ops.h>
|
141 |
+
#include <ATen/ops/_foreach_lgamma_ops.h>
|
142 |
+
#include <ATen/ops/_foreach_log_ops.h>
|
143 |
+
#include <ATen/ops/_foreach_log10_ops.h>
|
144 |
+
#include <ATen/ops/_foreach_log1p_ops.h>
|
145 |
+
#include <ATen/ops/_foreach_log2_ops.h>
|
146 |
+
#include <ATen/ops/_foreach_maximum_ops.h>
|
147 |
+
#include <ATen/ops/_foreach_minimum_ops.h>
|
148 |
+
#include <ATen/ops/_foreach_mul_ops.h>
|
149 |
+
#include <ATen/ops/_foreach_neg_ops.h>
|
150 |
+
#include <ATen/ops/_foreach_norm_ops.h>
|
151 |
+
#include <ATen/ops/_foreach_pow_ops.h>
|
152 |
+
#include <ATen/ops/_foreach_reciprocal_ops.h>
|
153 |
+
#include <ATen/ops/_foreach_round_ops.h>
|
154 |
+
#include <ATen/ops/_foreach_sigmoid_ops.h>
|
155 |
+
#include <ATen/ops/_foreach_sign_ops.h>
|
156 |
+
#include <ATen/ops/_foreach_sin_ops.h>
|
157 |
+
#include <ATen/ops/_foreach_sinh_ops.h>
|
158 |
+
#include <ATen/ops/_foreach_sqrt_ops.h>
|
159 |
+
#include <ATen/ops/_foreach_sub_ops.h>
|
160 |
+
#include <ATen/ops/_foreach_tan_ops.h>
|
161 |
+
#include <ATen/ops/_foreach_tanh_ops.h>
|
162 |
+
#include <ATen/ops/_foreach_trunc_ops.h>
|
163 |
+
#include <ATen/ops/_foreach_zero_ops.h>
|
164 |
+
#include <ATen/ops/_functional_assert_async_ops.h>
|
165 |
+
#include <ATen/ops/_functional_assert_scalar_ops.h>
|
166 |
+
#include <ATen/ops/_functional_sym_constrain_range_ops.h>
|
167 |
+
#include <ATen/ops/_functional_sym_constrain_range_for_size_ops.h>
|
168 |
+
#include <ATen/ops/_fused_adam_ops.h>
|
169 |
+
#include <ATen/ops/_fused_adamw_ops.h>
|
170 |
+
#include <ATen/ops/_fused_dropout_ops.h>
|
171 |
+
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
|
172 |
+
#include <ATen/ops/_fused_sdp_choice_ops.h>
|
173 |
+
#include <ATen/ops/_fused_sgd_ops.h>
|
174 |
+
#include <ATen/ops/_fw_primal_ops.h>
|
175 |
+
#include <ATen/ops/_fw_primal_copy_ops.h>
|
176 |
+
#include <ATen/ops/_gather_sparse_backward_ops.h>
|
177 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h>
|
178 |
+
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h>
|
179 |
+
#include <ATen/ops/_has_compatible_shallow_copy_type_ops.h>
|
180 |
+
#include <ATen/ops/_has_same_storage_numel_ops.h>
|
181 |
+
#include <ATen/ops/_histogramdd_bin_edges_ops.h>
|
182 |
+
#include <ATen/ops/_histogramdd_from_bin_cts_ops.h>
|
183 |
+
#include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
|
184 |
+
#include <ATen/ops/_index_put_impl_ops.h>
|
185 |
+
#include <ATen/ops/_indices_ops.h>
|
186 |
+
#include <ATen/ops/_indices_copy_ops.h>
|
187 |
+
#include <ATen/ops/_int_mm_ops.h>
|
188 |
+
#include <ATen/ops/_is_all_true_ops.h>
|
189 |
+
#include <ATen/ops/_is_any_true_ops.h>
|
190 |
+
#include <ATen/ops/_is_zerotensor_ops.h>
|
191 |
+
#include <ATen/ops/_lazy_clone_ops.h>
|
192 |
+
#include <ATen/ops/_linalg_check_errors_ops.h>
|
193 |
+
#include <ATen/ops/_linalg_det_ops.h>
|
194 |
+
#include <ATen/ops/_linalg_eigh_ops.h>
|
195 |
+
#include <ATen/ops/_linalg_eigvals_ops.h>
|
196 |
+
#include <ATen/ops/_linalg_slogdet_ops.h>
|
197 |
+
#include <ATen/ops/_linalg_solve_ex_ops.h>
|
198 |
+
#include <ATen/ops/_linalg_svd_ops.h>
|
199 |
+
#include <ATen/ops/_local_scalar_dense_ops.h>
|
200 |
+
#include <ATen/ops/_log_softmax_ops.h>
|
201 |
+
#include <ATen/ops/_log_softmax_backward_data_ops.h>
|
202 |
+
#include <ATen/ops/_logcumsumexp_ops.h>
|
203 |
+
#include <ATen/ops/_lstm_mps_ops.h>
|
204 |
+
#include <ATen/ops/_lu_with_info_ops.h>
|
205 |
+
#include <ATen/ops/_make_dep_token_ops.h>
|
206 |
+
#include <ATen/ops/_make_dual_ops.h>
|
207 |
+
#include <ATen/ops/_make_dual_copy_ops.h>
|
208 |
+
#include <ATen/ops/_make_per_channel_quantized_tensor_ops.h>
|
209 |
+
#include <ATen/ops/_make_per_tensor_quantized_tensor_ops.h>
|
210 |
+
#include <ATen/ops/_masked_scale_ops.h>
|
211 |
+
#include <ATen/ops/_masked_softmax_ops.h>
|
212 |
+
#include <ATen/ops/_masked_softmax_backward_ops.h>
|
213 |
+
#include <ATen/ops/_mixed_dtypes_linear_ops.h>
|
214 |
+
#include <ATen/ops/_mkldnn_reshape_ops.h>
|
215 |
+
#include <ATen/ops/_mkldnn_transpose_ops.h>
|
216 |
+
#include <ATen/ops/_mps_convolution_ops.h>
|
217 |
+
#include <ATen/ops/_mps_convolution_transpose_ops.h>
|
218 |
+
#include <ATen/ops/_native_batch_norm_legit_ops.h>
|
219 |
+
#include <ATen/ops/_native_batch_norm_legit_no_training_ops.h>
|
220 |
+
#include <ATen/ops/_native_multi_head_attention_ops.h>
|
221 |
+
#include <ATen/ops/_neg_view_ops.h>
|
222 |
+
#include <ATen/ops/_neg_view_copy_ops.h>
|
223 |
+
#include <ATen/ops/_nested_from_padded_ops.h>
|
224 |
+
#include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
|
225 |
+
#include <ATen/ops/_nested_get_jagged_dummy_ops.h>
|
226 |
+
#include <ATen/ops/_nested_get_lengths_ops.h>
|
227 |
+
#include <ATen/ops/_nested_get_offsets_ops.h>
|
228 |
+
#include <ATen/ops/_nested_get_ragged_idx_ops.h>
|
229 |
+
#include <ATen/ops/_nested_get_values_ops.h>
|
230 |
+
#include <ATen/ops/_nested_get_values_copy_ops.h>
|
231 |
+
#include <ATen/ops/_nested_select_backward_ops.h>
|
232 |
+
#include <ATen/ops/_nested_sum_backward_ops.h>
|
233 |
+
#include <ATen/ops/_nested_tensor_from_mask_ops.h>
|
234 |
+
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h>
|
235 |
+
#include <ATen/ops/_nested_tensor_from_tensor_list_ops.h>
|
236 |
+
#include <ATen/ops/_nested_tensor_size_ops.h>
|
237 |
+
#include <ATen/ops/_nested_tensor_softmax_with_shape_ops.h>
|
238 |
+
#include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
|
239 |
+
#include <ATen/ops/_nested_tensor_strides_ops.h>
|
240 |
+
#include <ATen/ops/_nested_view_from_buffer_ops.h>
|
241 |
+
#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
|
242 |
+
#include <ATen/ops/_nested_view_from_jagged_ops.h>
|
243 |
+
#include <ATen/ops/_nested_view_from_jagged_copy_ops.h>
|
244 |
+
#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
|
245 |
+
#include <ATen/ops/_nnpack_available_ops.h>
|
246 |
+
#include <ATen/ops/_nnpack_spatial_convolution_ops.h>
|
247 |
+
#include <ATen/ops/_nnz_ops.h>
|
248 |
+
#include <ATen/ops/_pack_padded_sequence_ops.h>
|
249 |
+
#include <ATen/ops/_pack_padded_sequence_backward_ops.h>
|
250 |
+
#include <ATen/ops/_pad_circular_ops.h>
|
251 |
+
#include <ATen/ops/_pad_enum_ops.h>
|
252 |
+
#include <ATen/ops/_pad_packed_sequence_ops.h>
|
253 |
+
#include <ATen/ops/_pdist_backward_ops.h>
|
254 |
+
#include <ATen/ops/_pdist_forward_ops.h>
|
255 |
+
#include <ATen/ops/_pin_memory_ops.h>
|
256 |
+
#include <ATen/ops/_prelu_kernel_ops.h>
|
257 |
+
#include <ATen/ops/_prelu_kernel_backward_ops.h>
|
258 |
+
#include <ATen/ops/_print_ops.h>
|
259 |
+
#include <ATen/ops/_propagate_xla_data_ops.h>
|
260 |
+
#include <ATen/ops/_remove_batch_dim_ops.h>
|
261 |
+
#include <ATen/ops/_reshape_alias_ops.h>
|
262 |
+
#include <ATen/ops/_reshape_alias_copy_ops.h>
|
263 |
+
#include <ATen/ops/_reshape_copy_ops.h>
|
264 |
+
#include <ATen/ops/_reshape_from_tensor_ops.h>
|
265 |
+
#include <ATen/ops/_resize_output_ops.h>
|
266 |
+
#include <ATen/ops/_rowwise_prune_ops.h>
|
267 |
+
#include <ATen/ops/_sample_dirichlet_ops.h>
|
268 |
+
#include <ATen/ops/_saturate_weight_to_fp16_ops.h>
|
269 |
+
#include <ATen/ops/_scaled_dot_product_attention_math_ops.h>
|
270 |
+
#include <ATen/ops/_scaled_dot_product_cudnn_attention_ops.h>
|
271 |
+
#include <ATen/ops/_scaled_dot_product_efficient_attention_ops.h>
|
272 |
+
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h>
|
273 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_ops.h>
|
274 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h>
|
275 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_ops.h>
|
276 |
+
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_ops.h>
|
277 |
+
#include <ATen/ops/_scaled_mm_ops.h>
|
278 |
+
#include <ATen/ops/_segment_reduce_backward_ops.h>
|
279 |
+
#include <ATen/ops/_shape_as_tensor_ops.h>
|
280 |
+
#include <ATen/ops/_slow_conv2d_backward_ops.h>
|
281 |
+
#include <ATen/ops/_slow_conv2d_forward_ops.h>
|
282 |
+
#include <ATen/ops/_sobol_engine_draw_ops.h>
|
283 |
+
#include <ATen/ops/_sobol_engine_ff_ops.h>
|
284 |
+
#include <ATen/ops/_sobol_engine_initialize_state_ops.h>
|
285 |
+
#include <ATen/ops/_sobol_engine_scramble_ops.h>
|
286 |
+
#include <ATen/ops/_softmax_ops.h>
|
287 |
+
#include <ATen/ops/_softmax_backward_data_ops.h>
|
288 |
+
#include <ATen/ops/_sparse_addmm_ops.h>
|
289 |
+
#include <ATen/ops/_sparse_broadcast_to_ops.h>
|
290 |
+
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
|
291 |
+
#include <ATen/ops/_sparse_bsc_tensor_unsafe_ops.h>
|
292 |
+
#include <ATen/ops/_sparse_bsr_tensor_unsafe_ops.h>
|
293 |
+
#include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h>
|
294 |
+
#include <ATen/ops/_sparse_coo_tensor_unsafe_ops.h>
|
295 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h>
|
296 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
|
297 |
+
#include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h>
|
298 |
+
#include <ATen/ops/_sparse_csr_prod_ops.h>
|
299 |
+
#include <ATen/ops/_sparse_csr_sum_ops.h>
|
300 |
+
#include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h>
|
301 |
+
#include <ATen/ops/_sparse_log_softmax_ops.h>
|
302 |
+
#include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
|
303 |
+
#include <ATen/ops/_sparse_mask_projection_ops.h>
|
304 |
+
#include <ATen/ops/_sparse_mm_ops.h>
|
305 |
+
#include <ATen/ops/_sparse_mm_reduce_impl_ops.h>
|
306 |
+
#include <ATen/ops/_sparse_mm_reduce_impl_backward_ops.h>
|
307 |
+
#include <ATen/ops/_sparse_semi_structured_linear_ops.h>
|
308 |
+
#include <ATen/ops/_sparse_softmax_ops.h>
|
309 |
+
#include <ATen/ops/_sparse_softmax_backward_data_ops.h>
|
310 |
+
#include <ATen/ops/_sparse_sparse_matmul_ops.h>
|
311 |
+
#include <ATen/ops/_sparse_sum_ops.h>
|
312 |
+
#include <ATen/ops/_sparse_sum_backward_ops.h>
|
313 |
+
#include <ATen/ops/_spdiags_ops.h>
|
314 |
+
#include <ATen/ops/_stack_ops.h>
|
315 |
+
#include <ATen/ops/_standard_gamma_ops.h>
|
316 |
+
#include <ATen/ops/_standard_gamma_grad_ops.h>
|
317 |
+
#include <ATen/ops/_test_ambiguous_defaults_ops.h>
|
318 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
|
319 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
|
320 |
+
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h>
|
321 |
+
#include <ATen/ops/_test_check_tensor_ops.h>
|
322 |
+
#include <ATen/ops/_test_functorch_fallback_ops.h>
|
323 |
+
#include <ATen/ops/_test_optional_filled_intlist_ops.h>
|
324 |
+
#include <ATen/ops/_test_optional_floatlist_ops.h>
|
325 |
+
#include <ATen/ops/_test_optional_intlist_ops.h>
|
326 |
+
#include <ATen/ops/_test_parallel_materialize_ops.h>
|
327 |
+
#include <ATen/ops/_test_serialization_subcmul_ops.h>
|
328 |
+
#include <ATen/ops/_test_string_default_ops.h>
|
329 |
+
#include <ATen/ops/_test_warn_in_autograd_ops.h>
|
330 |
+
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_ops.h>
|
331 |
+
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h>
|
332 |
+
#include <ATen/ops/_thnn_fused_gru_cell_ops.h>
|
333 |
+
#include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
|
334 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_ops.h>
|
335 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_ops.h>
|
336 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h>
|
337 |
+
#include <ATen/ops/_to_copy_ops.h>
|
338 |
+
#include <ATen/ops/_to_cpu_ops.h>
|
339 |
+
#include <ATen/ops/_to_dense_ops.h>
|
340 |
+
#include <ATen/ops/_to_sparse_ops.h>
|
341 |
+
#include <ATen/ops/_to_sparse_bsc_ops.h>
|
342 |
+
#include <ATen/ops/_to_sparse_bsr_ops.h>
|
343 |
+
#include <ATen/ops/_to_sparse_csc_ops.h>
|
344 |
+
#include <ATen/ops/_to_sparse_csr_ops.h>
|
345 |
+
#include <ATen/ops/_to_sparse_semi_structured_ops.h>
|
346 |
+
#include <ATen/ops/_transform_bias_rescale_qkv_ops.h>
|
347 |
+
#include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
|
348 |
+
#include <ATen/ops/_trilinear_ops.h>
|
349 |
+
#include <ATen/ops/_triton_multi_head_attention_ops.h>
|
350 |
+
#include <ATen/ops/_triton_scaled_dot_attention_ops.h>
|
351 |
+
#include <ATen/ops/_unique_ops.h>
|
352 |
+
#include <ATen/ops/_unique2_ops.h>
|
353 |
+
#include <ATen/ops/_unpack_dual_ops.h>
|
354 |
+
#include <ATen/ops/_unsafe_index_ops.h>
|
355 |
+
#include <ATen/ops/_unsafe_index_put_ops.h>
|
356 |
+
#include <ATen/ops/_unsafe_view_ops.h>
|
357 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
|
358 |
+
#include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
|
359 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
|
360 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
|
361 |
+
#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
|
362 |
+
#include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
|
363 |
+
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
|
364 |
+
#include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
|
365 |
+
#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
|
366 |
+
#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
|
367 |
+
#include <ATen/ops/_use_cudnn_ctc_loss_ops.h>
|
368 |
+
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h>
|
369 |
+
#include <ATen/ops/_validate_compressed_sparse_indices_ops.h>
|
370 |
+
#include <ATen/ops/_validate_sparse_bsc_tensor_args_ops.h>
|
371 |
+
#include <ATen/ops/_validate_sparse_bsr_tensor_args_ops.h>
|
372 |
+
#include <ATen/ops/_validate_sparse_compressed_tensor_args_ops.h>
|
373 |
+
#include <ATen/ops/_validate_sparse_coo_tensor_args_ops.h>
|
374 |
+
#include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h>
|
375 |
+
#include <ATen/ops/_validate_sparse_csr_tensor_args_ops.h>
|
376 |
+
#include <ATen/ops/_values_ops.h>
|
377 |
+
#include <ATen/ops/_values_copy_ops.h>
|
378 |
+
#include <ATen/ops/_version_ops.h>
|
379 |
+
#include <ATen/ops/_weight_int4pack_mm_ops.h>
|
380 |
+
#include <ATen/ops/_weight_int8pack_mm_ops.h>
|
381 |
+
#include <ATen/ops/_weight_norm_ops.h>
|
382 |
+
#include <ATen/ops/_weight_norm_differentiable_backward_ops.h>
|
383 |
+
#include <ATen/ops/_weight_norm_interface_ops.h>
|
384 |
+
#include <ATen/ops/_weight_norm_interface_backward_ops.h>
|
385 |
+
#include <ATen/ops/abs_ops.h>
|
386 |
+
#include <ATen/ops/absolute_ops.h>
|
387 |
+
#include <ATen/ops/acos_ops.h>
|
388 |
+
#include <ATen/ops/acosh_ops.h>
|
389 |
+
#include <ATen/ops/adaptive_avg_pool1d_ops.h>
|
390 |
+
#include <ATen/ops/adaptive_avg_pool2d_ops.h>
|
391 |
+
#include <ATen/ops/adaptive_avg_pool3d_ops.h>
|
392 |
+
#include <ATen/ops/adaptive_avg_pool3d_backward_ops.h>
|
393 |
+
#include <ATen/ops/adaptive_max_pool1d_ops.h>
|
394 |
+
#include <ATen/ops/adaptive_max_pool2d_ops.h>
|
395 |
+
#include <ATen/ops/adaptive_max_pool2d_backward_ops.h>
|
396 |
+
#include <ATen/ops/adaptive_max_pool3d_ops.h>
|
397 |
+
#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
|
398 |
+
#include <ATen/ops/add_ops.h>
|
399 |
+
#include <ATen/ops/addbmm_ops.h>
|
400 |
+
#include <ATen/ops/addcdiv_ops.h>
|
401 |
+
#include <ATen/ops/addcmul_ops.h>
|
402 |
+
#include <ATen/ops/addmm_ops.h>
|
403 |
+
#include <ATen/ops/addmv_ops.h>
|
404 |
+
#include <ATen/ops/addr_ops.h>
|
405 |
+
#include <ATen/ops/adjoint_ops.h>
|
406 |
+
#include <ATen/ops/affine_grid_generator_ops.h>
|
407 |
+
#include <ATen/ops/affine_grid_generator_backward_ops.h>
|
408 |
+
#include <ATen/ops/alias_ops.h>
|
409 |
+
#include <ATen/ops/alias_copy_ops.h>
|
410 |
+
#include <ATen/ops/align_as_ops.h>
|
411 |
+
#include <ATen/ops/align_tensors_ops.h>
|
412 |
+
#include <ATen/ops/align_to_ops.h>
|
413 |
+
#include <ATen/ops/all_ops.h>
|
414 |
+
#include <ATen/ops/allclose_ops.h>
|
415 |
+
#include <ATen/ops/alpha_dropout_ops.h>
|
416 |
+
#include <ATen/ops/amax_ops.h>
|
417 |
+
#include <ATen/ops/amin_ops.h>
|
418 |
+
#include <ATen/ops/aminmax_ops.h>
|
419 |
+
#include <ATen/ops/and_ops.h>
|
420 |
+
#include <ATen/ops/angle_ops.h>
|
421 |
+
#include <ATen/ops/any_ops.h>
|
422 |
+
#include <ATen/ops/arange_ops.h>
|
423 |
+
#include <ATen/ops/arccos_ops.h>
|
424 |
+
#include <ATen/ops/arccosh_ops.h>
|
425 |
+
#include <ATen/ops/arcsin_ops.h>
|
426 |
+
#include <ATen/ops/arcsinh_ops.h>
|
427 |
+
#include <ATen/ops/arctan_ops.h>
|
428 |
+
#include <ATen/ops/arctan2_ops.h>
|
429 |
+
#include <ATen/ops/arctanh_ops.h>
|
430 |
+
#include <ATen/ops/argmax_ops.h>
|
431 |
+
#include <ATen/ops/argmin_ops.h>
|
432 |
+
#include <ATen/ops/argsort_ops.h>
|
433 |
+
#include <ATen/ops/argwhere_ops.h>
|
434 |
+
#include <ATen/ops/as_strided_ops.h>
|
435 |
+
#include <ATen/ops/as_strided_copy_ops.h>
|
436 |
+
#include <ATen/ops/as_strided_scatter_ops.h>
|
437 |
+
#include <ATen/ops/asin_ops.h>
|
438 |
+
#include <ATen/ops/asinh_ops.h>
|
439 |
+
#include <ATen/ops/atan_ops.h>
|
440 |
+
#include <ATen/ops/atan2_ops.h>
|
441 |
+
#include <ATen/ops/atanh_ops.h>
|
442 |
+
#include <ATen/ops/atleast_1d_ops.h>
|
443 |
+
#include <ATen/ops/atleast_2d_ops.h>
|
444 |
+
#include <ATen/ops/atleast_3d_ops.h>
|
445 |
+
#include <ATen/ops/avg_pool1d_ops.h>
|
446 |
+
#include <ATen/ops/avg_pool2d_ops.h>
|
447 |
+
#include <ATen/ops/avg_pool2d_backward_ops.h>
|
448 |
+
#include <ATen/ops/avg_pool3d_ops.h>
|
449 |
+
#include <ATen/ops/avg_pool3d_backward_ops.h>
|
450 |
+
#include <ATen/ops/baddbmm_ops.h>
|
451 |
+
#include <ATen/ops/bartlett_window_ops.h>
|
452 |
+
#include <ATen/ops/batch_norm_ops.h>
|
453 |
+
#include <ATen/ops/batch_norm_backward_elemt_ops.h>
|
454 |
+
#include <ATen/ops/batch_norm_backward_reduce_ops.h>
|
455 |
+
#include <ATen/ops/batch_norm_elemt_ops.h>
|
456 |
+
#include <ATen/ops/batch_norm_gather_stats_ops.h>
|
457 |
+
#include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
|
458 |
+
#include <ATen/ops/batch_norm_stats_ops.h>
|
459 |
+
#include <ATen/ops/batch_norm_update_stats_ops.h>
|
460 |
+
#include <ATen/ops/bernoulli_ops.h>
|
461 |
+
#include <ATen/ops/bilinear_ops.h>
|
462 |
+
#include <ATen/ops/binary_cross_entropy_ops.h>
|
463 |
+
#include <ATen/ops/binary_cross_entropy_backward_ops.h>
|
464 |
+
#include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
|
465 |
+
#include <ATen/ops/bincount_ops.h>
|
466 |
+
#include <ATen/ops/binomial_ops.h>
|
467 |
+
#include <ATen/ops/bitwise_and_ops.h>
|
468 |
+
#include <ATen/ops/bitwise_left_shift_ops.h>
|
469 |
+
#include <ATen/ops/bitwise_not_ops.h>
|
470 |
+
#include <ATen/ops/bitwise_or_ops.h>
|
471 |
+
#include <ATen/ops/bitwise_right_shift_ops.h>
|
472 |
+
#include <ATen/ops/bitwise_xor_ops.h>
|
473 |
+
#include <ATen/ops/blackman_window_ops.h>
|
474 |
+
#include <ATen/ops/block_diag_ops.h>
|
475 |
+
#include <ATen/ops/bmm_ops.h>
|
476 |
+
#include <ATen/ops/broadcast_tensors_ops.h>
|
477 |
+
#include <ATen/ops/broadcast_to_ops.h>
|
478 |
+
#include <ATen/ops/bucketize_ops.h>
|
479 |
+
#include <ATen/ops/can_cast_ops.h>
|
480 |
+
#include <ATen/ops/cartesian_prod_ops.h>
|
481 |
+
#include <ATen/ops/cat_ops.h>
|
482 |
+
#include <ATen/ops/cauchy_ops.h>
|
483 |
+
#include <ATen/ops/ccol_indices_ops.h>
|
484 |
+
#include <ATen/ops/ccol_indices_copy_ops.h>
|
485 |
+
#include <ATen/ops/cdist_ops.h>
|
486 |
+
#include <ATen/ops/ceil_ops.h>
|
487 |
+
#include <ATen/ops/celu_ops.h>
|
488 |
+
#include <ATen/ops/chain_matmul_ops.h>
|
489 |
+
#include <ATen/ops/chalf_ops.h>
|
490 |
+
#include <ATen/ops/channel_shuffle_ops.h>
|
491 |
+
#include <ATen/ops/cholesky_ops.h>
|
492 |
+
#include <ATen/ops/cholesky_inverse_ops.h>
|
493 |
+
#include <ATen/ops/cholesky_solve_ops.h>
|
494 |
+
#include <ATen/ops/choose_qparams_optimized_ops.h>
|
495 |
+
#include <ATen/ops/chunk_ops.h>
|
496 |
+
#include <ATen/ops/clamp_ops.h>
|
497 |
+
#include <ATen/ops/clamp_max_ops.h>
|
498 |
+
#include <ATen/ops/clamp_min_ops.h>
|
499 |
+
#include <ATen/ops/clip_ops.h>
|
500 |
+
#include <ATen/ops/clone_ops.h>
|
501 |
+
#include <ATen/ops/coalesce_ops.h>
|
502 |
+
#include <ATen/ops/col2im_ops.h>
|
503 |
+
#include <ATen/ops/col_indices_ops.h>
|
504 |
+
#include <ATen/ops/col_indices_copy_ops.h>
|
505 |
+
#include <ATen/ops/column_stack_ops.h>
|
506 |
+
#include <ATen/ops/combinations_ops.h>
|
507 |
+
#include <ATen/ops/complex_ops.h>
|
508 |
+
#include <ATen/ops/concat_ops.h>
|
509 |
+
#include <ATen/ops/concatenate_ops.h>
|
510 |
+
#include <ATen/ops/conj_ops.h>
|
511 |
+
#include <ATen/ops/conj_physical_ops.h>
|
512 |
+
#include <ATen/ops/constant_pad_nd_ops.h>
|
513 |
+
#include <ATen/ops/contiguous_ops.h>
|
514 |
+
#include <ATen/ops/conv1d_ops.h>
|
515 |
+
#include <ATen/ops/conv2d_ops.h>
|
516 |
+
#include <ATen/ops/conv3d_ops.h>
|
517 |
+
#include <ATen/ops/conv_depthwise3d_ops.h>
|
518 |
+
#include <ATen/ops/conv_tbc_ops.h>
|
519 |
+
#include <ATen/ops/conv_tbc_backward_ops.h>
|
520 |
+
#include <ATen/ops/conv_transpose1d_ops.h>
|
521 |
+
#include <ATen/ops/conv_transpose2d_ops.h>
|
522 |
+
#include <ATen/ops/conv_transpose3d_ops.h>
|
523 |
+
#include <ATen/ops/convolution_ops.h>
|
524 |
+
#include <ATen/ops/convolution_backward_ops.h>
|
525 |
+
#include <ATen/ops/convolution_backward_overrideable_ops.h>
|
526 |
+
#include <ATen/ops/convolution_overrideable_ops.h>
|
527 |
+
#include <ATen/ops/copy_ops.h>
|
528 |
+
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
|
529 |
+
#include <ATen/ops/copysign_ops.h>
|
530 |
+
#include <ATen/ops/corrcoef_ops.h>
|
531 |
+
#include <ATen/ops/cos_ops.h>
|
532 |
+
#include <ATen/ops/cosh_ops.h>
|
533 |
+
#include <ATen/ops/cosine_embedding_loss_ops.h>
|
534 |
+
#include <ATen/ops/cosine_similarity_ops.h>
|
535 |
+
#include <ATen/ops/count_nonzero_ops.h>
|
536 |
+
#include <ATen/ops/cov_ops.h>
|
537 |
+
#include <ATen/ops/cross_ops.h>
|
538 |
+
#include <ATen/ops/cross_entropy_loss_ops.h>
|
539 |
+
#include <ATen/ops/crow_indices_ops.h>
|
540 |
+
#include <ATen/ops/crow_indices_copy_ops.h>
|
541 |
+
#include <ATen/ops/ctc_loss_ops.h>
|
542 |
+
#include <ATen/ops/cudnn_affine_grid_generator_ops.h>
|
543 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward_ops.h>
|
544 |
+
#include <ATen/ops/cudnn_batch_norm_ops.h>
|
545 |
+
#include <ATen/ops/cudnn_batch_norm_backward_ops.h>
|
546 |
+
#include <ATen/ops/cudnn_convolution_ops.h>
|
547 |
+
#include <ATen/ops/cudnn_convolution_add_relu_ops.h>
|
548 |
+
#include <ATen/ops/cudnn_convolution_relu_ops.h>
|
549 |
+
#include <ATen/ops/cudnn_convolution_transpose_ops.h>
|
550 |
+
#include <ATen/ops/cudnn_grid_sampler_ops.h>
|
551 |
+
#include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
|
552 |
+
#include <ATen/ops/cudnn_is_acceptable_ops.h>
|
553 |
+
#include <ATen/ops/cummax_ops.h>
|
554 |
+
#include <ATen/ops/cummaxmin_backward_ops.h>
|
555 |
+
#include <ATen/ops/cummin_ops.h>
|
556 |
+
#include <ATen/ops/cumprod_ops.h>
|
557 |
+
#include <ATen/ops/cumprod_backward_ops.h>
|
558 |
+
#include <ATen/ops/cumsum_ops.h>
|
559 |
+
#include <ATen/ops/cumulative_trapezoid_ops.h>
|
560 |
+
#include <ATen/ops/data_ops.h>
|
561 |
+
#include <ATen/ops/deg2rad_ops.h>
|
562 |
+
#include <ATen/ops/dense_dim_ops.h>
|
563 |
+
#include <ATen/ops/dequantize_ops.h>
|
564 |
+
#include <ATen/ops/det_ops.h>
|
565 |
+
#include <ATen/ops/detach_ops.h>
|
566 |
+
#include <ATen/ops/detach_copy_ops.h>
|
567 |
+
#include <ATen/ops/diag_ops.h>
|
568 |
+
#include <ATen/ops/diag_embed_ops.h>
|
569 |
+
#include <ATen/ops/diagflat_ops.h>
|
570 |
+
#include <ATen/ops/diagonal_ops.h>
|
571 |
+
#include <ATen/ops/diagonal_backward_ops.h>
|
572 |
+
#include <ATen/ops/diagonal_copy_ops.h>
|
573 |
+
#include <ATen/ops/diagonal_scatter_ops.h>
|
574 |
+
#include <ATen/ops/diff_ops.h>
|
575 |
+
#include <ATen/ops/digamma_ops.h>
|
576 |
+
#include <ATen/ops/dist_ops.h>
|
577 |
+
#include <ATen/ops/div_ops.h>
|
578 |
+
#include <ATen/ops/divide_ops.h>
|
579 |
+
#include <ATen/ops/dot_ops.h>
|
580 |
+
#include <ATen/ops/dropout_ops.h>
|
581 |
+
#include <ATen/ops/dsplit_ops.h>
|
582 |
+
#include <ATen/ops/dstack_ops.h>
|
583 |
+
#include <ATen/ops/einsum_ops.h>
|
584 |
+
#include <ATen/ops/elu_ops.h>
|
585 |
+
#include <ATen/ops/elu_backward_ops.h>
|
586 |
+
#include <ATen/ops/embedding_ops.h>
|
587 |
+
#include <ATen/ops/embedding_backward_ops.h>
|
588 |
+
#include <ATen/ops/embedding_bag_ops.h>
|
589 |
+
#include <ATen/ops/embedding_dense_backward_ops.h>
|
590 |
+
#include <ATen/ops/embedding_renorm_ops.h>
|
591 |
+
#include <ATen/ops/embedding_sparse_backward_ops.h>
|
592 |
+
#include <ATen/ops/empty_ops.h>
|
593 |
+
#include <ATen/ops/empty_like_ops.h>
|
594 |
+
#include <ATen/ops/empty_permuted_ops.h>
|
595 |
+
#include <ATen/ops/empty_quantized_ops.h>
|
596 |
+
#include <ATen/ops/empty_strided_ops.h>
|
597 |
+
#include <ATen/ops/eq_ops.h>
|
598 |
+
#include <ATen/ops/equal_ops.h>
|
599 |
+
#include <ATen/ops/erf_ops.h>
|
600 |
+
#include <ATen/ops/erfc_ops.h>
|
601 |
+
#include <ATen/ops/erfinv_ops.h>
|
602 |
+
#include <ATen/ops/exp_ops.h>
|
603 |
+
#include <ATen/ops/exp2_ops.h>
|
604 |
+
#include <ATen/ops/expand_ops.h>
|
605 |
+
#include <ATen/ops/expand_as_ops.h>
|
606 |
+
#include <ATen/ops/expand_copy_ops.h>
|
607 |
+
#include <ATen/ops/expm1_ops.h>
|
608 |
+
#include <ATen/ops/exponential_ops.h>
|
609 |
+
#include <ATen/ops/eye_ops.h>
|
610 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_ops.h>
|
611 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h>
|
612 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h>
|
613 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_ops.h>
|
614 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
|
615 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_ops.h>
|
616 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_ops.h>
|
617 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h>
|
618 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_ops.h>
|
619 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h>
|
620 |
+
#include <ATen/ops/fbgemm_linear_quantize_weight_ops.h>
|
621 |
+
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_ops.h>
|
622 |
+
#include <ATen/ops/fbgemm_pack_quantized_matrix_ops.h>
|
623 |
+
#include <ATen/ops/feature_alpha_dropout_ops.h>
|
624 |
+
#include <ATen/ops/feature_dropout_ops.h>
|
625 |
+
#include <ATen/ops/fft_fft_ops.h>
|
626 |
+
#include <ATen/ops/fft_fft2_ops.h>
|
627 |
+
#include <ATen/ops/fft_fftfreq_ops.h>
|
628 |
+
#include <ATen/ops/fft_fftn_ops.h>
|
629 |
+
#include <ATen/ops/fft_fftshift_ops.h>
|
630 |
+
#include <ATen/ops/fft_hfft_ops.h>
|
631 |
+
#include <ATen/ops/fft_hfft2_ops.h>
|
632 |
+
#include <ATen/ops/fft_hfftn_ops.h>
|
633 |
+
#include <ATen/ops/fft_ifft_ops.h>
|
634 |
+
#include <ATen/ops/fft_ifft2_ops.h>
|
635 |
+
#include <ATen/ops/fft_ifftn_ops.h>
|
636 |
+
#include <ATen/ops/fft_ifftshift_ops.h>
|
637 |
+
#include <ATen/ops/fft_ihfft_ops.h>
|
638 |
+
#include <ATen/ops/fft_ihfft2_ops.h>
|
639 |
+
#include <ATen/ops/fft_ihfftn_ops.h>
|
640 |
+
#include <ATen/ops/fft_irfft_ops.h>
|
641 |
+
#include <ATen/ops/fft_irfft2_ops.h>
|
642 |
+
#include <ATen/ops/fft_irfftn_ops.h>
|
643 |
+
#include <ATen/ops/fft_rfft_ops.h>
|
644 |
+
#include <ATen/ops/fft_rfft2_ops.h>
|
645 |
+
#include <ATen/ops/fft_rfftfreq_ops.h>
|
646 |
+
#include <ATen/ops/fft_rfftn_ops.h>
|
647 |
+
#include <ATen/ops/fill_ops.h>
|
648 |
+
#include <ATen/ops/fill_diagonal_ops.h>
|
649 |
+
#include <ATen/ops/fix_ops.h>
|
650 |
+
#include <ATen/ops/flatten_ops.h>
|
651 |
+
#include <ATen/ops/flatten_dense_tensors_ops.h>
|
652 |
+
#include <ATen/ops/flip_ops.h>
|
653 |
+
#include <ATen/ops/fliplr_ops.h>
|
654 |
+
#include <ATen/ops/flipud_ops.h>
|
655 |
+
#include <ATen/ops/float_power_ops.h>
|
656 |
+
#include <ATen/ops/floor_ops.h>
|
657 |
+
#include <ATen/ops/floor_divide_ops.h>
|
658 |
+
#include <ATen/ops/fmax_ops.h>
|
659 |
+
#include <ATen/ops/fmin_ops.h>
|
660 |
+
#include <ATen/ops/fmod_ops.h>
|
661 |
+
#include <ATen/ops/frac_ops.h>
|
662 |
+
#include <ATen/ops/fractional_max_pool2d_ops.h>
|
663 |
+
#include <ATen/ops/fractional_max_pool2d_backward_ops.h>
|
664 |
+
#include <ATen/ops/fractional_max_pool3d_ops.h>
|
665 |
+
#include <ATen/ops/fractional_max_pool3d_backward_ops.h>
|
666 |
+
#include <ATen/ops/frexp_ops.h>
|
667 |
+
#include <ATen/ops/frobenius_norm_ops.h>
|
668 |
+
#include <ATen/ops/from_file_ops.h>
|
669 |
+
#include <ATen/ops/full_ops.h>
|
670 |
+
#include <ATen/ops/full_like_ops.h>
|
671 |
+
#include <ATen/ops/fused_moving_avg_obs_fake_quant_ops.h>
|
672 |
+
#include <ATen/ops/gather_ops.h>
|
673 |
+
#include <ATen/ops/gather_backward_ops.h>
|
674 |
+
#include <ATen/ops/gcd_ops.h>
|
675 |
+
#include <ATen/ops/ge_ops.h>
|
676 |
+
#include <ATen/ops/gelu_ops.h>
|
677 |
+
#include <ATen/ops/gelu_backward_ops.h>
|
678 |
+
#include <ATen/ops/geometric_ops.h>
|
679 |
+
#include <ATen/ops/geqrf_ops.h>
|
680 |
+
#include <ATen/ops/ger_ops.h>
|
681 |
+
#include <ATen/ops/glu_ops.h>
|
682 |
+
#include <ATen/ops/glu_backward_ops.h>
|
683 |
+
#include <ATen/ops/glu_backward_jvp_ops.h>
|
684 |
+
#include <ATen/ops/glu_jvp_ops.h>
|
685 |
+
#include <ATen/ops/gradient_ops.h>
|
686 |
+
#include <ATen/ops/greater_ops.h>
|
687 |
+
#include <ATen/ops/greater_equal_ops.h>
|
688 |
+
#include <ATen/ops/grid_sampler_ops.h>
|
689 |
+
#include <ATen/ops/grid_sampler_2d_ops.h>
|
690 |
+
#include <ATen/ops/grid_sampler_2d_backward_ops.h>
|
691 |
+
#include <ATen/ops/grid_sampler_3d_ops.h>
|
692 |
+
#include <ATen/ops/grid_sampler_3d_backward_ops.h>
|
693 |
+
#include <ATen/ops/group_norm_ops.h>
|
694 |
+
#include <ATen/ops/gru_ops.h>
|
695 |
+
#include <ATen/ops/gru_cell_ops.h>
|
696 |
+
#include <ATen/ops/gt_ops.h>
|
697 |
+
#include <ATen/ops/hamming_window_ops.h>
|
698 |
+
#include <ATen/ops/hann_window_ops.h>
|
699 |
+
#include <ATen/ops/hardshrink_ops.h>
|
700 |
+
#include <ATen/ops/hardshrink_backward_ops.h>
|
701 |
+
#include <ATen/ops/hardsigmoid_ops.h>
|
702 |
+
#include <ATen/ops/hardsigmoid_backward_ops.h>
|
703 |
+
#include <ATen/ops/hardswish_ops.h>
|
704 |
+
#include <ATen/ops/hardswish_backward_ops.h>
|
705 |
+
#include <ATen/ops/hardtanh_ops.h>
|
706 |
+
#include <ATen/ops/hardtanh_backward_ops.h>
|
707 |
+
#include <ATen/ops/heaviside_ops.h>
|
708 |
+
#include <ATen/ops/hinge_embedding_loss_ops.h>
|
709 |
+
#include <ATen/ops/histc_ops.h>
|
710 |
+
#include <ATen/ops/histogram_ops.h>
|
711 |
+
#include <ATen/ops/histogramdd_ops.h>
|
712 |
+
#include <ATen/ops/hsplit_ops.h>
|
713 |
+
#include <ATen/ops/hspmm_ops.h>
|
714 |
+
#include <ATen/ops/hstack_ops.h>
|
715 |
+
#include <ATen/ops/huber_loss_ops.h>
|
716 |
+
#include <ATen/ops/huber_loss_backward_ops.h>
|
717 |
+
#include <ATen/ops/hypot_ops.h>
|
718 |
+
#include <ATen/ops/i0_ops.h>
|
719 |
+
#include <ATen/ops/igamma_ops.h>
|
720 |
+
#include <ATen/ops/igammac_ops.h>
|
721 |
+
#include <ATen/ops/im2col_ops.h>
|
722 |
+
#include <ATen/ops/imag_ops.h>
|
723 |
+
#include <ATen/ops/index_ops.h>
|
724 |
+
#include <ATen/ops/index_add_ops.h>
|
725 |
+
#include <ATen/ops/index_copy_ops.h>
|
726 |
+
#include <ATen/ops/index_fill_ops.h>
|
727 |
+
#include <ATen/ops/index_put_ops.h>
|
728 |
+
#include <ATen/ops/index_reduce_ops.h>
|
729 |
+
#include <ATen/ops/index_select_ops.h>
|
730 |
+
#include <ATen/ops/index_select_backward_ops.h>
|
731 |
+
#include <ATen/ops/indices_ops.h>
|
732 |
+
#include <ATen/ops/indices_copy_ops.h>
|
733 |
+
#include <ATen/ops/infinitely_differentiable_gelu_backward_ops.h>
|
734 |
+
#include <ATen/ops/inner_ops.h>
|
735 |
+
#include <ATen/ops/instance_norm_ops.h>
|
736 |
+
#include <ATen/ops/int_repr_ops.h>
|
737 |
+
#include <ATen/ops/inverse_ops.h>
|
738 |
+
#include <ATen/ops/is_coalesced_ops.h>
|
739 |
+
#include <ATen/ops/is_complex_ops.h>
|
740 |
+
#include <ATen/ops/is_conj_ops.h>
|
741 |
+
#include <ATen/ops/is_distributed_ops.h>
|
742 |
+
#include <ATen/ops/is_floating_point_ops.h>
|
743 |
+
#include <ATen/ops/is_inference_ops.h>
|
744 |
+
#include <ATen/ops/is_leaf_ops.h>
|
745 |
+
#include <ATen/ops/is_neg_ops.h>
|
746 |
+
#include <ATen/ops/is_nonzero_ops.h>
|
747 |
+
#include <ATen/ops/is_pinned_ops.h>
|
748 |
+
#include <ATen/ops/is_same_size_ops.h>
|
749 |
+
#include <ATen/ops/is_set_to_ops.h>
|
750 |
+
#include <ATen/ops/is_signed_ops.h>
|
751 |
+
#include <ATen/ops/is_vulkan_available_ops.h>
|
752 |
+
#include <ATen/ops/isclose_ops.h>
|
753 |
+
#include <ATen/ops/isfinite_ops.h>
|
754 |
+
#include <ATen/ops/isin_ops.h>
|
755 |
+
#include <ATen/ops/isinf_ops.h>
|
756 |
+
#include <ATen/ops/isnan_ops.h>
|
757 |
+
#include <ATen/ops/isneginf_ops.h>
|
758 |
+
#include <ATen/ops/isposinf_ops.h>
|
759 |
+
#include <ATen/ops/isreal_ops.h>
|
760 |
+
#include <ATen/ops/istft_ops.h>
|
761 |
+
#include <ATen/ops/item_ops.h>
|
762 |
+
#include <ATen/ops/kaiser_window_ops.h>
|
763 |
+
#include <ATen/ops/kl_div_ops.h>
|
764 |
+
#include <ATen/ops/kron_ops.h>
|
765 |
+
#include <ATen/ops/kthvalue_ops.h>
|
766 |
+
#include <ATen/ops/l1_loss_ops.h>
|
767 |
+
#include <ATen/ops/layer_norm_ops.h>
|
768 |
+
#include <ATen/ops/lcm_ops.h>
|
769 |
+
#include <ATen/ops/ldexp_ops.h>
|
770 |
+
#include <ATen/ops/le_ops.h>
|
771 |
+
#include <ATen/ops/leaky_relu_ops.h>
|
772 |
+
#include <ATen/ops/leaky_relu_backward_ops.h>
|
773 |
+
#include <ATen/ops/lerp_ops.h>
|
774 |
+
#include <ATen/ops/less_ops.h>
|
775 |
+
#include <ATen/ops/less_equal_ops.h>
|
776 |
+
#include <ATen/ops/lgamma_ops.h>
|
777 |
+
#include <ATen/ops/lift_ops.h>
|
778 |
+
#include <ATen/ops/lift_fresh_ops.h>
|
779 |
+
#include <ATen/ops/lift_fresh_copy_ops.h>
|
780 |
+
#include <ATen/ops/linalg_cholesky_ops.h>
|
781 |
+
#include <ATen/ops/linalg_cholesky_ex_ops.h>
|
782 |
+
#include <ATen/ops/linalg_cond_ops.h>
|
783 |
+
#include <ATen/ops/linalg_cross_ops.h>
|
784 |
+
#include <ATen/ops/linalg_det_ops.h>
|
785 |
+
#include <ATen/ops/linalg_diagonal_ops.h>
|
786 |
+
#include <ATen/ops/linalg_eig_ops.h>
|
787 |
+
#include <ATen/ops/linalg_eigh_ops.h>
|
788 |
+
#include <ATen/ops/linalg_eigvals_ops.h>
|
789 |
+
#include <ATen/ops/linalg_eigvalsh_ops.h>
|
790 |
+
#include <ATen/ops/linalg_householder_product_ops.h>
|
791 |
+
#include <ATen/ops/linalg_inv_ops.h>
|
792 |
+
#include <ATen/ops/linalg_inv_ex_ops.h>
|
793 |
+
#include <ATen/ops/linalg_ldl_factor_ops.h>
|
794 |
+
#include <ATen/ops/linalg_ldl_factor_ex_ops.h>
|
795 |
+
#include <ATen/ops/linalg_ldl_solve_ops.h>
|
796 |
+
#include <ATen/ops/linalg_lstsq_ops.h>
|
797 |
+
#include <ATen/ops/linalg_lu_ops.h>
|
798 |
+
#include <ATen/ops/linalg_lu_factor_ops.h>
|
799 |
+
#include <ATen/ops/linalg_lu_factor_ex_ops.h>
|
800 |
+
#include <ATen/ops/linalg_lu_solve_ops.h>
|
801 |
+
#include <ATen/ops/linalg_matmul_ops.h>
|
802 |
+
#include <ATen/ops/linalg_matrix_exp_ops.h>
|
803 |
+
#include <ATen/ops/linalg_matrix_norm_ops.h>
|
804 |
+
#include <ATen/ops/linalg_matrix_power_ops.h>
|
805 |
+
#include <ATen/ops/linalg_matrix_rank_ops.h>
|
806 |
+
#include <ATen/ops/linalg_multi_dot_ops.h>
|
807 |
+
#include <ATen/ops/linalg_norm_ops.h>
|
808 |
+
#include <ATen/ops/linalg_pinv_ops.h>
|
809 |
+
#include <ATen/ops/linalg_qr_ops.h>
|
810 |
+
#include <ATen/ops/linalg_slogdet_ops.h>
|
811 |
+
#include <ATen/ops/linalg_solve_ops.h>
|
812 |
+
#include <ATen/ops/linalg_solve_ex_ops.h>
|
813 |
+
#include <ATen/ops/linalg_solve_triangular_ops.h>
|
814 |
+
#include <ATen/ops/linalg_svd_ops.h>
|
815 |
+
#include <ATen/ops/linalg_svdvals_ops.h>
|
816 |
+
#include <ATen/ops/linalg_tensorinv_ops.h>
|
817 |
+
#include <ATen/ops/linalg_tensorsolve_ops.h>
|
818 |
+
#include <ATen/ops/linalg_vander_ops.h>
|
819 |
+
#include <ATen/ops/linalg_vecdot_ops.h>
|
820 |
+
#include <ATen/ops/linalg_vector_norm_ops.h>
|
821 |
+
#include <ATen/ops/linear_ops.h>
|
822 |
+
#include <ATen/ops/linear_backward_ops.h>
|
823 |
+
#include <ATen/ops/linspace_ops.h>
|
824 |
+
#include <ATen/ops/log_ops.h>
|
825 |
+
#include <ATen/ops/log10_ops.h>
|
826 |
+
#include <ATen/ops/log1p_ops.h>
|
827 |
+
#include <ATen/ops/log2_ops.h>
|
828 |
+
#include <ATen/ops/log_normal_ops.h>
|
829 |
+
#include <ATen/ops/log_sigmoid_ops.h>
|
830 |
+
#include <ATen/ops/log_sigmoid_backward_ops.h>
|
831 |
+
#include <ATen/ops/log_sigmoid_forward_ops.h>
|
832 |
+
#include <ATen/ops/log_softmax_ops.h>
|
833 |
+
#include <ATen/ops/logaddexp_ops.h>
|
834 |
+
#include <ATen/ops/logaddexp2_ops.h>
|
835 |
+
#include <ATen/ops/logcumsumexp_ops.h>
|
836 |
+
#include <ATen/ops/logdet_ops.h>
|
837 |
+
#include <ATen/ops/logical_and_ops.h>
|
838 |
+
#include <ATen/ops/logical_not_ops.h>
|
839 |
+
#include <ATen/ops/logical_or_ops.h>
|
840 |
+
#include <ATen/ops/logical_xor_ops.h>
|
841 |
+
#include <ATen/ops/logit_ops.h>
|
842 |
+
#include <ATen/ops/logit_backward_ops.h>
|
843 |
+
#include <ATen/ops/logspace_ops.h>
|
844 |
+
#include <ATen/ops/logsumexp_ops.h>
|
845 |
+
#include <ATen/ops/lshift_ops.h>
|
846 |
+
#include <ATen/ops/lstm_ops.h>
|
847 |
+
#include <ATen/ops/lstm_cell_ops.h>
|
848 |
+
#include <ATen/ops/lstm_mps_backward_ops.h>
|
849 |
+
#include <ATen/ops/lt_ops.h>
|
850 |
+
#include <ATen/ops/lu_solve_ops.h>
|
851 |
+
#include <ATen/ops/lu_unpack_ops.h>
|
852 |
+
#include <ATen/ops/mH_ops.h>
|
853 |
+
#include <ATen/ops/mT_ops.h>
|
854 |
+
#include <ATen/ops/margin_ranking_loss_ops.h>
|
855 |
+
#include <ATen/ops/masked_fill_ops.h>
|
856 |
+
#include <ATen/ops/masked_scatter_ops.h>
|
857 |
+
#include <ATen/ops/masked_scatter_backward_ops.h>
|
858 |
+
#include <ATen/ops/masked_select_ops.h>
|
859 |
+
#include <ATen/ops/masked_select_backward_ops.h>
|
860 |
+
#include <ATen/ops/matmul_ops.h>
|
861 |
+
#include <ATen/ops/matmul_backward_ops.h>
|
862 |
+
#include <ATen/ops/matrix_H_ops.h>
|
863 |
+
#include <ATen/ops/matrix_exp_ops.h>
|
864 |
+
#include <ATen/ops/matrix_exp_backward_ops.h>
|
865 |
+
#include <ATen/ops/matrix_power_ops.h>
|
866 |
+
#include <ATen/ops/max_ops.h>
|
867 |
+
#include <ATen/ops/max_pool1d_ops.h>
|
868 |
+
#include <ATen/ops/max_pool1d_with_indices_ops.h>
|
869 |
+
#include <ATen/ops/max_pool2d_ops.h>
|
870 |
+
#include <ATen/ops/max_pool2d_backward_ops.h>
|
871 |
+
#include <ATen/ops/max_pool2d_with_indices_ops.h>
|
872 |
+
#include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
|
873 |
+
#include <ATen/ops/max_pool3d_ops.h>
|
874 |
+
#include <ATen/ops/max_pool3d_with_indices_ops.h>
|
875 |
+
#include <ATen/ops/max_pool3d_with_indices_backward_ops.h>
|
876 |
+
#include <ATen/ops/max_unpool2d_ops.h>
|
877 |
+
#include <ATen/ops/max_unpool3d_ops.h>
|
878 |
+
#include <ATen/ops/maximum_ops.h>
|
879 |
+
#include <ATen/ops/mean_ops.h>
|
880 |
+
#include <ATen/ops/median_ops.h>
|
881 |
+
#include <ATen/ops/meshgrid_ops.h>
|
882 |
+
#include <ATen/ops/min_ops.h>
|
883 |
+
#include <ATen/ops/minimum_ops.h>
|
884 |
+
#include <ATen/ops/miopen_batch_norm_ops.h>
|
885 |
+
#include <ATen/ops/miopen_batch_norm_backward_ops.h>
|
886 |
+
#include <ATen/ops/miopen_convolution_ops.h>
|
887 |
+
#include <ATen/ops/miopen_convolution_add_relu_ops.h>
|
888 |
+
#include <ATen/ops/miopen_convolution_relu_ops.h>
|
889 |
+
#include <ATen/ops/miopen_convolution_transpose_ops.h>
|
890 |
+
#include <ATen/ops/miopen_depthwise_convolution_ops.h>
|
891 |
+
#include <ATen/ops/miopen_rnn_ops.h>
|
892 |
+
#include <ATen/ops/miopen_rnn_backward_ops.h>
|
893 |
+
#include <ATen/ops/mish_ops.h>
|
894 |
+
#include <ATen/ops/mish_backward_ops.h>
|
895 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h>
|
896 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
|
897 |
+
#include <ATen/ops/mkldnn_convolution_ops.h>
|
898 |
+
#include <ATen/ops/mkldnn_linear_ops.h>
|
899 |
+
#include <ATen/ops/mkldnn_linear_backward_ops.h>
|
900 |
+
#include <ATen/ops/mkldnn_linear_backward_input_ops.h>
|
901 |
+
#include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
|
902 |
+
#include <ATen/ops/mkldnn_max_pool2d_ops.h>
|
903 |
+
#include <ATen/ops/mkldnn_max_pool2d_backward_ops.h>
|
904 |
+
#include <ATen/ops/mkldnn_max_pool3d_ops.h>
|
905 |
+
#include <ATen/ops/mkldnn_max_pool3d_backward_ops.h>
|
906 |
+
#include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h>
|
907 |
+
#include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h>
|
908 |
+
#include <ATen/ops/mkldnn_rnn_layer_ops.h>
|
909 |
+
#include <ATen/ops/mkldnn_rnn_layer_backward_ops.h>
|
910 |
+
#include <ATen/ops/mm_ops.h>
|
911 |
+
#include <ATen/ops/mode_ops.h>
|
912 |
+
#include <ATen/ops/moveaxis_ops.h>
|
913 |
+
#include <ATen/ops/movedim_ops.h>
|
914 |
+
#include <ATen/ops/mps_convolution_backward_ops.h>
|
915 |
+
#include <ATen/ops/mps_convolution_transpose_backward_ops.h>
|
916 |
+
#include <ATen/ops/mse_loss_ops.h>
|
917 |
+
#include <ATen/ops/mse_loss_backward_ops.h>
|
918 |
+
#include <ATen/ops/msort_ops.h>
|
919 |
+
#include <ATen/ops/mul_ops.h>
|
920 |
+
#include <ATen/ops/multi_margin_loss_ops.h>
|
921 |
+
#include <ATen/ops/multi_margin_loss_backward_ops.h>
|
922 |
+
#include <ATen/ops/multilabel_margin_loss_ops.h>
|
923 |
+
#include <ATen/ops/multilabel_margin_loss_backward_ops.h>
|
924 |
+
#include <ATen/ops/multilabel_margin_loss_forward_ops.h>
|
925 |
+
#include <ATen/ops/multinomial_ops.h>
|
926 |
+
#include <ATen/ops/multiply_ops.h>
|
927 |
+
#include <ATen/ops/mv_ops.h>
|
928 |
+
#include <ATen/ops/mvlgamma_ops.h>
|
929 |
+
#include <ATen/ops/nan_to_num_ops.h>
|
930 |
+
#include <ATen/ops/nanmean_ops.h>
|
931 |
+
#include <ATen/ops/nanmedian_ops.h>
|
932 |
+
#include <ATen/ops/nanquantile_ops.h>
|
933 |
+
#include <ATen/ops/nansum_ops.h>
|
934 |
+
#include <ATen/ops/narrow_ops.h>
|
935 |
+
#include <ATen/ops/narrow_copy_ops.h>
|
936 |
+
#include <ATen/ops/native_batch_norm_ops.h>
|
937 |
+
#include <ATen/ops/native_batch_norm_backward_ops.h>
|
938 |
+
#include <ATen/ops/native_channel_shuffle_ops.h>
|
939 |
+
#include <ATen/ops/native_dropout_ops.h>
|
940 |
+
#include <ATen/ops/native_dropout_backward_ops.h>
|
941 |
+
#include <ATen/ops/native_group_norm_ops.h>
|
942 |
+
#include <ATen/ops/native_group_norm_backward_ops.h>
|
943 |
+
#include <ATen/ops/native_layer_norm_ops.h>
|
944 |
+
#include <ATen/ops/native_layer_norm_backward_ops.h>
|
945 |
+
#include <ATen/ops/native_norm_ops.h>
|
946 |
+
#include <ATen/ops/ne_ops.h>
|
947 |
+
#include <ATen/ops/neg_ops.h>
|
948 |
+
#include <ATen/ops/negative_ops.h>
|
949 |
+
#include <ATen/ops/nested_to_padded_tensor_ops.h>
|
950 |
+
#include <ATen/ops/new_empty_ops.h>
|
951 |
+
#include <ATen/ops/new_empty_strided_ops.h>
|
952 |
+
#include <ATen/ops/new_full_ops.h>
|
953 |
+
#include <ATen/ops/new_ones_ops.h>
|
954 |
+
#include <ATen/ops/new_zeros_ops.h>
|
955 |
+
#include <ATen/ops/nextafter_ops.h>
|
956 |
+
#include <ATen/ops/nll_loss_ops.h>
|
957 |
+
#include <ATen/ops/nll_loss2d_ops.h>
|
958 |
+
#include <ATen/ops/nll_loss2d_backward_ops.h>
|
959 |
+
#include <ATen/ops/nll_loss2d_forward_ops.h>
|
960 |
+
#include <ATen/ops/nll_loss_backward_ops.h>
|
961 |
+
#include <ATen/ops/nll_loss_forward_ops.h>
|
962 |
+
#include <ATen/ops/nll_loss_nd_ops.h>
|
963 |
+
#include <ATen/ops/nonzero_ops.h>
|
964 |
+
#include <ATen/ops/nonzero_numpy_ops.h>
|
965 |
+
#include <ATen/ops/nonzero_static_ops.h>
|
966 |
+
#include <ATen/ops/norm_ops.h>
|
967 |
+
#include <ATen/ops/norm_except_dim_ops.h>
|
968 |
+
#include <ATen/ops/normal_ops.h>
|
969 |
+
#include <ATen/ops/not_equal_ops.h>
|
970 |
+
#include <ATen/ops/nuclear_norm_ops.h>
|
971 |
+
#include <ATen/ops/numpy_T_ops.h>
|
972 |
+
#include <ATen/ops/one_hot_ops.h>
|
973 |
+
#include <ATen/ops/ones_ops.h>
|
974 |
+
#include <ATen/ops/ones_like_ops.h>
|
975 |
+
#include <ATen/ops/or_ops.h>
#include <ATen/ops/orgqr_ops.h>
#include <ATen/ops/ormqr_ops.h>
#include <ATen/ops/outer_ops.h>
#include <ATen/ops/output_nr_ops.h>
#include <ATen/ops/pad_ops.h>
#include <ATen/ops/pad_sequence_ops.h>
#include <ATen/ops/pairwise_distance_ops.h>
#include <ATen/ops/pdist_ops.h>
#include <ATen/ops/permute_ops.h>
#include <ATen/ops/permute_copy_ops.h>
#include <ATen/ops/pin_memory_ops.h>
#include <ATen/ops/pinverse_ops.h>
#include <ATen/ops/pixel_shuffle_ops.h>
#include <ATen/ops/pixel_unshuffle_ops.h>
#include <ATen/ops/poisson_ops.h>
#include <ATen/ops/poisson_nll_loss_ops.h>
#include <ATen/ops/polar_ops.h>
#include <ATen/ops/polygamma_ops.h>
#include <ATen/ops/positive_ops.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/prelu_ops.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/promote_types_ops.h>
#include <ATen/ops/put_ops.h>
#include <ATen/ops/q_per_channel_axis_ops.h>
#include <ATen/ops/q_per_channel_scales_ops.h>
#include <ATen/ops/q_per_channel_zero_points_ops.h>
#include <ATen/ops/q_scale_ops.h>
#include <ATen/ops/q_zero_point_ops.h>
#include <ATen/ops/qr_ops.h>
#include <ATen/ops/qscheme_ops.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantize_per_channel_ops.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_tensor_dynamic_ops.h>
#include <ATen/ops/quantized_batch_norm_ops.h>
#include <ATen/ops/quantized_gru_cell_ops.h>
#include <ATen/ops/quantized_lstm_cell_ops.h>
#include <ATen/ops/quantized_max_pool1d_ops.h>
#include <ATen/ops/quantized_max_pool2d_ops.h>
#include <ATen/ops/quantized_max_pool3d_ops.h>
#include <ATen/ops/quantized_rnn_relu_cell_ops.h>
#include <ATen/ops/quantized_rnn_tanh_cell_ops.h>
#include <ATen/ops/rad2deg_ops.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_like_ops.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_like_ops.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/ravel_ops.h>
#include <ATen/ops/real_ops.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/record_stream_ops.h>
#include <ATen/ops/refine_names_ops.h>
#include <ATen/ops/reflection_pad1d_ops.h>
#include <ATen/ops/reflection_pad1d_backward_ops.h>
#include <ATen/ops/reflection_pad2d_ops.h>
#include <ATen/ops/reflection_pad2d_backward_ops.h>
#include <ATen/ops/reflection_pad3d_ops.h>
#include <ATen/ops/reflection_pad3d_backward_ops.h>
#include <ATen/ops/relu_ops.h>
#include <ATen/ops/relu6_ops.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/rename_ops.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/repeat_ops.h>
#include <ATen/ops/repeat_interleave_ops.h>
#include <ATen/ops/replication_pad1d_ops.h>
#include <ATen/ops/replication_pad1d_backward_ops.h>
#include <ATen/ops/replication_pad2d_ops.h>
#include <ATen/ops/replication_pad2d_backward_ops.h>
#include <ATen/ops/replication_pad3d_ops.h>
#include <ATen/ops/replication_pad3d_backward_ops.h>
#include <ATen/ops/requires_grad_ops.h>
#include <ATen/ops/reshape_ops.h>
#include <ATen/ops/reshape_as_ops.h>
#include <ATen/ops/resize_ops.h>
#include <ATen/ops/resize_as_ops.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/resolve_conj_ops.h>
#include <ATen/ops/resolve_neg_ops.h>
#include <ATen/ops/result_type_ops.h>
#include <ATen/ops/retain_grad_ops.h>
#include <ATen/ops/retains_grad_ops.h>
#include <ATen/ops/rnn_relu_ops.h>
#include <ATen/ops/rnn_relu_cell_ops.h>
#include <ATen/ops/rnn_tanh_ops.h>
#include <ATen/ops/rnn_tanh_cell_ops.h>
#include <ATen/ops/roll_ops.h>
#include <ATen/ops/rot90_ops.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/row_indices_ops.h>
#include <ATen/ops/row_indices_copy_ops.h>
#include <ATen/ops/row_stack_ops.h>
#include <ATen/ops/rrelu_ops.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_backward_ops.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/scalar_tensor_ops.h>
#include <ATen/ops/scaled_dot_product_attention_ops.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_add_ops.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/segment_reduce_ops.h>
#include <ATen/ops/select_ops.h>
#include <ATen/ops/select_backward_ops.h>
#include <ATen/ops/select_copy_ops.h>
#include <ATen/ops/select_scatter_ops.h>
#include <ATen/ops/selu_ops.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_data_ops.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/sigmoid_backward_ops.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/signbit_ops.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_backward_ops.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/size_ops.h>
#include <ATen/ops/slice_ops.h>
#include <ATen/ops/slice_backward_ops.h>
#include <ATen/ops/slice_copy_ops.h>
#include <ATen/ops/slice_inverse_ops.h>
#include <ATen/ops/slice_scatter_ops.h>
#include <ATen/ops/slogdet_ops.h>
#include <ATen/ops/slow_conv3d_ops.h>
#include <ATen/ops/slow_conv3d_forward_ops.h>
#include <ATen/ops/slow_conv_dilated2d_ops.h>
#include <ATen/ops/slow_conv_dilated3d_ops.h>
#include <ATen/ops/slow_conv_transpose2d_ops.h>
#include <ATen/ops/slow_conv_transpose3d_ops.h>
#include <ATen/ops/smm_ops.h>
#include <ATen/ops/smooth_l1_loss_ops.h>
#include <ATen/ops/smooth_l1_loss_backward_ops.h>
#include <ATen/ops/soft_margin_loss_ops.h>
#include <ATen/ops/soft_margin_loss_backward_ops.h>
#include <ATen/ops/softmax_ops.h>
#include <ATen/ops/softplus_ops.h>
#include <ATen/ops/softplus_backward_ops.h>
#include <ATen/ops/softshrink_ops.h>
#include <ATen/ops/softshrink_backward_ops.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sparse_bsc_tensor_ops.h>
#include <ATen/ops/sparse_bsr_tensor_ops.h>
#include <ATen/ops/sparse_compressed_tensor_ops.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/sparse_csc_tensor_ops.h>
#include <ATen/ops/sparse_csr_tensor_ops.h>
#include <ATen/ops/sparse_dim_ops.h>
#include <ATen/ops/sparse_mask_ops.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_resize_and_clear_ops.h>
#include <ATen/ops/sparse_sampled_addmm_ops.h>
#include <ATen/ops/special_airy_ai_ops.h>
#include <ATen/ops/special_bessel_j0_ops.h>
#include <ATen/ops/special_bessel_j1_ops.h>
#include <ATen/ops/special_bessel_y0_ops.h>
#include <ATen/ops/special_bessel_y1_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_digamma_ops.h>
#include <ATen/ops/special_entr_ops.h>
#include <ATen/ops/special_erf_ops.h>
#include <ATen/ops/special_erfc_ops.h>
#include <ATen/ops/special_erfcx_ops.h>
#include <ATen/ops/special_erfinv_ops.h>
#include <ATen/ops/special_exp2_ops.h>
#include <ATen/ops/special_expit_ops.h>
#include <ATen/ops/special_expm1_ops.h>
#include <ATen/ops/special_gammainc_ops.h>
#include <ATen/ops/special_gammaincc_ops.h>
#include <ATen/ops/special_gammaln_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_i0_ops.h>
#include <ATen/ops/special_i0e_ops.h>
#include <ATen/ops/special_i1_ops.h>
#include <ATen/ops/special_i1e_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_log1p_ops.h>
#include <ATen/ops/special_log_ndtr_ops.h>
#include <ATen/ops/special_log_softmax_ops.h>
#include <ATen/ops/special_logit_ops.h>
#include <ATen/ops/special_logsumexp_ops.h>
#include <ATen/ops/special_modified_bessel_i0_ops.h>
#include <ATen/ops/special_modified_bessel_i1_ops.h>
#include <ATen/ops/special_modified_bessel_k0_ops.h>
#include <ATen/ops/special_modified_bessel_k1_ops.h>
#include <ATen/ops/special_multigammaln_ops.h>
#include <ATen/ops/special_ndtr_ops.h>
#include <ATen/ops/special_ndtri_ops.h>
#include <ATen/ops/special_polygamma_ops.h>
#include <ATen/ops/special_psi_ops.h>
#include <ATen/ops/special_round_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_sinc_ops.h>
#include <ATen/ops/special_softmax_ops.h>
#include <ATen/ops/special_spherical_bessel_j0_ops.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/split_ops.h>
#include <ATen/ops/split_copy_ops.h>
#include <ATen/ops/split_with_sizes_ops.h>
#include <ATen/ops/split_with_sizes_copy_ops.h>
#include <ATen/ops/sqrt_ops.h>
#include <ATen/ops/square_ops.h>
#include <ATen/ops/squeeze_ops.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/sspaddmm_ops.h>
#include <ATen/ops/stack_ops.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/stft_ops.h>
#include <ATen/ops/stride_ops.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_to_size_ops.h>
#include <ATen/ops/svd_ops.h>
#include <ATen/ops/swapaxes_ops.h>
#include <ATen/ops/swapdims_ops.h>
#include <ATen/ops/sym_constrain_range_ops.h>
#include <ATen/ops/sym_constrain_range_for_size_ops.h>
#include <ATen/ops/sym_numel_ops.h>
#include <ATen/ops/sym_size_ops.h>
#include <ATen/ops/sym_storage_offset_ops.h>
#include <ATen/ops/sym_stride_ops.h>
#include <ATen/ops/t_ops.h>
#include <ATen/ops/t_copy_ops.h>
#include <ATen/ops/take_ops.h>
#include <ATen/ops/take_along_dim_ops.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tanh_ops.h>
#include <ATen/ops/tanh_backward_ops.h>
#include <ATen/ops/tensor_split_ops.h>
#include <ATen/ops/tensordot_ops.h>
#include <ATen/ops/thnn_conv2d_ops.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/threshold_backward_ops.h>
#include <ATen/ops/tile_ops.h>
#include <ATen/ops/to_ops.h>
#include <ATen/ops/to_dense_ops.h>
#include <ATen/ops/to_dense_backward_ops.h>
#include <ATen/ops/to_mkldnn_ops.h>
#include <ATen/ops/to_mkldnn_backward_ops.h>
#include <ATen/ops/to_padded_tensor_ops.h>
#include <ATen/ops/to_sparse_ops.h>
#include <ATen/ops/to_sparse_bsc_ops.h>
#include <ATen/ops/to_sparse_bsr_ops.h>
#include <ATen/ops/to_sparse_csc_ops.h>
#include <ATen/ops/to_sparse_csr_ops.h>
#include <ATen/ops/topk_ops.h>
#include <ATen/ops/trace_ops.h>
#include <ATen/ops/trace_backward_ops.h>
#include <ATen/ops/transpose_ops.h>
#include <ATen/ops/transpose_copy_ops.h>
#include <ATen/ops/trapezoid_ops.h>
#include <ATen/ops/trapz_ops.h>
#include <ATen/ops/triangular_solve_ops.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/tril_indices_ops.h>
#include <ATen/ops/triplet_margin_loss_ops.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/triu_indices_ops.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/trunc_ops.h>
#include <ATen/ops/type_as_ops.h>
#include <ATen/ops/unbind_ops.h>
#include <ATen/ops/unbind_copy_ops.h>
#include <ATen/ops/unflatten_ops.h>
#include <ATen/ops/unflatten_dense_tensors_ops.h>
#include <ATen/ops/unfold_ops.h>
#include <ATen/ops/unfold_backward_ops.h>
#include <ATen/ops/unfold_copy_ops.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/unique_consecutive_ops.h>
#include <ATen/ops/unique_dim_ops.h>
#include <ATen/ops/unique_dim_consecutive_ops.h>
#include <ATen/ops/unsafe_chunk_ops.h>
#include <ATen/ops/unsafe_split_ops.h>
#include <ATen/ops/unsafe_split_with_sizes_ops.h>
#include <ATen/ops/unsqueeze_ops.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/upsample_bicubic2d_ops.h>
#include <ATen/ops/upsample_bicubic2d_backward_ops.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_bilinear2d_backward_ops.h>
#include <ATen/ops/upsample_linear1d_ops.h>
#include <ATen/ops/upsample_linear1d_backward_ops.h>
#include <ATen/ops/upsample_nearest1d_ops.h>
#include <ATen/ops/upsample_nearest1d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/upsample_nearest2d_backward_ops.h>
#include <ATen/ops/upsample_nearest3d_ops.h>
#include <ATen/ops/upsample_nearest3d_backward_ops.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/upsample_trilinear3d_backward_ops.h>
#include <ATen/ops/value_selecting_reduction_backward_ops.h>
#include <ATen/ops/values_ops.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/vander_ops.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/vdot_ops.h>
#include <ATen/ops/view_ops.h>
#include <ATen/ops/view_as_ops.h>
#include <ATen/ops/view_as_complex_ops.h>
#include <ATen/ops/view_as_complex_copy_ops.h>
#include <ATen/ops/view_as_real_ops.h>
#include <ATen/ops/view_as_real_copy_ops.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/vsplit_ops.h>
#include <ATen/ops/vstack_ops.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xor_ops.h>
#include <ATen/ops/zero_ops.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_like_ops.h>

// Extension writers: do you write wrapper functions? Are you frustrated with
// resolving overloads of operators? Are you frustrated with dealing with
// pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
// further, this is the utility for you.
//
// Given an operator schema: aten::op.overload(...
//
// Use ATEN_FN2(op, overload) to get a *function* version of the operator
// that is guaranteed to not be overloaded. This means that you can safely
// decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
//
// Given an operator schema without an overload name: aten::op(...
//
// Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
//
// There is some interesting behavior for out= operations.
// ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
// that is, the order of arguments is exactly what it looks like in the schema.

#define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
#define ATEN_FN(op_name) at::_ops::op_name::call

// Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
// metadata about a given aten operator.
// Notable data on the class includes:
// - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
// - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
// - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
// - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"

#define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
#define ATEN_OP(op_name) at::_ops::op_name

// WARNING: Please do not call any of the ops in the _ops namespace directly.
// Use the ATEN_FN macros. We do not guarantee stability of the naming
// scheme for the functions in at::_ops

// See Note [The ATen Operators API] for details of the at::_ops namespace

namespace at {
namespace _ops {

} // namespace _ops
} // namespace at
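The macro block above describes ATEN_FN/ATEN_OP only in prose; the following is a minimal, hedged sketch of how an extension might use them. The function and variable names (call_through_ops, add_fn_t, add_name) are illustrative, not part of the header.

#include <ATen/ATen.h>
#include <ATen/Operators.h>

// ATEN_FN2 yields a non-overloaded function, so taking its address and
// decltype'ing it is safe.
using add_fn_t = decltype(&ATEN_FN2(add, Tensor));

at::Tensor call_through_ops(const at::Tensor& a, const at::Tensor& b) {
  // Call the unambiguous function form of aten::add.Tensor; alpha defaults
  // to 1 in the schema, so it is passed explicitly here.
  add_fn_t f = &ATEN_FN2(add, Tensor);
  return f(a, b, /*alpha=*/1);
}

// ATEN_OP2 exposes compile-time metadata, e.g. the operator's string name.
constexpr const char* add_name = ATEN_OP2(add, Tensor)::name;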
venv/lib/python3.10/site-packages/torch/include/ATen/Parallel.h
ADDED
@@ -0,0 +1,160 @@
#pragma once
#include <ATen/Config.h>
#include <c10/macros/Macros.h>
#include <functional>
#include <string>

namespace at {

inline int64_t divup(int64_t x, int64_t y) {
  return (x + y - 1) / y;
}

// Called during new thread initialization
TORCH_API void init_num_threads();

// Sets the number of threads to be used in parallel region
TORCH_API void set_num_threads(int);

// Returns the maximum number of threads that may be used in a parallel region
TORCH_API int get_num_threads();

// Returns the current thread number (starting from 0)
// in the current parallel region, or 0 in the sequential region
TORCH_API int get_thread_num();

// Checks whether the code runs in parallel region
TORCH_API bool in_parallel_region();

namespace internal {

// Initialise num_threads lazily at first parallel call
inline void lazy_init_num_threads() {
  thread_local bool init = false;
  if (C10_UNLIKELY(!init)) {
    at::init_num_threads();
    init = true;
  }
}

TORCH_API void set_thread_num(int);

class TORCH_API ThreadIdGuard {
 public:
  ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
    set_thread_num(new_id);
  }

  ~ThreadIdGuard() {
    set_thread_num(old_id_);
  }

 private:
  int old_id_;
};

} // namespace internal

/*
parallel_for

begin: index at which to start applying user function

end: index at which to stop applying user function

grain_size: number of elements per chunk. impacts the degree of parallelization

f: user function applied in parallel to the chunks, signature:
  void f(int64_t begin, int64_t end)

Warning: parallel_for does NOT copy thread local
states from the current thread to the worker threads.
This means for example that Tensor operations CANNOT be used in the
body of your function, only data pointers.
*/
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f);

/*
parallel_reduce

begin: index at which to start applying reduction

end: index at which to stop applying reduction

grain_size: number of elements per chunk. impacts number of elements in
intermediate results tensor and degree of parallelization.

ident: identity for binary combination function sf. sf(ident, x) needs to return
x.

f: function for reduction over a chunk. f needs to be of signature scalar_t
f(int64_t partial_begin, int64_t partial_end, scalar_t identity)

sf: function to combine two partial results. sf needs to be of signature
scalar_t sf(scalar_t x, scalar_t y)

For example, you might have a tensor of 10000 entries and want to sum together
all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
an intermediate result tensor with 4 elements. Then it will execute the function
"f" you provide and pass the beginning and end index of these chunks, so
0-2499, 2500-4999, etc. and the combination identity. It will then write out
the result from each of these chunks into the intermediate result tensor. After
that it'll reduce the partial results from each chunk into a single number using
the combination function sf and the identity ident. For a total summation this
would be "+" and 0 respectively. This is similar to tbb's approach [1], where
you need to provide a function to accumulate a subrange, a function to combine
two partial results and an identity.

Warning: parallel_reduce does NOT copy thread local
states from the current thread to the worker threads.
This means for example that Tensor operations CANNOT be used in the
body of your function, only data pointers.

[1] https://software.intel.com/en-us/node/506154
*/
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf);

// Returns a detailed string describing parallelization settings
TORCH_API std::string get_parallel_info();

// Sets number of threads used for inter-op parallelism
TORCH_API void set_num_interop_threads(int);

// Returns the number of threads used for inter-op parallelism
TORCH_API int get_num_interop_threads();

// Launches inter-op parallel task
TORCH_API void launch(std::function<void()> func);
namespace internal {
void launch_no_thread_state(std::function<void()> fn);
} // namespace internal

// Launches intra-op parallel task
TORCH_API void intraop_launch(std::function<void()> func);

// Returns number of intra-op threads used by default
TORCH_API int intraop_default_num_threads();

} // namespace at

#if AT_PARALLEL_OPENMP
#include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
#elif AT_PARALLEL_NATIVE
#include <ATen/ParallelNative.h> // IWYU pragma: keep
#elif AT_PARALLEL_NATIVE_TBB
#include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
#endif

#include <ATen/Parallel-inl.h> // IWYU pragma: keep
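The doc comments above describe at::parallel_for and at::parallel_reduce in terms of chunks, a grain size, an identity, and a combine function; below is a minimal usage sketch. The function name, buffer, and grain size are illustrative assumptions, and only raw pointers/plain data are touched inside the bodies, per the thread-local-state warning.

#include <ATen/Parallel.h>
#include <cstdint>
#include <vector>

// Scale a plain buffer in place with parallel_for, then sum it with
// parallel_reduce (per-chunk partial sums combined with "+" and identity 0).
float parallel_scale_and_sum(std::vector<float>& data) {
  const int64_t n = static_cast<int64_t>(data.size());
  const int64_t grain = 2048; // illustrative grain size

  // Each invocation of the lambda handles the half-open range [begin, end).
  at::parallel_for(0, n, grain, [&](int64_t begin, int64_t end) {
    for (int64_t i = begin; i < end; ++i) {
      data[i] *= 2.0f;
    }
  });

  return at::parallel_reduce(
      0, n, grain, 0.0f,
      [&](int64_t begin, int64_t end, float ident) {
        float partial = ident;
        for (int64_t i = begin; i < end; ++i) {
          partial += data[i];
        }
        return partial;
      },
      [](float a, float b) { return a + b; });
}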
venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNative.h
ADDED
@@ -0,0 +1,19 @@
#pragma once

#include <algorithm>
#include <cstddef>
#include <exception>

#include <c10/util/Exception.h>

#define INTRA_OP_PARALLEL

namespace at::internal {

TORCH_API void invoke_parallel(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const std::function<void(int64_t, int64_t)>& f);

} // namespace at::internal
venv/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h
ADDED
@@ -0,0 +1,54 @@
#pragma once

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <exception>

#ifdef _OPENMP
#define INTRA_OP_PARALLEL

#include <omp.h>
#endif

#ifdef _OPENMP
namespace at::internal {
template <typename F>
inline void invoke_parallel(
    int64_t begin,
    int64_t end,
    int64_t grain_size,
    const F& f) {
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;

#pragma omp parallel
  {
    // choose number of tasks based on grain size and number of threads
    // can't use num_threads clause due to bugs in GOMP's thread pool (See
    // #32008)
    int64_t num_threads = omp_get_num_threads();
    if (grain_size > 0) {
      num_threads = std::min(num_threads, divup((end - begin), grain_size));
    }

    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup((end - begin), num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {
      try {
        internal::ThreadIdGuard tid_guard(tid);
        f(begin_tid, std::min(end, chunk_size + begin_tid));
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
  }
  if (eptr) {
    std::rethrow_exception(eptr);
  }
}
} // namespace at::internal
#endif // _OPENMP
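As a concrete reading of the chunking arithmetic in invoke_parallel above, here is a standalone, illustrative re-statement (divup is inlined for the example; with begin=0, end=10000, grain_size=2500 and 8 OpenMP threads it yields 4 chunks of 2500 elements).

#include <algorithm>
#include <cstdint>

// Illustrative helper, equivalent to the divup used above.
inline int64_t divup_example(int64_t x, int64_t y) { return (x + y - 1) / y; }

// Compute the [chunk_begin, chunk_end) range a given thread id would process.
void chunk_bounds(int64_t begin, int64_t end, int64_t grain_size,
                  int64_t omp_threads, int64_t tid,
                  int64_t& chunk_begin, int64_t& chunk_end) {
  int64_t num_threads = omp_threads;
  if (grain_size > 0) {
    num_threads = std::min(num_threads, divup_example(end - begin, grain_size));
  }
  int64_t chunk_size = divup_example(end - begin, num_threads);
  chunk_begin = begin + tid * chunk_size;              // e.g. tid=1 -> 2500
  chunk_end = std::min(end, chunk_begin + chunk_size); // e.g. tid=1 -> 5000
}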
venv/lib/python3.10/site-packages/torch/include/ATen/PythonTorchFunctionTLS.h
ADDED
@@ -0,0 +1,34 @@
#pragma once

#include <c10/core/SafePyObject.h>
#include <c10/macros/Macros.h>

namespace at::impl {

enum TorchFunctionDisabledState { ENABLED, SUBCLASSES_DISABLED, ALL_DISABLED };

struct TORCH_API PythonTorchFunctionTLS {
  static void set_disabled_state(TorchFunctionDisabledState disabled_state_);
  static TorchFunctionDisabledState get_disabled_state();

  static void push_onto_stack(std::shared_ptr<SafePyObject> mode);
  static const std::shared_ptr<SafePyObject> pop_stack();
  static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
  static int64_t stack_len();

  static const PythonTorchFunctionTLS& get_state();
  static void set_state(const PythonTorchFunctionTLS& state);

 private:
  // The mode TLS is split into
  //   - disabled_state, which says which part of torch function are disabled
  //   - stack_, which is a vector of modes representing the stack of user
  //     defined modes
  TorchFunctionDisabledState disabled_state_ =
      TorchFunctionDisabledState::ENABLED;
  std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
};

TORCH_API bool torch_function_mode_enabled();

} // namespace at::impl
venv/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h
ADDED
The diff for this file is too large to render.
See raw diff
venv/lib/python3.10/site-packages/torch/include/ATen/ScalarType.h
ADDED
@@ -0,0 +1,4 @@
#pragma once
#include <ATen/core/ATenGeneral.h> // for BC reasons
#include <c10/core/Backend.h>
#include <c10/core/ScalarType.h>
venv/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h
ADDED
@@ -0,0 +1,13 @@
#pragma once

#include <c10/macros/Export.h>
#include <cstdint>

// A simple thread local enumeration, used to link forward and backward pass
// ops and is used by autograd and observers framework
namespace at::sequence_number {

TORCH_API uint64_t peek();
TORCH_API uint64_t get_and_increment();

} // namespace at::sequence_number
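A small hedged sketch of how the two functions above are typically used (the function name tag_forward_op is illustrative; attaching the value to a record/autograd node is only indicated in a comment):

#include <ATen/SequenceNumber.h>
#include <cstdint>

// Peek at the current per-thread sequence number, then consume one.
void tag_forward_op() {
  uint64_t current = at::sequence_number::peek();
  uint64_t assigned = at::sequence_number::get_and_increment();
  (void)current;
  (void)assigned; // e.g. attach to a profiler record or autograd node
}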
venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h
ADDED
@@ -0,0 +1,411 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/SparseCsrTensorImpl.h>
|
4 |
+
#include <ATen/SparseTensorImpl.h>
|
5 |
+
#include <ATen/core/Tensor.h>
|
6 |
+
|
7 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
8 |
+
#include <ATen/Functions.h>
|
9 |
+
#include <ATen/NativeFunctions.h>
|
10 |
+
#include <ATen/Operators.h>
|
11 |
+
#else
|
12 |
+
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
|
13 |
+
#include <ATen/ops/resize_as_sparse_native.h>
|
14 |
+
#endif
|
15 |
+
|
16 |
+
#define AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(LAYOUT, NAME, ...) \
|
17 |
+
[&] { \
|
18 |
+
const auto& the_layout = LAYOUT; \
|
19 |
+
switch (the_layout) { \
|
20 |
+
case kSparseCsr: \
|
21 |
+
case kSparseCsc: \
|
22 |
+
case kSparseBsr: \
|
23 |
+
case kSparseBsc: \
|
24 |
+
return __VA_ARGS__(); \
|
25 |
+
default: \
|
26 |
+
AT_ERROR( \
|
27 |
+
NAME, \
|
28 |
+
" expected sparse compressed tensor layout but got ", \
|
29 |
+
the_layout); \
|
30 |
+
} \
|
31 |
+
}()
|
32 |
+
|
33 |
+
#define AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( \
|
34 |
+
LAYOUT, NAME, ROW_DIM_ACTION, COLUMN_DIM_ACTION) \
|
35 |
+
[&]() { \
|
36 |
+
const auto& the_layout = LAYOUT; \
|
37 |
+
switch (the_layout) { \
|
38 |
+
case kSparseCsr: \
|
39 |
+
case kSparseBsr: \
|
40 |
+
return (ROW_DIM_ACTION)(); \
|
41 |
+
case kSparseCsc: \
|
42 |
+
case kSparseBsc: \
|
43 |
+
return (COLUMN_DIM_ACTION)(); \
|
44 |
+
default: \
|
45 |
+
AT_ERROR( \
|
46 |
+
NAME, \
|
47 |
+
" expected sparse compressed tensor layout but got ", \
|
48 |
+
the_layout); \
|
49 |
+
} \
|
50 |
+
}()
|
51 |
+
|
52 |
+
#define AT_DISPATCH_PLAIN_SPARSE_COMPRESSED_LAYOUTS( \
|
53 |
+
LAYOUT, NAME, NO_BLOCK_ACTION, BLOCK_ACTION) \
|
54 |
+
[&]() { \
|
55 |
+
const auto& the_layout = LAYOUT; \
|
56 |
+
switch (the_layout) { \
|
57 |
+
case kSparseCsr: \
|
58 |
+
case kSparseCsc: \
|
59 |
+
return (NO_BLOCK_ACTION)(); \
|
60 |
+
case kSparseBsr: \
|
61 |
+
case kSparseBsc: \
|
62 |
+
return (BLOCK_ACTION)(); \
|
63 |
+
default: \
|
64 |
+
AT_ERROR( \
|
65 |
+
NAME, \
|
66 |
+
" expected sparse compressed tensor layout but got ", \
|
67 |
+
the_layout); \
|
68 |
+
} \
|
69 |
+
}()
|
70 |
+
|
71 |
+
#define AT_DISPATCH_SPARSE_ROW_COMPRESSED_LAYOUTS( \
|
72 |
+
LAYOUT, NAME, ROW_DIM_ACTION) \
|
73 |
+
[&]() { \
|
74 |
+
const auto& the_layout = LAYOUT; \
|
75 |
+
switch (the_layout) { \
|
76 |
+
case kSparseCsr: \
|
77 |
+
case kSparseBsr: \
|
78 |
+
return (ROW_DIM_ACTION)(); \
|
79 |
+
default: \
|
80 |
+
AT_ERROR( \
|
81 |
+
NAME, \
|
82 |
+
" expected sparse row compressed tensor layout but got ", \
|
83 |
+
the_layout); \
|
84 |
+
} \
|
85 |
+
}()
|
86 |
+
|
87 |
+
#define AT_DISPATCH_SPARSE_COL_COMPRESSED_LAYOUTS( \
|
88 |
+
LAYOUT, NAME, COL_DIM_ACTION) \
|
89 |
+
[&]() { \
|
90 |
+
const auto& the_layout = LAYOUT; \
|
91 |
+
switch (the_layout) { \
|
92 |
+
case kSparseCsc: \
|
93 |
+
case kSparseBsc: \
|
94 |
+
return (COL_DIM_ACTION)(); \
|
95 |
+
default: \
|
96 |
+
AT_ERROR( \
|
97 |
+
NAME, \
|
98 |
+
" expected sparse column compressed tensor layout but got ", \
|
99 |
+
the_layout); \
|
100 |
+
} \
|
101 |
+
}()
|
102 |
+
|
103 |
+
#define AT_DISPATCH_SPARSE_COMPRESSED_NONBLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \
|
104 |
+
[&]() { \
|
105 |
+
const auto& the_layout = LAYOUT; \
|
106 |
+
switch (the_layout) { \
|
107 |
+
case kSparseCsr: \
|
108 |
+
case kSparseCsc: \
|
109 |
+
return (ACTION)(); \
|
110 |
+
default: \
|
111 |
+
AT_ERROR( \
|
112 |
+
NAME, \
|
113 |
+
" expected sparse compressed (non-block) tensor layout but got ", \
|
114 |
+
the_layout); \
|
115 |
+
} \
|
116 |
+
}()
|
117 |
+
|
118 |
+
#define AT_DISPATCH_SPARSE_COMPRESSED_BLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \
|
119 |
+
[&]() { \
|
120 |
+
const auto& the_layout = LAYOUT; \
|
121 |
+
switch (the_layout) { \
|
122 |
+
case kSparseBsr: \
|
123 |
+
case kSparseBsc: \
|
124 |
+
return (ACTION)(); \
|
125 |
+
default: \
|
126 |
+
AT_ERROR( \
|
127 |
+
NAME, \
|
128 |
+
" expected sparse compressed block tensor layout but got ", \
|
129 |
+
the_layout); \
|
130 |
+
} \
|
131 |
+
}()
|
132 |
+
|
133 |
+
#define AT_DISPATCH_SPARSE_VALUE_TYPES(TYPE, NAME, ...) \
|
134 |
+
AT_DISPATCH_SWITCH( \
|
135 |
+
TYPE, \
|
136 |
+
NAME, \
|
137 |
+
AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
|
138 |
+
kComplexHalf, kHalf, kBool, kBFloat16, __VA_ARGS__))
|
139 |
+
|
140 |
+
namespace at::sparse_csr {
|
141 |
+
|
142 |
+
using SparseCsrTensor = Tensor;
|
143 |
+
|
144 |
+
inline bool is_sparse_compressed(const Layout& layout) {
|
145 |
+
switch (layout) {
|
146 |
+
case kSparseCsr:
|
147 |
+
case kSparseCsc:
|
148 |
+
case kSparseBsr:
|
149 |
+
case kSparseBsc:
|
150 |
+
return true;
|
151 |
+
default:;
|
152 |
+
}
|
153 |
+
return false;
|
154 |
+
}
|
155 |
+
|
156 |
+
inline bool is_sparse_compressed(const Tensor& self) {
|
157 |
+
return is_sparse_compressed(self.layout());
|
158 |
+
}
|
159 |
+
|
160 |
+
inline SparseCsrTensorImpl* get_sparse_csr_impl(const SparseCsrTensor& self) {
|
161 |
+
AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(
|
162 |
+
self.layout(), "get_sparse_csr_impl", [&] {});
|
163 |
+
return static_cast<SparseCsrTensorImpl*>(self.unsafeGetTensorImpl());
|
164 |
+
}
|
165 |
+
|
166 |
+
inline std::string layoutToString(
|
167 |
+
Layout layout,
|
168 |
+
bool upper = false,
|
169 |
+
bool lower = false) {
|
170 |
+
switch (layout) {
|
171 |
+
case kSparseCsr:
|
172 |
+
return (upper ? "CSR" : (lower ? "csr" : "Csr"));
|
173 |
+
case kSparseCsc:
|
174 |
+
return (upper ? "CSC" : (lower ? "csc" : "Csc"));
|
175 |
+
case kSparseBsr:
|
176 |
+
return (upper ? "BSR" : (lower ? "bsr" : "Bsr"));
|
177 |
+
case kSparseBsc:
|
178 |
+
return (upper ? "BSC" : (lower ? "bsc" : "Bsc"));
|
179 |
+
default:
|
180 |
+
TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
|
181 |
+
return "";
|
182 |
+
}
|
183 |
+
}
|
184 |
+
|
185 |
+
inline bool isCompressedRow(Layout layout) {
|
186 |
+
return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
|
187 |
+
layout, "isCompressedRow", [&] { return true; }, [&] { return false; });
|
188 |
+
}
|
189 |
+
|
190 |
+
inline bool isCompressedColumn(Layout layout) {
|
191 |
+
return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
|
192 |
+
layout,
|
193 |
+
"isCompressedColumn",
|
194 |
+
[&] { return false; },
|
195 |
+
[&] { return true; });
|
196 |
+
}
|
197 |
+
|
198 |
+
inline std::string compressedIndicesName(Layout layout) {
|
199 |
+
return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
|
200 |
+
layout,
|
201 |
+
"compressedIndicesName",
|
202 |
+
[&] { return "crow_indices"; },
|
203 |
+
[&] { return "ccol_indices"; });
|
204 |
+
}
|
205 |
+
|
206 |
+
inline std::string plainIndicesName(Layout layout) {
|
207 |
+
return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
|
208 |
+
layout,
|
209 |
+
"plainIndicesName",
|
210 |
+
[&] { return "col_indices"; },
|
211 |
+
[&] { return "row_indices"; });
|
212 |
+
}
|
213 |
+
|
214 |
+
inline std::string compressedDimName(Layout layout) {
|
215 |
+
switch (layout) {
|
216 |
+
case kSparseCsr:
|
217 |
+
return "row";
|
218 |
+
case kSparseCsc:
|
219 |
+
return "column";
|
220 |
+
case kSparseBsr:
|
221 |
+
return "row block";
|
222 |
+
case kSparseBsc:
|
223 |
+
return "column block";
|
224 |
+
default:
|
225 |
+
TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
|
226 |
+
return "";
|
227 |
+
}
|
228 |
+
}
|
229 |
+
|
230 |
+
inline std::string plainDimName(Layout layout) {
|
231 |
+
switch (layout) {
|
232 |
+
case kSparseCsr:
|
233 |
+
return "column";
|
234 |
+
case kSparseCsc:
|
235 |
+
return "row";
|
236 |
+
case kSparseBsr:
|
237 |
+
return "column block";
|
238 |
+
case kSparseBsc:
|
239 |
+
return "row block";
|
240 |
+
default:
|
241 |
+
TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
|
242 |
+
return "";
|
243 |
+
}
|
244 |
+
}
|
245 |
+
|
246 |
+
inline size_t rowDimension(Layout layout, IntArrayRef size) {
|
247 |
+
return size.size() - (isCompressedRow(layout) ? 2 : 1);
|
248 |
+
}
|
249 |
+
|
250 |
+
inline size_t columnDimension(Layout layout, IntArrayRef size) {
|
251 |
+
return size.size() - (isCompressedColumn(layout) ? 2 : 1);
|
252 |
+
}
|
253 |
+
|
254 |
+
inline size_t compressedDimension(
|
255 |
+
Layout layout,
|
256 |
+
IntArrayRef size,
|
257 |
+
size_t dense_ndim = 0) {
|
258 |
+
return size.size() - dense_ndim - (isCompressedRow(layout) ? 2 : 1);
|
259 |
+
}
|
260 |
+
|
261 |
+
inline size_t plainDimension(
|
262 |
+
Layout layout,
|
263 |
+
IntArrayRef size,
|
264 |
+
size_t dense_ndim = 0) {
|
265 |
+
return size.size() - dense_ndim - (isCompressedRow(layout) ? 1 : 2);
|
266 |
+
}
|
267 |
+
|
268 |
+
inline int64_t numBatchDimensions(Tensor const& self) {
|
269 |
+
return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
|
270 |
+
self.layout(),
|
271 |
+
"numBatchDimensions",
|
272 |
+
[&self] { return self.crow_indices().dim() - 1; },
|
273 |
+
[&self] { return self.ccol_indices().dim() - 1; });
|
274 |
+
}
|
275 |
+
|
276 |
+
inline std::pair<Tensor, Tensor> getCompressedPlainIndices(Tensor const& self) {
|
277 |
+
return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
|
278 |
+
self.layout(),
|
279 |
+
"getCompressedPlainIndices",
|
280 |
+
[&self] {
|
281 |
+
return std::make_pair(self.crow_indices(), self.col_indices());
|
282 |
+
},
|
283 |
+
[&self] {
|
284 |
+
return std::make_pair(self.ccol_indices(), self.row_indices());
|
285 |
+
});
|
286 |
+
}
|
287 |
+
|
288 |
+
inline Layout flip_compressed_layout(Layout layout) {
|
289 |
+
switch (layout) {
|
290 |
+
case kSparseCsr:
|
291 |
+
return kSparseCsc;
|
292 |
+
case kSparseCsc:
|
293 |
+
return kSparseCsr;
|
294 |
+
case kSparseBsr:
|
295 |
+
return kSparseBsc;
|
296 |
+
case kSparseBsc:
|
297 |
+
return kSparseBsr;
|
298 |
+
default:
|
299 |
+
TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
|
300 |
+
return kSparseCsr;
|
301 |
+
}
|
302 |
+
}
|
303 |
+
|
304 |
+
inline DimVector getBlockSize(Tensor const& self) {
|
305 |
+
int64_t n_batch = numBatchDimensions(self);
|
306 |
+
return at::DimVector(self.values().sizes().slice(n_batch + 1, 2));
|
307 |
+
}
|
308 |
+
|
309 |
+
inline at::OptionalArray<at::SymInt> getSymIntBlockSize(Tensor const& self) {
|
310 |
+
if (self.layout() == at::kSparseBsr || self.layout() == at::kSparseBsc) {
|
311 |
+
int64_t n_batch = numBatchDimensions(self);
|
312 |
+
return self.values().sym_sizes().slice(n_batch + 1, 2).vec();
|
313 |
+
} else {
|
314 |
+
return {};
|
315 |
+
}
|
316 |
+
}
|
317 |
+
|
318 |
+
template <typename binary_op_t, typename binary_op_out_t>
|
319 |
+
inline bool only_sparse_compressed_binary_op_trivial_cases(
|
320 |
+
const Tensor& self,
|
321 |
+
const Tensor& other,
|
322 |
+
const Scalar& alpha,
|
323 |
+
Tensor& out,
|
324 |
+
const binary_op_t& binary_op,
|
325 |
+
const binary_op_out_t& binary_op_out) {
|
326 |
+
// Only sparse compressed! Just like the name says :)
|
327 |
+
TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(self));
|
328 |
+
TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(other));
|
329 |
+
TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(out));
|
330 |
+
|
331 |
+
// Bypass BLAS if there are matches in (self, other, out)
|
332 |
+
if (self.is_same(out) && self.is_same(other)) {
|
333 |
+
binary_op_out(self.values(), other.values(), alpha);
|
334 |
+
return true;
|
335 |
+
}
|
336 |
+
if (self.is_same(other)) {
|
337 |
+
auto [compressed_indices, plain_indices] =
|
338 |
+
at::sparse_csr::getCompressedPlainIndices(self);
|
339 |
+
static_cast<SparseCsrTensorImpl*>(out.unsafeGetTensorImpl())
|
340 |
+
->set_member_tensors(
|
341 |
+
compressed_indices,
|
342 |
+
plain_indices,
|
343 |
+
binary_op(self.values(), other.values(), alpha),
|
344 |
+
self.sizes());
|
345 |
+
return true;
|
346 |
+
}
|
347 |
+
return false;
|
348 |
+
}
|
349 |
+
|
350 |
+
inline bool only_sparse_compressed_add_trivial_cases(
|
351 |
+
const Tensor& self,
|
352 |
+
const Tensor& other,
|
353 |
+
const Scalar& alpha,
|
354 |
+
Tensor& out) {
|
355 |
+
return only_sparse_compressed_binary_op_trivial_cases(
|
356 |
+
self,
|
357 |
+
other,
|
358 |
+
alpha,
|
359 |
+
out,
|
360 |
+
[](const Tensor& v1, const Tensor& v2, const Scalar& alpha) {
|
361 |
+
return v1.add(v2, alpha);
|
362 |
+
},
|
363 |
+
[](const Tensor& v1, const Tensor& v2, const Scalar& alpha) {
|
364 |
+
return v1.add_(v2, alpha);
|
365 |
+
});
|
366 |
+
}
|
367 |
+
|
368 |
+
inline Tensor to_type(const Tensor& input, ScalarType dtype) {
|
369 |
+
auto [compressed_indices, plain_indices] =
|
370 |
+
at::sparse_csr::getCompressedPlainIndices(input);
|
371 |
+
return at::_sparse_compressed_tensor_unsafe(
|
372 |
+
compressed_indices,
|
373 |
+
plain_indices,
|
374 |
+
std::move(input.values()).to(dtype),
|
375 |
+
input.sizes(),
|
376 |
+
dtype,
|
377 |
+
input.layout(),
|
378 |
+
input.device(),
|
379 |
+
input.options().pinned_memory_opt());
|
380 |
+
}
|
381 |
+
|
382 |
+
template <typename acc_t, typename scalar_t>
|
383 |
+
inline std::tuple<Tensor, Tensor> create_acc_buffer(
|
384 |
+
TensorOptions option,
|
385 |
+
ScalarType type,
|
386 |
+
int64_t nnz = -1) {
|
387 |
+
Tensor new_values, new_values_acc;
|
388 |
+
constexpr bool need_acc = !std::is_same_v<scalar_t, acc_t>;
|
389 |
+
bool is_integral = at::isIntegralType(type, /*includeBool=*/true);
|
390 |
+
if constexpr (need_acc) {
|
391 |
+
auto acc_dtype = CppTypeToScalarType<acc_t>::value;
|
392 |
+
new_values_acc = at::empty({}, option.dtype(acc_dtype));
|
393 |
+
new_values = is_integral ? new_values_acc : at::empty({}, option);
|
394 |
+
} else {
|
395 |
+
new_values = new_values_acc = at::empty({}, option);
|
396 |
+
}
|
397 |
+
if (nnz != -1) {
|
398 |
+
return std::make_tuple(
|
399 |
+
new_values.resize_(nnz), new_values_acc.resize_(nnz));
|
400 |
+
} else {
|
401 |
+
return std::make_tuple(new_values, new_values_acc);
|
402 |
+
}
|
403 |
+
}
|
404 |
+
|
405 |
+
inline void copy_from_acc_buffer(Tensor& new_values, Tensor& new_values_acc) {
|
406 |
+
if (!new_values_acc.is_same(new_values)) {
|
407 |
+
new_values.copy_(new_values_acc);
|
408 |
+
}
|
409 |
+
}
|
410 |
+
|
411 |
+
} // namespace at::sparse_csr
|
venv/lib/python3.10/site-packages/torch/include/ATen/Storage.h
ADDED
@@ -0,0 +1,2 @@
#pragma once
#include <c10/core/Storage.h>
venv/lib/python3.10/site-packages/torch/include/ATen/Tensor.h
ADDED
@@ -0,0 +1,3 @@
#pragma once

#include <ATen/core/Tensor.h>
venv/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h
ADDED
@@ -0,0 +1,2 @@
#pragma once
#include <ATen/core/TensorAccessor.h>
venv/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h
ADDED
@@ -0,0 +1,144 @@
#pragma once

#include <ATen/core/TensorBase.h>
#include <c10/core/WrapDimMinimal.h>

namespace at {

// Returns whether the tensor geometry represented by `sizes` and `strides` is
// contiguous. Although we cache is_contiguous in tensor now, this is still
// useful because it allows checking if a particular geometry is contiguous
// without explicitly constructing a tensor, e.g., when you want to choose a
// kernel strategy based on whether a subgeometry is contiguous.
TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);

struct TORCH_API TensorGeometry {
  TensorGeometry() = default;

  explicit TensorGeometry(c10::SymIntArrayRef sizes)
      : sizes_(sizes.vec()),
        strides_(sizes.size()),
        has_symbolic_sizes_strides_(
            !c10::asIntArrayRefSlowOpt(sizes).has_value()) {
    int64_t dim = static_cast<int64_t>(sizes.size());
    c10::SymInt expected_stride = 1;
    for (int64_t i = dim - 1; i >= 0; i--) {
      strides_[i] = expected_stride;
      expected_stride *= sizes_[i];
    }
    numel_ = expected_stride;
  }

  explicit TensorGeometry(const TensorBase& t)
      : sizes_(t.sym_sizes().vec()),
        strides_(t.sym_strides().vec()),
        storage_offset_(t.sym_storage_offset()),
        numel_(t.sym_numel()),
        has_symbolic_sizes_strides_(
            t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {}

  // true if the tensor is contiguous
  bool is_contiguous() const;

  int64_t dim() const {
    return static_cast<int64_t>(sizes_.size());
  }

  int64_t size(int64_t dim) const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return sizes_.at(static_cast<size_t>(dim)).as_int_unchecked();
  }
  c10::IntArrayRef sizes() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return c10::asIntArrayRefUnchecked(sizes_);
  }
  int64_t stride(int64_t dim) const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return strides_.at(static_cast<size_t>(dim)).as_int_unchecked();
  }
  c10::IntArrayRef strides() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return c10::asIntArrayRefUnchecked(strides_);
  }
  int64_t storage_offset() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return storage_offset_.as_int_unchecked();
  }
  int64_t numel() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return numel_.as_int_unchecked();
  }

  c10::SymInt sym_size(int64_t dim) const {
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return sizes_.at(static_cast<size_t>(dim));
  }
  c10::SymIntArrayRef sym_sizes() const {
    return sizes_;
  }
  c10::SymInt sym_stride(int64_t dim) const {
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return strides_.at(static_cast<size_t>(dim));
  }
  c10::SymIntArrayRef sym_strides() const {
    return strides_;
  }
  c10::SymInt sym_storage_offset() const {
    return storage_offset_;
  }
  c10::SymInt sym_numel() const {
    return numel_;
  }

  TensorGeometry transpose(int64_t dim0, int64_t dim1) {
    TensorGeometry r = *this; // copy
    TORCH_CHECK(
        dim0 < dim(),
        "transpose: dim0=",
        dim0,
        " out of range (dim=",
        dim(),
        ")")
    TORCH_CHECK(
        dim1 < dim(),
        "transpose: dim1=",
        dim1,
        " out of range (dim=",
        dim(),
        ")")
    std::swap(r.sizes_[dim0], r.sizes_[dim1]);
    std::swap(r.strides_[dim0], r.strides_[dim1]);
    return r;
  }

  std::vector<c10::SymInt>& mutable_sizes() {
    return sizes_;
  }
  std::vector<c10::SymInt>& mutable_strides() {
    return strides_;
  }
  c10::SymInt& mutable_storage_offset() {
    return storage_offset_;
  }
  void recompute() {
    // recalculate numel after a change
    c10::SymInt numel = 1;
    for (const auto& i : sizes_) {
      numel = numel * i;
    }
    numel_ = std::move(numel);
    has_symbolic_sizes_strides_ =
        !c10::asIntArrayRefSlowOpt(sizes_).has_value();
  }

 private:
  std::vector<c10::SymInt> sizes_;
  std::vector<c10::SymInt> strides_;
  c10::SymInt storage_offset_;
  c10::SymInt numel_;
  bool has_symbolic_sizes_strides_{false};
};

} // namespace at
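A small hedged sketch of how TensorGeometry is typically used, based only on the members visible above (the function name is illustrative, and the input is assumed to have at least two dimensions):

#include <ATen/ATen.h>
#include <ATen/TensorGeometry.h>

// Record a tensor's geometry without holding on to its storage, derive a
// transposed geometry, and check contiguity without materializing a tensor.
bool transposed_geometry_is_contiguous(const at::Tensor& t) {
  at::TensorGeometry geom(t);                       // captures sizes/strides/offset/numel
  at::TensorGeometry swapped = geom.transpose(0, 1); // assumes t.dim() >= 2
  return swapped.is_contiguous();
}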
venv/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/DimVector.h>
|
4 |
+
#include <ATen/core/Dimname.h>
|
5 |
+
#include <c10/core/TensorOptions.h>
|
6 |
+
#include <c10/util/strides.h>
|
7 |
+
|
8 |
+
namespace at {
|
9 |
+
|
10 |
+
class Tensor;
|
11 |
+
|
12 |
+
namespace impl {
|
13 |
+
|
14 |
+
// Use this to define the prototype for a meta function. There are two
|
15 |
+
// versions; one that takes one argument (just the operator name), or FUNC2
|
16 |
+
// variant that takes two arguments (operator name and overload name).
|
17 |
+
//
|
18 |
+
// Example usage:
|
19 |
+
//
|
20 |
+
// TORCH_META_FUNC2(add, Tensor) (
|
21 |
+
// const Tensor& self, const Tensor& other
|
22 |
+
// ) {
|
23 |
+
// ... compute sizes and options ...
|
24 |
+
// set_output(sizes, options);
|
25 |
+
// }
|
26 |
+
//
|
27 |
+
#define TORCH_META_FUNC(name) void structured_##name::meta
|
28 |
+
#define TORCH_META_FUNC2(name, overload) \
|
29 |
+
void structured_##name##_##overload::meta
|
30 |
+
|
31 |
+
// These are versions of TORCH_META_FUNC(2) that include a precompute_out struct
|
32 |
+
// as a return value. They should be used when the kernel in question has
|
33 |
+
// precomputed values declared in native_functions.yaml and the corresponding
|
34 |
+
// implementation should return an instance of the aforementioned struct.
|
35 |
+
#define TORCH_PRECOMPUTE_META_FUNC(name) \
|
36 |
+
structured_##name::meta_return_ty structured_##name::meta
|
37 |
+
#define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \
|
38 |
+
structured_##name##_##overload::meta_return_ty \
|
39 |
+
structured_##name##_##overload::meta
|
40 |
+
|
41 |
+
// Use this to create a precompute struct in a meta function.
|
42 |
+
#define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<>
|
43 |
+
#define TORCH_PRECOMPUTE_STRUCT2(name, overload) \
|
44 |
+
structured_##name##_##overload::precompute_out<>
|
45 |
+
|
46 |
+
// Use this to define the prototype for an implementation. This takes only
|
47 |
+
// one argument, which is the name of the dispatch key entry you're
|
48 |
+
// implementing.
|
49 |
+
//
// Example usage:
//
//   TORCH_IMPL_FUNC(add_cpu) (
//       Tensor& result, const Tensor& self, const Tensor& other
//   ) {
//       ... do the actual implementation ...
//   }
//
#define TORCH_IMPL_FUNC(name) void structured_##name::impl

// Base class for all structured kernel classes. The set_output virtual
// method is varied depending whether or not the operator is
// functional/out/inplace, and could also be specialized for CPU/CUDA/etc
// (although presently it isn't).
//
// A notable subclass of this interface is TensorIteratorBase.
struct TORCH_API MetaBase {
  MetaBase() = default;
  MetaBase(const MetaBase&) = default;
  MetaBase& operator=(const MetaBase&) = default;
  MetaBase(MetaBase&&) noexcept = default;
  MetaBase& operator=(MetaBase&&) noexcept = default;
  virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;

  // Note: [set_output_*]
  // See: https://github.com/pytorch/pytorch/issues/69813
  // Whenever defining the output properties in the META function of a
  // structured kernel (what was usually done with `set_output`), use one of
  // these 3 variants, instead. In order to decide which variant to use, check
  // the following decision tree:
  //
  // - Can the kernel you are going to implement support output tensors
  //   with arbitrary strides?
  //     |
  //     -- YES: `set_output_raw_strided`
  //     |
  //     -- NO: Should the output tensor strides be contiguous?
  //         |
  //         -- YES: `set_output_contiguous`
  //         |
  //         -- NO: `set_output_strided`
  //
  // Use this function whenever the kernel requires specific strides for the
  // output. If `strides` does not match the given output strides, proxy outputs
  // will be created and passed to the IMPL function.
  virtual void set_output_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides,
      TensorOptions options,
      DimnameList names = {}) {
    TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
  }

  // Use this function whenever the kernel knows how to handle arbitrary strided
  // outputs. This function has the same behavior as the old `set_output`: it
  // will only re-stride if the given output was resized.
  virtual void set_output_raw_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides_hint,
      TensorOptions options,
      DimnameList names = {}) {
    TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
  }

  // Use this function if the kernel requires contiguous strides.
  // Alias for `set_output_strided`, but with contiguous strides.
  void set_output_contiguous(
      int64_t output_idx,
      IntArrayRef sizes,
      TensorOptions options,
      DimnameList names = {}) {
    auto strides = c10::contiguous_strides(sizes);
    set_output_strided(output_idx, sizes, strides, options, names);
  }

  // Returns a reference to an undefined tensor if there is no presupplied
  // output
  const Tensor& maybe_get_output() {
    return maybe_get_output(0);
  }
  virtual ~MetaBase() = default;
};

} // namespace impl

} // namespace at
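A minimal sketch of how the pieces above fit together: a structured kernel pairs a META function (shape inference plus one of the `set_output_*` calls, following the decision tree in the comment) with a TORCH_IMPL_FUNC body that fills in the framework-allocated output. The operator name `my_add` is hypothetical, and real signatures are generated by PyTorch's codegen from native_functions.yaml; this is illustration only, not a buildable kernel on its own.

// Sketch only: `my_add` is a made-up operator; real META/IMPL signatures
// come from codegen. The META function asks for a contiguous output,
// since this hypothetical kernel does not need special strides.
TORCH_META_FUNC(my_add)(const Tensor& self, const Tensor& other) {
  auto sizes = at::infer_size(self.sizes(), other.sizes());
  set_output_contiguous(0, sizes, self.options());
}

TORCH_IMPL_FUNC(my_add)(
    Tensor& result, const Tensor& self, const Tensor& other) {
  // ... write into `result`, which was created (or re-strided into a proxy)
  // based on the META call above ...
}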
venv/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalState.h
ADDED
@@ -0,0 +1,113 @@
#pragma once

#include <c10/core/InferenceMode.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/util/Exception.h>
#include <c10/util/ThreadLocalDebugInfo.h>

#include <ATen/FuncTorchTLS.h>
#include <ATen/PythonTorchFunctionTLS.h>
#include <ATen/SavedTensorHooks.h>
#include <ATen/ThreadLocalPythonObjects.h>
#include <ATen/record_function.h>
#include <c10/core/impl/PythonDispatcherTLS.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>

namespace at {

// Thread local state contains values that are preserved across
// thread boundaries (e.g. at::launch/JIT fork, autograd).
// Note at::parallel_for doesn't preserve TLS across thread boundaries.
class TORCH_API ThreadLocalState {
 public:
  // Saves the thread local variables' values and
  // returns them as a ThreadLocalState
  ThreadLocalState();

  // set_grad_mode - force the value of the grad mode TLS in
  // the current state object. This is used for example in the
  // autograd engine.
  void set_grad_mode(bool enabled);

  // set_multithreading_enabled - force the value of the multithreading
  // enabled TLS in the current state object. This is used for example in
  // the autograd engine.
  void set_multithreading_enabled(bool enabled);

  // Sets thread local variables in the current thread,
  // according to the thread boundary specified
  static void setThreadLocalState(const ThreadLocalState& state);

 private:
  c10::impl::LocalDispatchKeySet dispatch_key_;

  // ThreadLocalDebugInfo does not change after being created
  // with DebugInfoGuard
  std::shared_ptr<c10::ThreadLocalDebugInfo> debug_info_;

  // RecordFunction TLS
  RecordFunctionTLS rf_tls_;

  // TLS for out-of-tree functorch
  // See NOTE [functorch TLS in pytorch/pytorch] for why this needs to be a
  // pointer (spoiler alert: it's due to the indirection)
  // This needs to be a shared_ptr instead of a unique_ptr because
  // ThreadLocalState is copy-able and does indeed get copied. Maybe we can
  // consider adding an explicit copy constructor for ThreadLocalState in the
  // future but I didn't want to add one just for this.
  std::shared_ptr<const functorch::FuncTorchTLSBase> functorch_tls_;

  // TLS for AutogradModes
  AutogradState autograd_tls_;

  // TLS for enable_torch_dispatch_mode
  c10::impl::TorchDispatchModeTLS torch_dispatch_mode_state_;

  // TLS for enable_python_dispatcher
  c10::impl::PyInterpreter* python_dispatcher_state_;

  // TLS for __torch_function__ (mode and disable_torch_function)
  at::impl::PythonTorchFunctionTLS python_torch_function_state_;

  // TLS for saved tensors default hooks
  at::impl::SavedTensorDefaultHooksTLS saved_tensors_default_hooks_state_;

  bool functionalization_reapply_views_state_;

  // TLS for arbitrary python objects that is registered via hooks
  at::impl::ThreadLocalPythonObjects saved_objects_;

  friend class ThreadLocalStateGuard;
};

// Guard to set and reset the thread local state
class TORCH_API ThreadLocalStateGuard {
 public:
  explicit ThreadLocalStateGuard(const ThreadLocalState& state)
      : prev_state_(ThreadLocalState()) {
    // set the given state across the thread boundary
    ThreadLocalState::setThreadLocalState(state);
  }

  ~ThreadLocalStateGuard() {
    // restore previously set variables
    ThreadLocalState::setThreadLocalState(prev_state_);
  }

 private:
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const ThreadLocalState prev_state_;
};

template <typename T>
auto wrapPropagateTLSState(T callback) {
  return [tls_state = ThreadLocalState(),
          callback = std::move(callback)](auto&&... args) {
    ThreadLocalStateGuard g(tls_state);
    // Propagate value returned by callback().
    return callback(std::forward<decltype(args)>(args)...);
  };
}

} // namespace at
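A minimal usage sketch for the header above: `wrapPropagateTLSState` snapshots the caller's thread-local state (grad mode, dispatch keys, TorchDispatch/TorchFunction modes, etc.) at wrap time, and replays it through a `ThreadLocalStateGuard` whenever the wrapped callable later runs on another thread. The `run_on_worker`/`do_work` names are hypothetical; this is only an illustration of the intended pattern.

// Sketch only, assuming a hypothetical worker entry point.
#include <ATen/ThreadLocalState.h>
#include <functional>
#include <thread>

void run_on_worker(std::function<void()> do_work) {
  // Capture the current thread's TLS now; the guard inside the wrapper
  // installs it on the worker and restores the worker's previous TLS
  // when the callable returns.
  auto tls_aware = at::wrapPropagateTLSState(std::move(do_work));
  std::thread worker(std::move(tls_aware));
  worker.join();
}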
venv/lib/python3.10/site-packages/torch/include/ATen/Utils.h
ADDED
@@ -0,0 +1,138 @@
#pragma once

#include <ATen/EmptyTensor.h>
#include <ATen/Formatting.h>
#include <ATen/core/ATenGeneral.h>
#include <ATen/core/Generator.h>
#include <c10/core/ScalarType.h>
#include <c10/core/StorageImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>

#include <algorithm>
#include <memory>
#include <numeric>
#include <sstream>
#include <typeinfo>

#define AT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;         \
  void operator=(const TypeName&) = delete

namespace at {

TORCH_API int _crash_if_asan(int);

// Converts a TensorList (i.e. ArrayRef<Tensor>) to a vector of TensorImpl*.
// NB: This is ONLY used by legacy TH bindings, and ONLY used by cat.
// Once cat is ported entirely to ATen this can be deleted!
static inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
    ArrayRef<Tensor> tensors,
    const char* name,
    int pos,
    c10::DeviceType device_type,
    ScalarType scalar_type) {
  std::vector<TensorImpl*> unwrapped;
  unwrapped.reserve(tensors.size());
  for (const auto i : c10::irange(tensors.size())) {
    const auto& expr = tensors[i];
    if (expr.layout() != Layout::Strided) {
      AT_ERROR(
          "Expected dense tensor but got ",
          expr.layout(),
          " for sequence element ",
          i,
          " in sequence argument at position #",
          pos,
          " '",
          name,
          "'");
    }
    if (expr.device().type() != device_type) {
      AT_ERROR(
          "Expected object of device type ",
          device_type,
          " but got device type ",
          expr.device().type(),
          " for sequence element ",
          i,
          " in sequence argument at position #",
          pos,
          " '",
          name,
          "'");
    }
    if (expr.scalar_type() != scalar_type) {
      AT_ERROR(
          "Expected object of scalar type ",
          scalar_type,
          " but got scalar type ",
          expr.scalar_type(),
          " for sequence element ",
          i,
          " in sequence argument at position #",
          pos,
          " '",
          name,
          "'");
    }
    unwrapped.emplace_back(expr.unsafeGetTensorImpl());
  }
  return unwrapped;
}

template <size_t N>
std::array<int64_t, N> check_intlist(
    ArrayRef<int64_t> list,
    const char* name,
    int pos) {
  if (list.empty()) {
    // TODO: is this necessary? We used to treat nullptr-vs-not in IntList
    // differently with strides as a way of faking optional.
    list = {};
  }
  auto res = std::array<int64_t, N>();
  if (list.size() == 1 && N > 1) {
    res.fill(list[0]);
    return res;
  }
  if (list.size() != N) {
    AT_ERROR(
        "Expected a list of ",
        N,
        " ints but got ",
        list.size(),
        " for argument #",
        pos,
        " '",
        name,
        "'");
  }
  std::copy_n(list.begin(), N, res.begin());
  return res;
}

using at::detail::check_size_nonnegative;

namespace detail {

template <typename T>
TORCH_API Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
TORCH_API Tensor
tensor_backend(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
TORCH_API Tensor
tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
TORCH_API Tensor
tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options);
} // namespace detail

} // namespace at
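A short sketch of how `check_intlist` behaves, since the broadcast rule is easy to miss: a one-element list is replicated into all N slots, an exact-length list is copied through, and anything else raises via AT_ERROR. The `parse_kernel_size` wrapper and the argument name/position are hypothetical.

// Sketch only: hypothetical caller of at::check_intlist<N>.
#include <ATen/Utils.h>

std::array<int64_t, 2> parse_kernel_size(at::ArrayRef<int64_t> ks) {
  // {3}       -> {3, 3}   (single value broadcast to both slots)
  // {3, 5}    -> {3, 5}   (exact length copied through)
  // {3, 5, 7} -> error: "Expected a list of 2 ints but got 3 ..."
  return at::check_intlist<2>(ks, /*name=*/"kernel_size", /*pos=*/1);
}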
venv/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h
ADDED
@@ -0,0 +1,24 @@
#pragma once
#include <c10/macros/Macros.h>
#include <type_traits>

namespace at {

/**
   Computes ceil(a / b)
*/
template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
C10_ALWAYS_INLINE C10_HOST_DEVICE T ceil_div(T a, T b) {
  return (a + b - 1) / b;
}

/**
   Computes ceil(a / b) * b; i.e., rounds up `a` to the next highest
   multiple of b
*/
template <typename T>
C10_ALWAYS_INLINE C10_HOST_DEVICE T round_up(T a, T b) {
  return ceil_div(a, b) * b;
}

} // namespace at
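The typical use of these helpers is sizing a grid of fixed-size blocks. A small sketch with illustrative numbers (the `blocks_for` function is made up for the example):

// Sketch only: dividing work into fixed-size blocks with at::ceil_div.
#include <ATen/ceil_div.h>
#include <cstdint>

int64_t blocks_for(int64_t num_elements, int64_t block_size) {
  // e.g. num_elements = 1000, block_size = 256:
  //   at::ceil_div(1000, 256) == 4 blocks
  //   at::round_up(1000, 256) == 1024 padded elements
  return at::ceil_div(num_elements, block_size);
}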
venv/lib/python3.10/site-packages/torch/include/ATen/code_template.h
ADDED
@@ -0,0 +1,243 @@
#pragma once

#include <c10/util/irange.h>

#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

namespace at::jit {

// A template environment is a mapping from template variable names, e.g.,
// identifier (corresponding to $identifier) to their expansions.
//
// This template environment supports storing strings, numbers and lists
// of strings, and can be chained together (so that lookup proceeds in
// the top level environment, and then recurses into a parent
// environment if the key is not found.)
struct TemplateEnv {
  TemplateEnv() = default;
  TemplateEnv(TemplateEnv& parent) : parent(&parent) {}

  using string_list = std::vector<std::string>;

  // Add a string 'v' to the map at key 'k'.
  void s(const std::string& k, const std::string& v) {
    strings_[k] = v;
    lists_.erase(k);
  }

  // Add a number 'v' to the map at key 'k'
  template <typename T>
  void d(const std::string& k, const T& v) {
    strings_[k] = c10::to_string(v);
    lists_.erase(k);
  }

  // Retrieve the string representation of the value stored at 'k' from the
  // map. Raises an exception if the key is not found.
  const std::string& s(const std::string& k) const {
    if (strings_.count(k) == 0) {
      if (parent) {
        return parent->s(k);
      }
      notFound(k);
    }
    return strings_.at(k);
  }

  // Store a list of strings 'v' in the map at 'k'.
  void v(const std::string& k, const string_list& v) {
    lists_[k] = v;
    strings_.erase(k);
  }

  // Retrieve a list of strings stored at 'k' from the map.
  // Raises an exception if the key is not found.
  const string_list& v(const std::string& k) const {
    if (lists_.count(k) == 0) {
      if (parent) {
        return parent->v(k);
      }
      notFound(k);
    }
    return lists_.at(k);
  }

  // Test if a string 'k' is a string (as opposed to a list.)
  bool keyIsString(const std::string& k) const {
    if (strings_.count(k) > 0)
      return true;
    if (lists_.count(k) > 0)
      return false;
    if (parent)
      return parent->keyIsString(k);
    notFound(k);
  }

 private:
  [[noreturn]] void notFound(const std::string& k) const {
    std::stringstream ss;
    ss << "key not found: " << k;
    throw std::logic_error(ss.str());
  }

  std::unordered_map<std::string, std::string> strings_;
  std::unordered_map<std::string, string_list> lists_;
  TemplateEnv* parent{nullptr};
};

/*
# Match $identifier or ${identifier} and replace with the value in env.
# If this identifier is at the beginning of whitespace on a line
# and its value is a list then it is treated as
# block substitution by indenting all lines of all elements.
# If the identifier is on a line starting with non-whitespace and a list
# then it is comma separated. ${,foo} will insert a comma before the list
# if this list is not empty and ${foo,} will insert one after.
*/
struct CodeTemplate {
  /* implicit */ CodeTemplate(std::string t) : template_text(std::move(t)) {}

  std::string format(const TemplateEnv& env) const {
    std::stringstream out;
    size_t pos = 0;
    size_t indent = 0;
    bool all_whitespace = true;
    while (pos < template_text.size()) {
      char c = template_text[pos];
      if (c == '$') {
        std::stringstream kss;
        // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
        bool comma_before;
        // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
        bool comma_after;
        size_t new_pos = parseKey(pos, kss, comma_before, comma_after);
        std::string k = kss.str();
        bool is_string = env.keyIsString(k);
        if (all_whitespace) {
          if (is_string)
            emitStringWithIndents(out, indent, env.s(k));
          else
            emitLinesIndented(out, indent, env.v(k));
        } else {
          if (is_string)
            out << env.s(k);
          else
            emitCommaSeparatedList(out, env.v(k), comma_before, comma_after);
        }
        all_whitespace = false;
        pos = new_pos;
      } else {
        out << c;
        if (!isspace(c))
          all_whitespace = false;
        indent++;
        if (c == '\n') {
          indent = 0;
          all_whitespace = true;
        }
        pos++;
      }
    }
    return out.str();
  }

 private:
  using string_list = std::vector<std::string>;
  char charAt(size_t p) const {
    if (p >= template_text.size())
      throw std::logic_error("EOS found in key");
    return template_text[p];
  }
  size_t parseKey(
      size_t pos,
      std::ostream& k,
      bool& comma_before,
      bool& comma_after) const {
    comma_before = false;
    comma_after = false;
    pos++;
    if (charAt(pos) == '{') {
      pos++;
      if (charAt(pos) == ',') {
        comma_before = true;
        pos++;
      }
      pos = parseIdent(pos, k);
      if (charAt(pos) == ',') {
        comma_after = true;
        pos++;
      }
      if (charAt(pos) != '}')
        throw std::logic_error("missing terminating '}'");
      pos++;
      return pos;
    } else {
      return parseIdent(pos, k);
    }
  }
  size_t parseIdent(size_t pos, std::ostream& k) const {
    while (pos < template_text.size() &&
           (isalnum(template_text[pos]) || template_text[pos] == '_')) {
      k << template_text[pos];
      pos++;
    }
    return pos;
  }
  void emitCommaSeparatedList(
      std::ostream& out,
      const string_list& strings,
      bool comma_before,
      bool comma_after) const {
    if (comma_before && !strings.empty())
      out << ", ";
    for (const auto i : c10::irange(strings.size())) {
      if (i > 0)
        out << ", ";
      out << strings[i];
    }
    if (comma_after && !strings.empty())
      out << ", ";
  }
  // These indentation functions follow the convention that they never emit
  // leading or trailing newlines when the input string does not have leading
  // or trailing newlines. It's the responsibility of the calling function
  // to indent correctly in the context.
  void emitIndent(std::ostream& out, size_t indent) const {
    for (C10_UNUSED const auto i : c10::irange(indent)) {
      out << " ";
    }
  }
  void emitStringWithIndents(
      std::ostream& out,
      size_t indent,
      const std::string& str) const {
    for (auto c : str) {
      out << c;
      if (c == '\n') {
        emitIndent(out, indent);
      }
    }
  }
  void emitLinesIndented(
      std::stringstream& out,
      size_t indent,
      const string_list& strings) const {
    for (const auto i : c10::irange(strings.size())) {
      if (i > 0)
        emitIndent(out, indent);
      emitStringWithIndents(out, indent, strings[i]);
      if (i + 1 != strings.size())
        out << "\n";
    }
  }
  std::string template_text;
};

static inline std::string format(const std::string& fmt, TemplateEnv& env) {
  return CodeTemplate(fmt).format(env);
}

} // namespace at::jit
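A small usage sketch for the template facility above: scalar bindings go in with `s`/`d`, list bindings with `v`, and `CodeTemplate::format` substitutes `$name`/`${name}` occurrences, emitting lists comma-separated when they appear mid-line. The template text and keys here are made up for illustration.

// Sketch only: hypothetical template and bindings.
#include <ATen/code_template.h>
#include <iostream>

static const at::jit::CodeTemplate kernel_source(R"(
void ${name}(${args}) {
  // body
}
)");

void emit_example() {
  at::jit::TemplateEnv env;
  env.s("name", "my_kernel");                        // scalar substitution
  env.v("args", {"float* out", "const float* in"});  // list -> comma separated
  std::cout << kernel_source.format(env);
}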