Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ArrayRef.h +2 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h +49 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Context.h +518 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h +527 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h +31 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NativeFunctions.h +1295 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorIteratorInternal.h +72 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Utils.h +138 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/WrapDimUtilsMulti.h +44 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h +211 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/impl/cuda_cmake_macros.h +6 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h +174 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h +121 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h +343 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h +67 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/DimVector.h +16 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Exception.h +715 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwned.h +143 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h +74 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h +29 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h +274 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h +247 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h +90 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h +155 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h +154 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/FunctionRef.h +72 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Half.h +506 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h +78 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Load.h +38 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Logging.h +375 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h +233 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Optional.h +47 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Registry.h +327 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ScopeExit.h +53 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h +88 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h +202 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h +153 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Type.h +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h +168 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h +196 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeList.h +516 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeSafeSignMath.h +144 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeTraits.h +152 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Unicode.h +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/UniqueVoidPtr.h +124 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Unroll.h +29 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/accumulate.h +134 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/bits.h +61 -0
.gitattributes
CHANGED
@@ -203,3 +203,4 @@ llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_infer.so.
 env-llmeval/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ArrayRef.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/util/ArrayRef.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h
ADDED
@@ -0,0 +1,49 @@
+#pragma once
+
+#include <ATen/core/Generator.h>
+#include <ATen/core/MT19937RNGEngine.h>
+#include <c10/core/GeneratorImpl.h>
+#include <c10/util/Optional.h>
+
+namespace at {
+
+struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
+  // Constructors
+  CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
+  ~CPUGeneratorImpl() override = default;
+
+  // CPUGeneratorImpl methods
+  std::shared_ptr<CPUGeneratorImpl> clone() const;
+  void set_current_seed(uint64_t seed) override;
+  void set_offset(uint64_t offset) override;
+  uint64_t get_offset() const override;
+  uint64_t current_seed() const override;
+  uint64_t seed() override;
+  void set_state(const c10::TensorImpl& new_state) override;
+  c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+  static c10::DeviceType device_type();
+  uint32_t random();
+  uint64_t random64();
+  c10::optional<float> next_float_normal_sample();
+  c10::optional<double> next_double_normal_sample();
+  void set_next_float_normal_sample(c10::optional<float> randn);
+  void set_next_double_normal_sample(c10::optional<double> randn);
+  at::mt19937 engine();
+  void set_engine(at::mt19937 engine);
+
+ private:
+  CPUGeneratorImpl* clone_impl() const override;
+  at::mt19937 engine_;
+  c10::optional<float> next_float_normal_sample_;
+  c10::optional<double> next_double_normal_sample_;
+};
+
+namespace detail {
+
+TORCH_API const Generator& getDefaultCPUGenerator();
+TORCH_API Generator
+createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
+
+} // namespace detail
+
+} // namespace at
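The `detail::getDefaultCPUGenerator()` accessor above returns a shared handle to the process-wide CPU RNG. As a minimal illustrative sketch (our own helper name, modelled on `at::manual_seed()` from the Context.h diff below, not part of this upload), reseeding it looks like this; the lock follows ATen's convention of acquiring the generator's mutex before touching its state:

    #include <ATen/CPUGeneratorImpl.h>
    #include <cstdint>
    #include <mutex>

    // Sketch: reseed the default CPU generator. Copying the Generator
    // handle is cheap; it is a reference-counted wrapper around the impl.
    void reseed_default_cpu(uint64_t seed) {
      auto gen = at::detail::getDefaultCPUGenerator();
      // See Note [Acquire lock when using random generators]
      std::lock_guard<std::mutex> lock(gen.mutex());
      gen.set_current_seed(seed);
    }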
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Context.h
ADDED
@@ -0,0 +1,518 @@
+#pragma once
+
+#include <ATen/CPUGeneratorImpl.h>
+#include <ATen/LinalgBackend.h>
+#include <ATen/core/ATenGeneral.h>
+#include <ATen/core/DeprecatedTypeProperties.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/LegacyTypeDispatch.h>
+#include <ATen/detail/CUDAHooksInterface.h>
+#include <ATen/detail/HIPHooksInterface.h>
+#include <ATen/detail/IPUHooksInterface.h>
+#include <ATen/detail/MPSHooksInterface.h>
+#include <ATen/detail/MTIAHooksInterface.h>
+#include <ATen/detail/ORTHooksInterface.h>
+#include <ATen/detail/PrivateUse1HooksInterface.h>
+#include <ATen/detail/XPUHooksInterface.h>
+#include <c10/core/QEngine.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/util/CallOnce.h>
+#include <c10/util/Exception.h>
+#include <c10/util/env.h>
+#include <c10/util/irange.h>
+
+#include <cstdint>
+#include <memory>
+#include <mutex>
+
+namespace at {
+
+class Tensor;
+
+enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };
+
+class TORCH_API Context {
+ public:
+  Context();
+
+  const Generator& defaultGenerator(Device device) {
+    c10::DeviceType device_type = device.type();
+    initCUDAIfNeeded(device_type);
+    initHIPIfNeeded(device_type);
+    if (device_type == at::kCPU) {
+      return at::detail::getDefaultCPUGenerator();
+    } else if (device_type == at::kCUDA) {
+      return at::detail::getCUDAHooks().getDefaultCUDAGenerator(device.index());
+    } else if (device_type == at::kMPS) {
+      return at::detail::getMPSHooks().getDefaultMPSGenerator();
+    } else if (device_type == at::kXPU) {
+      return at::detail::getXPUHooks().getDefaultXPUGenerator(device.index());
+    } else if (device_type == at::kIPU) {
+      return at::detail::getIPUHooks().getDefaultIPUGenerator(device.index());
+    } else if (device_type == at::kPrivateUse1) {
+      return at::GetPrivateUse1HooksInterface()->getDefaultGenerator(
+          device.index());
+    } else {
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
+    }
+  }
+  Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
+    initCUDAIfNeeded(device_type);
+    initHIPIfNeeded(device_type);
+    if (device_type == at::kCPU) {
+      return c10::DeviceType::CPU;
+    } else if (device_type == at::kCUDA) {
+      return at::detail::getCUDAHooks().getDeviceFromPtr(data);
+    } else if (device_type == at::kPrivateUse1) {
+      return at::GetPrivateUse1HooksInterface()->getDeviceFromPtr(data);
+    } else {
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
+    }
+  }
+  static bool isPinnedPtr(const void* data) {
+    return detail::getCUDAHooks().isPinnedPtr(data);
+  }
+  static bool hasOpenMP();
+  static bool hasMKL();
+  static bool hasLAPACK();
+  static bool hasMKLDNN();
+  static bool hasMAGMA() {
+    return detail::getCUDAHooks().hasMAGMA();
+  }
+  static bool hasCUDA() {
+    return detail::getCUDAHooks().hasCUDA();
+  }
+  static bool hasMTIA() {
+    return detail::getMTIAHooks().hasMTIA();
+  }
+  static bool hasCUDART() {
+    return detail::getCUDAHooks().hasCUDART();
+  }
+  static long versionCUDART() {
+    return detail::getCUDAHooks().versionCUDART();
+  }
+  static bool hasCuDNN() {
+    return detail::getCUDAHooks().hasCuDNN();
+  }
+  static long versionCuDNN() {
+    return detail::getCUDAHooks().versionCuDNN();
+  }
+  static bool hasCuSOLVER() {
+    return detail::getCUDAHooks().hasCuSOLVER();
+  }
+  static bool hasHIP() {
+    return detail::getHIPHooks().hasHIP();
+  }
+  static bool hasMPS() {
+    return detail::getMPSHooks().hasMPS();
+  }
+  static bool hasIPU() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
+  }
+  static bool hasXLA() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
+  }
+  static bool hasXPU() {
+    return detail::getXPUHooks().hasXPU();
+  }
+  static bool hasLazy() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
+  }
+  static bool hasORT() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
+  }
+  // defined in header so that getNonVariableType has ability to inline
+  // call_once check. getNonVariableType is called fairly frequently
+  void lazyInitCUDA() {
+    c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); });
+  }
+  void lazyInitHIP() {
+    c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); });
+  }
+  static const at::cuda::NVRTC& getNVRTC() {
+    return detail::getCUDAHooks().nvrtc();
+  }
+
+  static bool setFlushDenormal(bool on);
+
+  // NB: This method is *purely* whether or not a user requested
+  // that CuDNN was enabled, it doesn't actually say anything about
+  // whether or not CuDNN is actually usable. Use cudnn_is_acceptable
+  // to test this instead
+  bool userEnabledCuDNN() const;
+  void setUserEnabledCuDNN(bool e);
+  bool userEnabledMkldnn() const;
+  void setUserEnabledMkldnn(bool e);
+  bool benchmarkCuDNN() const;
+  void setBenchmarkCuDNN(bool);
+  int benchmarkLimitCuDNN() const;
+  void setBenchmarkLimitCuDNN(int);
+  bool deterministicCuDNN() const;
+  void setDeterministicCuDNN(bool);
+
+  // Note [Disabling Fused SDP Kernels]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Flash and Memory Efficient SDP kernels are enabled by default.
+  // However, they can be disabled by setting
+  // at::globalContext().setUserEnabledFlashSDP(false) flag.
+  // This is useful for debugging purposes. For example, if you want to
+  // compare the performance of the flash SDP kernels with the unfused
+  // kernel, you can disable the flash SDP kernels. By disabling
+  // the math SDP kernel, you can force your code to use flash kernels.
+  // The math SDP kernel can be disabled by setting
+  // at::globalContext().setUserEnabledMathSDP(false) flag.
+  void setSDPUseFlash(bool);
+  bool userEnabledFlashSDP() const;
+
+  void setSDPUseMemEfficient(bool);
+  bool userEnabledMemEfficientSDP() const;
+
+  void setSDPUseMath(bool);
+  bool userEnabledMathSDP() const;
+
+  at::LinalgBackend linalgPreferredBackend() const;
+  void setLinalgPreferredBackend(at::LinalgBackend);
+
+  // Note [Enabling Deterministic Operations]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Operations in PyTorch that normally act nondeterministically, but have an
+  // alternate deterministic implementation, should satisfy the following
+  // requirements:
+  //
+  // * Include this comment: "See Note [Enabling Deterministic Operations]"
+  //
+  // * Check the value of `at::globalContext().deterministicAlgorithms()` to
+  //   toggle
+  //   between nondeterministic and deterministic implementations.
+  //
+  // * Have an entry in the list of PyTorch operations that toggle between
+  //   nondeterministic
+  //   and deterministic implementations, in the docstring of
+  //   `use_deterministic_algorithms()` in torch/__init__.py
+  //
+  // `example_func()` below shows an example of toggling between
+  // nondeterministic and deterministic implementations:
+  //
+  //   void example_func() {
+  //     // See Note [Enabling Deterministic Operations]
+  //     if (at::globalContext().deterministicAlgorithms()) {
+  //       example_func_deterministic();
+  //     } else {
+  //       example_func_nondeterministic();
+  //     }
+  //   }
+
+  bool deterministicAlgorithms() const;
+  bool deterministicAlgorithmsWarnOnly() const;
+  void setDeterministicAlgorithms(bool, bool);
+  bool deterministicFillUninitializedMemory() const;
+  void setDeterministicFillUninitializedMemory(bool);
+
+  // Note [Writing Nondeterministic Operations]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Operations in PyTorch that act nondeterministically and do not have an
+  // alternate deterministic implementation should satisfy the following
+  // requirements:
+  //
+  // * Include this comment: "See Note [Writing Nondeterministic Operations]"
+  //
+  // * Include a comment explaining why the operation is nondeterministic.
+  //
+  // * Throw an error when `Context::deterministicAlgorithms()` is true. Most
+  //   of the time, this should be accomplished by calling
+  //   `at::globalContext().alertNotDeterminstic()`. However, if the
+  //   nondeterministic behavior is caused by the CuBLAS workspace
+  //   configuration in CUDA >= 10.2,
+  //   `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
+  //   called instead (in this case, a comment explaining why the operation is
+  //   nondeterministic is not necessary). See below for details on these
+  //   methods.
+  //
+  // * Have an entry in the list of nondeterministic PyTorch operations in the
+  //   docstring of `use_deterministic_algorithms()` in torch/__init__.py
+  //
+  // * Have a test function in `test/test_torch.py` whose name begins with
+  //   `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace
+  //   configuration is the reason for nondeterminism, the operation should be
+  //   included in the `test_cublas_config_nondeterministic_alert` test. Any new
+  //   tests should ideally follow a pattern similar to the existing ones.
+  //
+  // `example_func()` below shows an example of the comments and error-throwing
+  // code for a nondeterministic operation:
+  //
+  //   void example_func() {
+  //     // See Note [Writing Nondeterministic Operations]
+  //     // Nondeterministic because <reason>
+  //     at::globalContext().alertNondeterministic("example_func");
+  //     ...
+  //   }
+
+  // Throws an error if `Context::deterministicAlgorithms()` is true
+  static void alertNotDeterministic(c10::string_view const& caller);
+
+  // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA
+  // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or
+  // ":4096:8". For more details:
+  // https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
+  void alertCuBLASConfigNotDeterministic() const;
+
+  void setFloat32MatmulPrecision(const std::string& s);
+  bool allowTF32CuDNN() const;
+  void setAllowTF32CuDNN(bool);
+  bool allowTF32CuBLAS() const;
+  void setAllowTF32CuBLAS(bool);
+  Float32MatmulPrecision float32MatmulPrecision() const;
+  void setFloat32MatmulPrecision(Float32MatmulPrecision p);
+  bool allowFP16ReductionCuBLAS() const;
+  void setAllowFP16ReductionCuBLAS(bool);
+  bool allowBF16ReductionCuBLAS() const;
+  void setAllowBF16ReductionCuBLAS(bool);
+  at::QEngine qEngine() const;
+  void setQEngine(at::QEngine e);
+  static const std::vector<at::QEngine>& supportedQEngines();
+  static bool isXNNPACKAvailable();
+  void setCheckSparseTensorInvariants(bool e);
+  bool checkSparseTensorInvariants() const;
+  // This method is used to release the original weight after pre-packing.
+  // It should be called once before loading/running the model.
+  // NB: By default it is set to true for mobile builds.
+  void setReleaseWeightsWhenPrepacking(bool e);
+  bool releaseWeightsWhenPrepacking() const;
+
+  void setDisplayVmapFallbackWarnings(bool enabled);
+  bool areVmapFallbackWarningsEnabled() const;
+
+  void setDefaultMobileCPUAllocator();
+  void unsetDefaultMobileCPUAllocator();
+
+ private:
+  void initCUDAIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::CUDA) {
+      lazyInitCUDA();
+    }
+  }
+  void initHIPIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::HIP) {
+      lazyInitHIP();
+    }
+  }
+  static bool checkCuBLASConfigDeterministic();
+  c10::once_flag thc_init;
+  c10::once_flag thh_init;
+  bool enabled_cudnn = true;
+  bool deterministic_cudnn = false;
+  bool _deterministic_algorithms = false;
+  bool _deterministic_algorithms_warn_only = false;
+  bool _deterministic_fill_uninitialized_memory = true;
+  bool enabled_flashSDP = true;
+  bool enabled_mem_efficientSDP = true;
+  bool enabled_mathSDP = true;
+#ifdef USE_ROCM
+  bool benchmark_cudnn = true;
+#else
+  bool benchmark_cudnn = false;
+#endif
+  Float32MatmulPrecision float32_matmul_precision =
+      c10::utils::check_env("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE") == true
+      ? at::Float32MatmulPrecision::HIGH
+      : at::Float32MatmulPrecision::HIGHEST;
+  int benchmark_limit_cudnn = 10;
+  bool allow_tf32_cudnn = true;
+  bool allow_fp16_reduction_cublas = true;
+  bool allow_bf16_reduction_cublas = true;
+  bool enabled_mkldnn = true;
+  at::LinalgBackend linalg_preferred_backend =
+      c10::utils::check_env("TORCH_LINALG_PREFER_CUSOLVER") == true
+      ? at::LinalgBackend::Cusolver
+      : at::LinalgBackend::Default;
+#ifdef C10_MOBILE
+  bool release_original_weights = true;
+#else
+  bool release_original_weights = false;
+#endif
+  bool display_vmap_fallback_warnings_ = false;
+  c10::optional<at::QEngine> quantized_engine = c10::nullopt;
+  bool enable_sparse_tensor_invariant_checks = false;
+
+  Allocator* prev_allocator_ptr_{nullptr};
+};
+
+TORCH_API Context& globalContext();
+
+static inline void init() {
+  globalContext();
+}
+
+TORCH_API Allocator* getCPUAllocator();
+
+static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
+    Backend p,
+    ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      p, s);
+}
+
+static inline DeprecatedTypeProperties& CPU(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::CPU, s);
+}
+
+static inline DeprecatedTypeProperties& CUDA(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::CUDA, s);
+}
+
+static inline DeprecatedTypeProperties& HIP(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::HIP, s);
+}
+
+static inline DeprecatedTypeProperties& MPS(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::MPS, s);
+}
+
+static inline bool hasCUDA() {
+  return globalContext().hasCUDA();
+}
+
+static inline bool hasMTIA() {
+  return globalContext().hasMTIA();
+}
+
+static inline bool hasHIP() {
+  return globalContext().hasHIP();
+}
+
+static inline bool hasIPU() {
+  return globalContext().hasIPU();
+}
+
+static inline bool hasXLA() {
+  return globalContext().hasXLA();
+}
+
+static inline bool hasMPS() {
+  return globalContext().hasMPS();
+}
+
+static inline bool hasORT() {
+  return globalContext().hasORT();
+}
+
+static inline bool hasXPU() {
+  return globalContext().hasXPU();
+}
+
+// Despite its name, this function returns the number of *CUDA* GPUs.
+static inline size_t getNumGPUs() {
+  // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
+  // FUNCTION. If you are interested in interrogating the number of
+  // devices for a specific device type, add that function to the
+  // relevant library (e.g., similar to at::cuda::device_count())
+  if (hasCUDA() && hasHIP()) {
+    throw std::runtime_error(
+        "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
+        "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
+        "means HIP. Rebuild PyTorch with one or the other disabled.");
+  } else if (hasCUDA()) {
+    return detail::getCUDAHooks().getNumGPUs();
+  } else if (hasHIP()) {
+    return detail::getHIPHooks().getNumGPUs();
+  } else {
+    return 0;
+  }
+}
+
+static inline bool hasOpenMP() {
+  return globalContext().hasOpenMP();
+}
+
+static inline bool hasMKL() {
+  return globalContext().hasMKL();
+}
+
+static inline bool hasLAPACK() {
+  return globalContext().hasLAPACK();
+}
+
+static inline bool hasMAGMA() {
+  return globalContext().hasMAGMA();
+}
+
+static inline bool hasMKLDNN() {
+  return globalContext().hasMKLDNN();
+}
+
+static inline void manual_seed(uint64_t seed) {
+  auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
+  {
+    // See Note [Acquire lock when using random generators]
+    std::lock_guard<std::mutex> lock(gen.mutex());
+    gen.set_current_seed(seed);
+  }
+  // NB: Sometimes we build with CUDA, but we don't have any GPUs
+  // available. In that case, we must not seed CUDA; it will fail!
+  const auto cuda_num_gpus = detail::getCUDAHooks().getNumGPUs();
+  if (hasCUDA() && cuda_num_gpus > 0) {
+    for (const auto i : c10::irange(cuda_num_gpus)) {
+      auto cuda_gen = globalContext().defaultGenerator(
+          Device(at::kCUDA, static_cast<c10::DeviceIndex>(i)));
+      {
+        // See Note [Acquire lock when using random generators]
+        std::lock_guard<std::mutex> lock(cuda_gen.mutex());
+        cuda_gen.set_current_seed(seed);
+      }
+    }
+  }
+
+  const auto xpu_num_gpus = detail::getXPUHooks().getNumGPUs();
+  if (hasXPU() && xpu_num_gpus > 0) {
+    for (const auto i : c10::irange(xpu_num_gpus)) {
+      auto xpu_gen = globalContext().defaultGenerator(
+          Device(at::kXPU, static_cast<c10::DeviceIndex>(i)));
+      {
+        // See Note [Acquire lock when using random generators]
+        std::lock_guard<std::mutex> lock(xpu_gen.mutex());
+        xpu_gen.set_current_seed(seed);
+      }
+    }
+  }
+
+  if (hasMPS()) {
+    auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
+    // See Note [Acquire lock when using random generators]
+    std::lock_guard<std::mutex> lock(mps_gen.mutex());
+    mps_gen.set_current_seed(seed);
+  }
+}
+
+// When the global flag `allow_tf32` is set to true, cuBLAS handles are
+// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH.
+// For some operators, such as addmv, TF32 offers no performance improvement
+// but causes precision loss. To help this case, this class implements
+// a RAII guard that can be used to quickly disable TF32 within its scope.
+//
+// Usage:
+//     NoTF32Guard disable_tf32;
+struct TORCH_API NoTF32Guard {
+  NoTF32Guard();
+  ~NoTF32Guard();
+  static bool should_disable_tf32();
+
+ private:
+  bool changed = false;
+};
+
+#ifdef USE_ROCM
+struct TORCH_API ROCmBackwardPassGuard {
+  ROCmBackwardPassGuard();
+  ~ROCmBackwardPassGuard();
+  static bool is_backward_pass();
+
+ private:
+  static thread_local bool is_backward_pass_;
+};
+#endif
+
+} // namespace at
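Context exposes most of these knobs through the `at::globalContext()` singleton. The sketch below is our own example, using only declarations from the header above; it exercises the deterministic-algorithms toggle from Note [Enabling Deterministic Operations], the flash-SDP switch from Note [Disabling Fused SDP Kernels], and the `NoTF32Guard` RAII helper:

    #include <ATen/Context.h>
    #include <iostream>

    void tune_global_context() {
      auto& ctx = at::globalContext();
      // Capability probes route through the per-backend hooks interfaces.
      std::cout << "CUDA available: " << at::hasCUDA() << '\n';
      // Request deterministic implementations; the second argument is warn_only.
      ctx.setDeterministicAlgorithms(true, /*warn_only=*/false);
      // Skip the fused flash SDP kernel, e.g. for A/B performance comparisons.
      ctx.setSDPUseFlash(false);
      {
        // RAII guard from this header; operators that consult
        // NoTF32Guard::should_disable_tf32() avoid TF32 until scope exit.
        at::NoTF32Guard disable_tf32;
        // ... TF32-sensitive work ...
      }
    }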
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ExpandUtils.h
ADDED
@@ -0,0 +1,527 @@
+#pragma once
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/view.h>
+#include <ATen/ops/view_copy.h>
+#endif
+
+#include <ATen/Tensor.h>
+#include <ATen/core/DimVector.h>
+#include <c10/util/Exception.h>
+#include <c10/util/MaybeOwned.h>
+#include <c10/util/irange.h>
+
+#include <functional>
+#include <sstream>
+#include <tuple>
+#include <utility>
+
+namespace at {
+
+TORCH_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
+TORCH_API std::vector<SymInt> infer_size_symint(
+    SymIntArrayRef a,
+    SymIntArrayRef b);
+TORCH_API DimVector infer_size_dimvector(IntArrayRef a, IntArrayRef b);
+TORCH_API SymDimVector
+infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b);
+
+// Named type instead of a pair/tuple so that we can be sure to
+// construct the vectors in place and get NRVO.
+template <typename Container>
+struct InferExpandGeometryResult {
+  Container sizes;
+  Container strides;
+  explicit InferExpandGeometryResult(size_t ndim)
+      : sizes(ndim), strides(ndim) {}
+  explicit InferExpandGeometryResult(IntArrayRef sizes_, size_t ndim)
+      : sizes(sizes_.begin(), sizes_.end()), strides(ndim) {}
+};
+
+TORCH_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
+inferExpandGeometry(
+    IntArrayRef tensor_sizes,
+    IntArrayRef tensor_strides,
+    IntArrayRef sizes);
+
+TORCH_API InferExpandGeometryResult<DimVector> inferExpandGeometry_dimvector(
+    IntArrayRef tensor_sizes,
+    IntArrayRef tensor_strides,
+    IntArrayRef sizes);
+
+TORCH_API std::vector<int64_t> infer_dense_strides(
+    IntArrayRef tensor_sizes,
+    IntArrayRef tensor_strides);
+
+// True if input shapes are expandable
+// NOTE: infer_size did a similar check, please keep them sync if change is
+// needed
+inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) {
+  size_t ndim1 = shape1.size();
+  size_t ndim2 = shape2.size();
+  size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2;
+
+  for (int64_t i = static_cast<int64_t>(ndim) - 1; i >= 0; --i) {
+    if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 ||
+        shape2[ndim2] == 1) {
+      continue;
+    }
+    return false;
+  }
+  return true;
+}
+
+// avoid copy-construction of Tensor by using a reference_wrapper.
+inline void check_defined(
+    std::initializer_list<std::reference_wrapper<const Tensor>> tensors,
+    const char* api_name) {
+  for (auto& t : tensors) {
+    if (!t.get().defined()) {
+      AT_ERROR(api_name, "(...) called with an undefined Tensor");
+    }
+  }
+}
+
+// NOTE [ ExpandUtils Borrowing ]
+//
+// Functions in ExpandUtils return `c10::MaybeOwned<Tensor>` because
+// expansion may not actually be needed, in which case we can improve
+// efficiency by returning
+// `c10::MaybeOwned<Tensor>::borrowed(to_expand)`. However, this means
+// that you need to be careful: the returned `c10::MaybeOwned<Tensor>`
+// must not outlive the original `Tensor` object that `to_expand`
+// referred to! The deleted rvalue reference overloads of these
+// functions help with this by preventing trivial use of a temporary
+// resulting from a function call, but it is still possible to make a
+// mistake.
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand) {
+  if (tensor.sym_sizes().equals(to_expand.sym_sizes())) {
+    return c10::MaybeOwned<Tensor>::borrowed(to_expand);
+  }
+  return c10::MaybeOwned<Tensor>::owned(
+      to_expand.expand_symint(tensor.sym_sizes()));
+}
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand) = delete;
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand,
+    const char* api_name) {
+  check_defined({tensor, to_expand}, api_name);
+  return expand_inplace(tensor, to_expand);
+}
+
+inline c10::MaybeOwned<Tensor> expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand,
+    const char* api_name) = delete;
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    const Tensor& to_expand2) {
+  if (tensor.sizes().equals(to_expand1.sizes()) &&
+      tensor.sizes().equals((to_expand2.sizes()))) {
+    return std::make_tuple(
+        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand2));
+  }
+
+  return std::make_tuple(
+      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(tensor.sizes())),
+      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(tensor.sizes())));
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand1,
+    const Tensor& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    Tensor&& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(const Tensor& tensor, Tensor&& to_expand1, Tensor&& to_expand2) =
+    delete;
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) {
+  check_defined({tensor, to_expand1, to_expand2}, api_name);
+  return expand_inplace(tensor, to_expand1, to_expand2);
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_inplace(
+    const Tensor& tensor,
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+
+// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation.
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(const Tensor& to_expand1, const Tensor& to_expand2) {
+  auto s1 = to_expand1.sym_sizes();
+  auto s2 = to_expand2.sym_sizes();
+  if (s1.equals(s2)) {
+    return std::make_tuple(
+        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand2));
+  }
+
+  auto expanded_size = infer_size_symdimvector(s1, s2);
+  return std::make_tuple(
+      c10::MaybeOwned<Tensor>::owned(to_expand1.expand_symint(expanded_size)),
+      c10::MaybeOwned<Tensor>::owned(to_expand2.expand_symint(expanded_size)));
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(Tensor&& to_expand1, const Tensor& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(const Tensor& to_expand1, Tensor&& to_expand2) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2) = delete;
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) {
+  check_defined({to_expand1, to_expand2}, api_name);
+  return expand_outplace(to_expand1, to_expand2);
+}
+
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const char* api_name) = delete;
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3) {
+  if (to_expand1.sizes().equals(to_expand2.sizes()) &&
+      to_expand1.sizes().equals(to_expand3.sizes())) {
+    return std::make_tuple(
+        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand2),
+        c10::MaybeOwned<Tensor>::borrowed(to_expand3));
+  }
+
+  auto expanded_size12 =
+      infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes());
+  auto expanded_size =
+      infer_size_dimvector(expanded_size12, to_expand3.sizes());
+  return std::make_tuple(
+      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(expanded_size)),
+      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(expanded_size)),
+      c10::MaybeOwned<Tensor>::owned(to_expand3.expand(expanded_size)));
+}
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    Tensor&& to_expand3) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2, Tensor&& to_expand3) =
+    delete;
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) {
+  check_defined({to_expand1, to_expand2, to_expand3}, api_name);
+  return expand_outplace(to_expand1, to_expand2, to_expand3);
+}
+
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    const Tensor& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    const Tensor& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    const Tensor& to_expand1,
+    Tensor&& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+inline std::tuple<
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>,
+    c10::MaybeOwned<Tensor>>
+expand_outplace(
+    Tensor&& to_expand1,
+    Tensor&& to_expand2,
+    Tensor&& to_expand3,
+    const char* api_name) = delete;
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    const Tensor& to_expand,
+    IntArrayRef sizes) {
+  if (to_expand.sizes().equals(sizes)) {
+    return c10::MaybeOwned<Tensor>::borrowed(to_expand);
+  }
+
+  return c10::MaybeOwned<Tensor>::owned(to_expand.expand(sizes));
+}
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    Tensor&& to_expand,
+    IntArrayRef sizes) = delete;
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    const Tensor& to_expand,
+    IntArrayRef sizes,
+    const char* api_name) {
+  check_defined({to_expand}, api_name);
+  return expand_size(to_expand, sizes);
+}
+
+inline c10::MaybeOwned<Tensor> expand_size(
+    Tensor&& to_expand,
+    IntArrayRef sizes,
+    const char* api_name) = delete;
+
+inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
+  // expands a list of Tensors; ignores undefined (null) tensors
+  bool first = true;
+  DimVector sizes;
+  for (const auto i : c10::irange(to_expand.size())) {
+    if (!to_expand[i].defined()) {
+      continue;
+    } else if (first) {
+      sizes = to_expand[i].sizes();
+      first = false;
+    } else {
+      sizes = infer_size_dimvector(sizes, to_expand[i].sizes());
+    }
+  }
+
+  std::vector<Tensor> result(to_expand.size());
+  for (const auto i : c10::irange(to_expand.size())) {
+    if (!to_expand[i].defined()) {
+      continue;
+    } else if (to_expand[i].sizes().equals(sizes)) {
+      result[i] = to_expand[i];
+    } else {
+      result[i] = to_expand[i].expand(sizes);
+    }
+  }
+  return result;
+}
+
+template <typename T>
+inline Tensor _sum_to(
+    Tensor tensor,
+    const c10::ArrayRef<T> shape,
+    bool always_return_non_view = false) {
+  if (shape.size() == 0) {
+    return tensor.sum();
+  }
+
+  auto sizes = at::symint::sizes<T>(tensor);
+  c10::SmallVector<int64_t, 8> reduce_dims;
+  const int64_t leading_dims = sizes.size() - shape.size();
+  for (const auto i : c10::irange(leading_dims)) {
+    reduce_dims.push_back(i);
+  }
+  for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
+    if (shape[i - leading_dims] == 1 && sizes[i] != 1) {
+      reduce_dims.push_back(i);
+    }
+  }
+
+  if (!reduce_dims.empty()) {
+    tensor = tensor.sum(reduce_dims, /*keepdim=*/true);
+  }
+
+  if (always_return_non_view) {
+    // This is only actually used by the functionalization pass.
+    // We want to be able to guarantee that this function doesn't return a view
+    // of the input.
+    return leading_dims > 0 ? at::symint::view_copy<T>(tensor, shape)
+                            : tensor.clone();
+  } else {
+    return leading_dims > 0 ? at::symint::view<T>(tensor, shape) : tensor;
+  }
+}
+
+inline Tensor sum_to(
+    Tensor tensor,
+    const c10::SymIntArrayRef shape,
+    bool always_return_non_view = false) {
+  return _sum_to(std::move(tensor), shape, always_return_non_view);
+}
+
+// Sums `tensor` repeatedly to produce a tensor of shape `shape`.
+// Precondition: is_expandable_to(shape, tensor.sizes()) must be true
+inline Tensor sum_to(
+    Tensor tensor,
+    const IntArrayRef shape,
+    bool always_return_non_view = false) {
+  return _sum_to(std::move(tensor), shape, always_return_non_view);
+}
+
+static inline bool is_expandable_to(
+    SymIntArrayRef shape,
+    c10::SymIntArrayRef desired) {
+  size_t ndim = shape.size();
+  size_t target_dim = desired.size();
+  if (ndim > target_dim) {
+    return false;
+  }
+  for (const auto i : c10::irange(ndim)) {
+    const auto& size = shape[ndim - i - 1];
+    const auto& target = desired[target_dim - i - 1];
+    if (size != target && size != 1) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
+  auto sym_shape = c10::SymIntArrayRef(
+      reinterpret_cast<const c10::SymInt*>(shape.data()), shape.size());
+  auto sym_desired = c10::SymIntArrayRef(
+      reinterpret_cast<const c10::SymInt*>(desired.data()), desired.size());
+  return is_expandable_to(sym_shape, sym_desired);
+}
+
+} // namespace at
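As a small usage sketch of the borrowing contract described in NOTE [ ExpandUtils Borrowing ] (our own example; `a` and `b` are placeholder tensors):

    #include <ATen/ATen.h>
    #include <ATen/ExpandUtils.h>

    void broadcast_pair() {
      at::Tensor a = at::ones({3, 1});
      at::Tensor b = at::ones({1, 4});
      TORCH_CHECK(at::are_expandable(a.sizes(), b.sizes()));
      // The MaybeOwned results may borrow a and b, so they must not
      // outlive them (see NOTE [ ExpandUtils Borrowing ]).
      auto expanded = at::expand_outplace(a, b);
      const at::Tensor& ea = *std::get<0>(expanded);
      const at::Tensor& eb = *std::get<1>(expanded);
      TORCH_CHECK(ea.sizes().equals(eb.sizes())); // both broadcast to {3, 4}
    }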
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h
ADDED
@@ -0,0 +1,31 @@
+#pragma once
+
+#include <c10/util/Exception.h>
+
+#include <ostream>
+#include <string>
+
+namespace at {
+
+enum class LinalgBackend : int8_t { Default, Cusolver, Magma };
+
+inline std::string LinalgBackendToString(at::LinalgBackend backend) {
+  switch (backend) {
+    case LinalgBackend::Default:
+      return "at::LinalgBackend::Default";
+    case LinalgBackend::Cusolver:
+      return "at::LinalgBackend::Cusolver";
+    case LinalgBackend::Magma:
+      return "at::LinalgBackend::Magma";
+    default:
+      TORCH_CHECK(false, "Unknown linalg backend");
+  }
+}
+
+inline std::ostream& operator<<(
+    std::ostream& stream,
+    at::LinalgBackend backend) {
+  return stream << LinalgBackendToString(backend);
+}
+
+} // namespace at
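A brief sketch of how this enum is typically consumed (our own example; `setLinalgPreferredBackend` and `linalgPreferredBackend` are declared in the Context.h diff above):

    #include <ATen/Context.h>
    #include <ATen/LinalgBackend.h>
    #include <iostream>

    void prefer_cusolver() {
      // Ask ATen to route supported linalg ops through cuSOLVER.
      at::globalContext().setLinalgPreferredBackend(at::LinalgBackend::Cusolver);
      // operator<< formats the enum via LinalgBackendToString.
      std::cout << at::globalContext().linalgPreferredBackend() << '\n';
    }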
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NativeFunctions.h
ADDED
@@ -0,0 +1,1295 @@
#pragma once

// @generated by torchgen/gen.py from NativeFunctions.h

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>

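Before the long run of per-operator includes below, a sketch of the alternative that the guard above recommends (editorial aside, not part of the diff; the macro and the <ATen/ops/{my_operator}_native.h> pattern come from the #error text, while the consumer file itself is hypothetical): a translation unit that opts into per-operator headers includes just the ops it needs instead of this umbrella header, so it is not rebuilt whenever an unrelated operator changes.

  // Hypothetical consumer TU using the per-operator include pattern.
  #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
  #include <ATen/core/Tensor.h>
  #include <ATen/ops/add_native.h>  // only the add kernels, not all of them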
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_add_batch_dim_native.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_tensor_metadata_native.h>
#include <ATen/ops/_autocast_to_full_precision_native.h>
#include <ATen/ops/_autocast_to_reduced_precision_native.h>
#include <ATen/ops/_backward_native.h>
#include <ATen/ops/_batch_norm_impl_index_native.h>
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
#include <ATen/ops/_cast_Byte_native.h>
#include <ATen/ops/_cast_Char_native.h>
#include <ATen/ops/_cast_Double_native.h>
#include <ATen/ops/_cast_Float_native.h>
#include <ATen/ops/_cast_Half_native.h>
#include <ATen/ops/_cast_Int_native.h>
#include <ATen/ops/_cast_Long_native.h>
#include <ATen/ops/_cast_Short_native.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_choose_qparams_per_tensor_native.h>
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_conj_native.h>
#include <ATen/ops/_conj_copy_native.h>
#include <ATen/ops/_conj_physical_native.h>
#include <ATen/ops/_conv_depthwise2d_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_weight_to_int4pack_native.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_convolution_double_backward_native.h>
#include <ATen/ops/_convolution_mode_native.h>
#include <ATen/ops/_copy_from_native.h>
#include <ATen/ops/_copy_from_and_resize_native.h>
#include <ATen/ops/_cslt_compress_native.h>
#include <ATen/ops/_cslt_sparse_mm_native.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_cudnn_ctc_loss_native.h>
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
#include <ATen/ops/_cudnn_rnn_native.h>
#include <ATen/ops/_cudnn_rnn_backward_native.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_cufft_clear_plan_cache_native.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size_native.h>
#include <ATen/ops/_cufft_get_plan_cache_size_native.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size_native.h>
#include <ATen/ops/_cummax_helper_native.h>
#include <ATen/ops/_cummin_helper_native.h>
#include <ATen/ops/_debug_has_internal_overlap_native.h>
#include <ATen/ops/_dimI_native.h>
#include <ATen/ops/_dimV_native.h>
#include <ATen/ops/_dim_arange_native.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_efficient_attention_backward_native.h>
#include <ATen/ops/_efficient_attention_forward_native.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_backward_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_embedding_bag_sparse_backward_native.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_euclidean_dist_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_fill_mem_eff_dropout_mask_native.h>
#include <ATen/ops/_flash_attention_backward_native.h>
#include <ATen/ops/_flash_attention_forward_native.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_copy_native.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_norm_native.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sign_native.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_functional_sym_constrain_range_native.h>
#include <ATen/ops/_functional_sym_constrain_range_for_size_native.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_dropout_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_sdp_choice_native.h>
#include <ATen/ops/_fw_primal_native.h>
#include <ATen/ops/_fw_primal_copy_native.h>
#include <ATen/ops/_gather_sparse_backward_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h>
#include <ATen/ops/_has_compatible_shallow_copy_type_native.h>
#include <ATen/ops/_has_same_storage_numel_native.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_indices_native.h>
#include <ATen/ops/_indices_copy_native.h>
#include <ATen/ops/_int_mm_native.h>
#include <ATen/ops/_is_all_true_native.h>
#include <ATen/ops/_is_any_true_native.h>
#include <ATen/ops/_is_zerotensor_native.h>
#include <ATen/ops/_linalg_check_errors_native.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_solve_ex_native.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_local_scalar_dense_native.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_lstm_mps_native.h>
#include <ATen/ops/_lu_with_info_native.h>
#include <ATen/ops/_make_dep_token_native.h>
#include <ATen/ops/_make_dual_native.h>
#include <ATen/ops/_make_dual_copy_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_masked_scale_native.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_mixed_dtypes_linear_native.h>
#include <ATen/ops/_mkldnn_reshape_native.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mps_convolution_native.h>
#include <ATen/ops/_mps_convolution_transpose_native.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_no_training_native.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_neg_view_native.h>
#include <ATen/ops/_neg_view_copy_native.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
#include <ATen/ops/_nested_select_backward_native.h>
#include <ATen/ops/_nested_sum_backward_native.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
#include <ATen/ops/_nested_tensor_from_tensor_list_native.h>
#include <ATen/ops/_nested_tensor_size_native.h>
#include <ATen/ops/_nested_tensor_softmax_with_shape_native.h>
#include <ATen/ops/_nested_tensor_storage_offsets_native.h>
#include <ATen/ops/_nested_tensor_strides_native.h>
#include <ATen/ops/_nested_view_from_buffer_native.h>
#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
#include <ATen/ops/_nnpack_available_native.h>
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_pack_padded_sequence_native.h>
#include <ATen/ops/_pack_padded_sequence_backward_native.h>
#include <ATen/ops/_pad_circular_native.h>
#include <ATen/ops/_pad_enum_native.h>
#include <ATen/ops/_pad_packed_sequence_native.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/_prelu_kernel_native.h>
#include <ATen/ops/_prelu_kernel_backward_native.h>
#include <ATen/ops/_propagate_xla_data_native.h>
#include <ATen/ops/_remove_batch_dim_native.h>
#include <ATen/ops/_reshape_alias_native.h>
#include <ATen/ops/_reshape_alias_copy_native.h>
#include <ATen/ops/_reshape_copy_native.h>
#include <ATen/ops/_reshape_from_tensor_native.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_rowwise_prune_native.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_saturate_weight_to_fp16_native.h>
#include <ATen/ops/_scaled_dot_product_attention_math_native.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_native.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_backward_native.h>
#include <ATen/ops/_scaled_mm_native.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_shape_as_tensor_native.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_sobol_engine_draw_native.h>
#include <ATen/ops/_sobol_engine_ff_native.h>
#include <ATen/ops/_sobol_engine_initialize_state_native.h>
#include <ATen/ops/_sobol_engine_scramble_native.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_addmm_native.h>
#include <ATen/ops/_sparse_broadcast_to_native.h>
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_mask_projection_native.h>
#include <ATen/ops/_sparse_mm_native.h>
#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h>
#include <ATen/ops/_sparse_semi_structured_linear_native.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_backward_native.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_test_ambiguous_defaults_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
#include <ATen/ops/_test_check_tensor_native.h>
#include <ATen/ops/_test_functorch_fallback_native.h>
#include <ATen/ops/_test_optional_filled_intlist_native.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_test_serialization_subcmul_native.h>
#include <ATen/ops/_test_string_default_native.h>
#include <ATen/ops/_test_warn_in_autograd_native.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h>
#include <ATen/ops/_to_copy_native.h>
#include <ATen/ops/_to_cpu_native.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_semi_structured_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_trilinear_native.h>
#include <ATen/ops/_triton_multi_head_attention_native.h>
#include <ATen/ops/_triton_scaled_dot_attention_native.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unpack_dual_native.h>
#include <ATen/ops/_unsafe_index_native.h>
#include <ATen/ops/_unsafe_index_put_native.h>
#include <ATen/ops/_unsafe_view_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_use_cudnn_ctc_loss_native.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args_native.h>
#include <ATen/ops/_values_native.h>
#include <ATen/ops/_values_copy_native.h>
#include <ATen/ops/_version_native.h>
#include <ATen/ops/_weight_int4pack_mm_native.h>
#include <ATen/ops/_weight_norm_native.h>
#include <ATen/ops/_weight_norm_differentiable_backward_native.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/absolute_native.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/adaptive_avg_pool1d_native.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool1d_native.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/adjoint_native.h>
#include <ATen/ops/affine_grid_generator_native.h>
#include <ATen/ops/affine_grid_generator_backward_native.h>
#include <ATen/ops/alias_native.h>
#include <ATen/ops/alias_copy_native.h>
#include <ATen/ops/align_as_native.h>
#include <ATen/ops/align_tensors_native.h>
#include <ATen/ops/align_to_native.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/allclose_native.h>
#include <ATen/ops/alpha_dropout_native.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/and_native.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arccos_native.h>
#include <ATen/ops/arccosh_native.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsinh_native.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan2_native.h>
#include <ATen/ops/arctanh_native.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argwhere_native.h>
#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/as_strided_copy_native.h>
#include <ATen/ops/as_strided_scatter_native.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/atleast_1d_native.h>
#include <ATen/ops/atleast_2d_native.h>
#include <ATen/ops/atleast_3d_native.h>
#include <ATen/ops/avg_pool1d_native.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/batch_norm_native.h>
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bilinear_native.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_with_logits_native.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/block_diag_native.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/broadcast_tensors_native.h>
#include <ATen/ops/broadcast_to_native.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/can_cast_native.h>
#include <ATen/ops/cartesian_prod_native.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/ccol_indices_native.h>
#include <ATen/ops/ccol_indices_copy_native.h>
#include <ATen/ops/cdist_native.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/chain_matmul_native.h>
#include <ATen/ops/chalf_native.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_solve_native.h>
#include <ATen/ops/choose_qparams_optimized_native.h>
#include <ATen/ops/chunk_native.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/coalesce_native.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col_indices_native.h>
#include <ATen/ops/col_indices_copy_native.h>
#include <ATen/ops/column_stack_native.h>
#include <ATen/ops/combinations_native.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/conj_native.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/constant_pad_nd_native.h>
#include <ATen/ops/contiguous_native.h>
#include <ATen/ops/conv1d_native.h>
#include <ATen/ops/conv2d_native.h>
#include <ATen/ops/conv3d_native.h>
#include <ATen/ops/conv_depthwise3d_native.h>
#include <ATen/ops/conv_tbc_native.h>
#include <ATen/ops/conv_tbc_backward_native.h>
#include <ATen/ops/conv_transpose1d_native.h>
#include <ATen/ops/conv_transpose2d_native.h>
#include <ATen/ops/conv_transpose3d_native.h>
#include <ATen/ops/convolution_native.h>
#include <ATen/ops/convolution_backward_native.h>
#include <ATen/ops/convolution_backward_overrideable_native.h>
#include <ATen/ops/convolution_overrideable_native.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/corrcoef_native.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/cosine_embedding_loss_native.h>
#include <ATen/ops/cosine_similarity_native.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/cov_native.h>
#include <ATen/ops/cross_native.h>
#include <ATen/ops/cross_entropy_loss_native.h>
#include <ATen/ops/crow_indices_native.h>
#include <ATen/ops/crow_indices_copy_native.h>
#include <ATen/ops/ctc_loss_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_native.h>
#include <ATen/ops/cudnn_batch_norm_native.h>
#include <ATen/ops/cudnn_batch_norm_backward_native.h>
#include <ATen/ops/cudnn_convolution_native.h>
#include <ATen/ops/cudnn_convolution_add_relu_native.h>
#include <ATen/ops/cudnn_convolution_relu_native.h>
#include <ATen/ops/cudnn_convolution_transpose_native.h>
#include <ATen/ops/cudnn_grid_sampler_native.h>
#include <ATen/ops/cudnn_grid_sampler_backward_native.h>
#include <ATen/ops/cudnn_is_acceptable_native.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummaxmin_backward_native.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_backward_native.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumulative_trapezoid_native.h>
#include <ATen/ops/data_native.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/dense_dim_native.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/det_native.h>
#include <ATen/ops/detach_native.h>
#include <ATen/ops/detach_copy_native.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/diag_embed_native.h>
#include <ATen/ops/diagflat_native.h>
#include <ATen/ops/diagonal_native.h>
#include <ATen/ops/diagonal_backward_native.h>
#include <ATen/ops/diagonal_copy_native.h>
#include <ATen/ops/diagonal_scatter_native.h>
#include <ATen/ops/diff_native.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/dist_native.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/dropout_native.h>
#include <ATen/ops/dsplit_native.h>
#include <ATen/ops/dstack_native.h>
#include <ATen/ops/einsum_native.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/embedding_native.h>
#include <ATen/ops/embedding_backward_native.h>
#include <ATen/ops/embedding_bag_native.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_sparse_backward_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_permuted_native.h>
#include <ATen/ops/empty_quantized_native.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/expand_native.h>
#include <ATen/ops/expand_as_native.h>
#include <ATen/ops/expand_copy_native.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_int8_weight_native.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_quantize_weight_native.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_native.h>
#include <ATen/ops/feature_alpha_dropout_native.h>
#include <ATen/ops/feature_dropout_native.h>
#include <ATen/ops/fft_fft_native.h>
#include <ATen/ops/fft_fft2_native.h>
#include <ATen/ops/fft_fftfreq_native.h>
#include <ATen/ops/fft_fftn_native.h>
#include <ATen/ops/fft_fftshift_native.h>
#include <ATen/ops/fft_hfft_native.h>
#include <ATen/ops/fft_hfft2_native.h>
#include <ATen/ops/fft_hfftn_native.h>
#include <ATen/ops/fft_ifft_native.h>
#include <ATen/ops/fft_ifft2_native.h>
#include <ATen/ops/fft_ifftn_native.h>
#include <ATen/ops/fft_ifftshift_native.h>
#include <ATen/ops/fft_ihfft_native.h>
#include <ATen/ops/fft_ihfft2_native.h>
#include <ATen/ops/fft_ihfftn_native.h>
#include <ATen/ops/fft_irfft_native.h>
#include <ATen/ops/fft_irfft2_native.h>
#include <ATen/ops/fft_irfftn_native.h>
#include <ATen/ops/fft_rfft_native.h>
#include <ATen/ops/fft_rfft2_native.h>
#include <ATen/ops/fft_rfftfreq_native.h>
#include <ATen/ops/fft_rfftn_native.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_diagonal_native.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/flatten_dense_tensors_native.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/fliplr_native.h>
#include <ATen/ops/flipud_native.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/frobenius_norm_native.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/full_native.h>
#include <ATen/ops/full_like_native.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gather_backward_native.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/ger_native.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/grid_sampler_native.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/group_norm_native.h>
#include <ATen/ops/gru_native.h>
#include <ATen/ops/gru_cell_native.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/hinge_embedding_loss_native.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogramdd_native.h>
#include <ATen/ops/hsplit_native.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/hstack_native.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/imag_native.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_backward_native.h>
#include <ATen/ops/indices_native.h>
#include <ATen/ops/indices_copy_native.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward_native.h>
#include <ATen/ops/inner_native.h>
#include <ATen/ops/instance_norm_native.h>
#include <ATen/ops/int_repr_native.h>
#include <ATen/ops/inverse_native.h>
#include <ATen/ops/is_coalesced_native.h>
#include <ATen/ops/is_complex_native.h>
#include <ATen/ops/is_conj_native.h>
#include <ATen/ops/is_distributed_native.h>
#include <ATen/ops/is_floating_point_native.h>
#include <ATen/ops/is_inference_native.h>
#include <ATen/ops/is_leaf_native.h>
#include <ATen/ops/is_neg_native.h>
#include <ATen/ops/is_nonzero_native.h>
#include <ATen/ops/is_pinned_native.h>
#include <ATen/ops/is_same_size_native.h>
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/is_signed_native.h>
#include <ATen/ops/is_vulkan_available_native.h>
#include <ATen/ops/isclose_native.h>
#include <ATen/ops/isfinite_native.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/isreal_native.h>
#include <ATen/ops/istft_native.h>
#include <ATen/ops/item_native.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kl_div_native.h>
#include <ATen/ops/kron_native.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/l1_loss_native.h>
#include <ATen/ops/layer_norm_native.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/ldexp_native.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lift_native.h>
#include <ATen/ops/lift_fresh_native.h>
#include <ATen/ops/lift_fresh_copy_native.h>
#include <ATen/ops/linalg_cholesky_native.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_det_native.h>
#include <ATen/ops/linalg_diagonal_native.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eigh_native.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_eigvalsh_native.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_inv_native.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_factor_native.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_matmul_native.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_power_native.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_multi_dot_native.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_slogdet_native.h>
#include <ATen/ops/linalg_solve_native.h>
#include <ATen/ops/linalg_solve_ex_native.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_svd_native.h>
#include <ATen/ops/linalg_svdvals_native.h>
#include <ATen/ops/linalg_tensorinv_native.h>
#include <ATen/ops/linalg_tensorsolve_native.h>
#include <ATen/ops/linalg_vander_native.h>
#include <ATen/ops/linalg_vecdot_native.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linear_native.h>
#include <ATen/ops/linear_backward_native.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_sigmoid_native.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/log_softmax_native.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logdet_native.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lstm_native.h>
#include <ATen/ops/lstm_cell_native.h>
#include <ATen/ops/lstm_mps_backward_native.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lu_solve_native.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/mH_native.h>
#include <ATen/ops/mT_native.h>
#include <ATen/ops/margin_ranking_loss_native.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_backward_native.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/masked_select_backward_native.h>
#include <ATen/ops/matmul_native.h>
#include <ATen/ops/matmul_backward_native.h>
#include <ATen/ops/matrix_H_native.h>
#include <ATen/ops/matrix_exp_native.h>
#include <ATen/ops/matrix_exp_backward_native.h>
#include <ATen/ops/matrix_power_native.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_pool1d_native.h>
#include <ATen/ops/max_pool1d_with_indices_native.h>
#include <ATen/ops/max_pool2d_native.h>
#include <ATen/ops/max_pool2d_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_native.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/meshgrid_native.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/miopen_batch_norm_native.h>
#include <ATen/ops/miopen_batch_norm_backward_native.h>
#include <ATen/ops/miopen_convolution_native.h>
#include <ATen/ops/miopen_convolution_add_relu_native.h>
#include <ATen/ops/miopen_convolution_relu_native.h>
#include <ATen/ops/miopen_convolution_transpose_native.h>
#include <ATen/ops/miopen_depthwise_convolution_native.h>
#include <ATen/ops/miopen_rnn_native.h>
#include <ATen/ops/miopen_rnn_backward_native.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mish_backward_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_convolution_native.h>
#include <ATen/ops/mkldnn_linear_native.h>
#include <ATen/ops/mkldnn_linear_backward_native.h>
#include <ATen/ops/mkldnn_linear_backward_input_native.h>
#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
#include <ATen/ops/mkldnn_max_pool2d_native.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool3d_native.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_native.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/moveaxis_native.h>
#include <ATen/ops/movedim_native.h>
#include <ATen/ops/mps_convolution_backward_native.h>
#include <ATen/ops/mps_convolution_transpose_backward_native.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/msort_native.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/mv_native.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nanmean_native.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/narrow_native.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/negative_native.h>
#include <ATen/ops/nested_to_padded_tensor_native.h>
#include <ATen/ops/new_empty_native.h>
#include <ATen/ops/new_empty_strided_native.h>
#include <ATen/ops/new_full_native.h>
#include <ATen/ops/new_ones_native.h>
#include <ATen/ops/new_zeros_native.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nll_loss_native.h>
#include <ATen/ops/nll_loss2d_native.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nll_loss_nd_native.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_numpy_native.h>
#include <ATen/ops/nonzero_static_native.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_except_dim_native.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/numpy_T_native.h>
#include <ATen/ops/one_hot_native.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/ones_like_native.h>
#include <ATen/ops/or_native.h>
#include <ATen/ops/orgqr_native.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/outer_native.h>
#include <ATen/ops/output_nr_native.h>
#include <ATen/ops/pad_native.h>
#include <ATen/ops/pad_sequence_native.h>
#include <ATen/ops/pairwise_distance_native.h>
#include <ATen/ops/pdist_native.h>
#include <ATen/ops/permute_native.h>
#include <ATen/ops/permute_copy_native.h>
#include <ATen/ops/pin_memory_native.h>
#include <ATen/ops/pinverse_native.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/poisson_nll_loss_native.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/positive_native.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/prelu_native.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/promote_types_native.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/q_per_channel_axis_native.h>
#include <ATen/ops/q_per_channel_scales_native.h>
#include <ATen/ops/q_per_channel_zero_points_native.h>
#include <ATen/ops/q_scale_native.h>
#include <ATen/ops/q_zero_point_native.h>
#include <ATen/ops/qr_native.h>
#include <ATen/ops/qscheme_native.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantized_batch_norm_native.h>
#include <ATen/ops/quantized_gru_cell_native.h>
#include <ATen/ops/quantized_lstm_cell_native.h>
#include <ATen/ops/quantized_max_pool1d_native.h>
#include <ATen/ops/quantized_max_pool2d_native.h>
#include <ATen/ops/quantized_max_pool3d_native.h>
#include <ATen/ops/quantized_rnn_relu_cell_native.h>
#include <ATen/ops/quantized_rnn_tanh_cell_native.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_like_native.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_like_native.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/ravel_native.h>
#include <ATen/ops/real_native.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/record_stream_native.h>
#include <ATen/ops/refine_names_native.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/relu6_native.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/rename_native.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/repeat_native.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/requires_grad_native.h>
#include <ATen/ops/reshape_native.h>
#include <ATen/ops/reshape_as_native.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/resize_as_native.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resolve_conj_native.h>
#include <ATen/ops/resolve_neg_native.h>
#include <ATen/ops/result_type_native.h>
#include <ATen/ops/retain_grad_native.h>
#include <ATen/ops/retains_grad_native.h>
#include <ATen/ops/rnn_relu_native.h>
#include <ATen/ops/rnn_relu_cell_native.h>
|
1047 |
+
#include <ATen/ops/rnn_tanh_native.h>
|
1048 |
+
#include <ATen/ops/rnn_tanh_cell_native.h>
|
1049 |
+
#include <ATen/ops/roll_native.h>
|
1050 |
+
#include <ATen/ops/rot90_native.h>
|
1051 |
+
#include <ATen/ops/round_native.h>
|
1052 |
+
#include <ATen/ops/row_indices_native.h>
|
1053 |
+
#include <ATen/ops/row_indices_copy_native.h>
|
1054 |
+
#include <ATen/ops/row_stack_native.h>
|
1055 |
+
#include <ATen/ops/rrelu_native.h>
|
1056 |
+
#include <ATen/ops/rrelu_with_noise_native.h>
|
1057 |
+
#include <ATen/ops/rrelu_with_noise_backward_native.h>
|
1058 |
+
#include <ATen/ops/rshift_native.h>
|
1059 |
+
#include <ATen/ops/rsqrt_native.h>
|
1060 |
+
#include <ATen/ops/rsub_native.h>
|
1061 |
+
#include <ATen/ops/scalar_tensor_native.h>
|
1062 |
+
#include <ATen/ops/scaled_dot_product_attention_native.h>
|
1063 |
+
#include <ATen/ops/scatter_native.h>
|
1064 |
+
#include <ATen/ops/scatter_add_native.h>
|
1065 |
+
#include <ATen/ops/scatter_reduce_native.h>
|
1066 |
+
#include <ATen/ops/searchsorted_native.h>
|
1067 |
+
#include <ATen/ops/segment_reduce_native.h>
|
1068 |
+
#include <ATen/ops/select_native.h>
|
1069 |
+
#include <ATen/ops/select_backward_native.h>
|
1070 |
+
#include <ATen/ops/select_copy_native.h>
|
1071 |
+
#include <ATen/ops/select_scatter_native.h>
|
1072 |
+
#include <ATen/ops/selu_native.h>
|
1073 |
+
#include <ATen/ops/set_native.h>
|
1074 |
+
#include <ATen/ops/set_data_native.h>
|
1075 |
+
#include <ATen/ops/sgn_native.h>
|
1076 |
+
#include <ATen/ops/sigmoid_native.h>
|
1077 |
+
#include <ATen/ops/sigmoid_backward_native.h>
|
1078 |
+
#include <ATen/ops/sign_native.h>
|
1079 |
+
#include <ATen/ops/signbit_native.h>
|
1080 |
+
#include <ATen/ops/silu_native.h>
|
1081 |
+
#include <ATen/ops/silu_backward_native.h>
|
1082 |
+
#include <ATen/ops/sin_native.h>
|
1083 |
+
#include <ATen/ops/sinc_native.h>
|
1084 |
+
#include <ATen/ops/sinh_native.h>
|
1085 |
+
#include <ATen/ops/size_native.h>
|
1086 |
+
#include <ATen/ops/slice_native.h>
|
1087 |
+
#include <ATen/ops/slice_backward_native.h>
|
1088 |
+
#include <ATen/ops/slice_copy_native.h>
|
1089 |
+
#include <ATen/ops/slice_scatter_native.h>
|
1090 |
+
#include <ATen/ops/slogdet_native.h>
|
1091 |
+
#include <ATen/ops/slow_conv3d_native.h>
|
1092 |
+
#include <ATen/ops/slow_conv3d_forward_native.h>
|
1093 |
+
#include <ATen/ops/slow_conv_dilated2d_native.h>
|
1094 |
+
#include <ATen/ops/slow_conv_dilated3d_native.h>
|
1095 |
+
#include <ATen/ops/slow_conv_transpose2d_native.h>
|
1096 |
+
#include <ATen/ops/slow_conv_transpose3d_native.h>
|
1097 |
+
#include <ATen/ops/smm_native.h>
|
1098 |
+
#include <ATen/ops/smooth_l1_loss_native.h>
|
1099 |
+
#include <ATen/ops/smooth_l1_loss_backward_native.h>
|
1100 |
+
#include <ATen/ops/soft_margin_loss_native.h>
|
1101 |
+
#include <ATen/ops/soft_margin_loss_backward_native.h>
|
1102 |
+
#include <ATen/ops/softmax_native.h>
|
1103 |
+
#include <ATen/ops/softplus_native.h>
|
1104 |
+
#include <ATen/ops/softplus_backward_native.h>
|
1105 |
+
#include <ATen/ops/softshrink_native.h>
|
1106 |
+
#include <ATen/ops/softshrink_backward_native.h>
|
1107 |
+
#include <ATen/ops/sort_native.h>
|
1108 |
+
#include <ATen/ops/sparse_bsc_tensor_native.h>
|
1109 |
+
#include <ATen/ops/sparse_bsr_tensor_native.h>
|
1110 |
+
#include <ATen/ops/sparse_compressed_tensor_native.h>
|
1111 |
+
#include <ATen/ops/sparse_coo_tensor_native.h>
|
1112 |
+
#include <ATen/ops/sparse_csc_tensor_native.h>
|
1113 |
+
#include <ATen/ops/sparse_csr_tensor_native.h>
|
1114 |
+
#include <ATen/ops/sparse_dim_native.h>
|
1115 |
+
#include <ATen/ops/sparse_mask_native.h>
|
1116 |
+
#include <ATen/ops/sparse_resize_native.h>
|
1117 |
+
#include <ATen/ops/sparse_resize_and_clear_native.h>
|
1118 |
+
#include <ATen/ops/sparse_sampled_addmm_native.h>
|
1119 |
+
#include <ATen/ops/special_airy_ai_native.h>
|
1120 |
+
#include <ATen/ops/special_bessel_j0_native.h>
|
1121 |
+
#include <ATen/ops/special_bessel_j1_native.h>
|
1122 |
+
#include <ATen/ops/special_bessel_y0_native.h>
|
1123 |
+
#include <ATen/ops/special_bessel_y1_native.h>
|
1124 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
|
1125 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
|
1126 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
|
1127 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
|
1128 |
+
#include <ATen/ops/special_digamma_native.h>
|
1129 |
+
#include <ATen/ops/special_entr_native.h>
|
1130 |
+
#include <ATen/ops/special_erf_native.h>
|
1131 |
+
#include <ATen/ops/special_erfc_native.h>
|
1132 |
+
#include <ATen/ops/special_erfcx_native.h>
|
1133 |
+
#include <ATen/ops/special_erfinv_native.h>
|
1134 |
+
#include <ATen/ops/special_exp2_native.h>
|
1135 |
+
#include <ATen/ops/special_expit_native.h>
|
1136 |
+
#include <ATen/ops/special_expm1_native.h>
|
1137 |
+
#include <ATen/ops/special_gammainc_native.h>
|
1138 |
+
#include <ATen/ops/special_gammaincc_native.h>
|
1139 |
+
#include <ATen/ops/special_gammaln_native.h>
|
1140 |
+
#include <ATen/ops/special_hermite_polynomial_h_native.h>
|
1141 |
+
#include <ATen/ops/special_hermite_polynomial_he_native.h>
|
1142 |
+
#include <ATen/ops/special_i0_native.h>
|
1143 |
+
#include <ATen/ops/special_i0e_native.h>
|
1144 |
+
#include <ATen/ops/special_i1_native.h>
|
1145 |
+
#include <ATen/ops/special_i1e_native.h>
|
1146 |
+
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
|
1147 |
+
#include <ATen/ops/special_legendre_polynomial_p_native.h>
|
1148 |
+
#include <ATen/ops/special_log1p_native.h>
|
1149 |
+
#include <ATen/ops/special_log_ndtr_native.h>
|
1150 |
+
#include <ATen/ops/special_log_softmax_native.h>
|
1151 |
+
#include <ATen/ops/special_logit_native.h>
|
1152 |
+
#include <ATen/ops/special_logsumexp_native.h>
|
1153 |
+
#include <ATen/ops/special_modified_bessel_i0_native.h>
|
1154 |
+
#include <ATen/ops/special_modified_bessel_i1_native.h>
|
1155 |
+
#include <ATen/ops/special_modified_bessel_k0_native.h>
|
1156 |
+
#include <ATen/ops/special_modified_bessel_k1_native.h>
|
1157 |
+
#include <ATen/ops/special_multigammaln_native.h>
|
1158 |
+
#include <ATen/ops/special_ndtr_native.h>
|
1159 |
+
#include <ATen/ops/special_ndtri_native.h>
|
1160 |
+
#include <ATen/ops/special_polygamma_native.h>
|
1161 |
+
#include <ATen/ops/special_psi_native.h>
|
1162 |
+
#include <ATen/ops/special_round_native.h>
|
1163 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
|
1164 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
|
1165 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
|
1166 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
|
1167 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
|
1168 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
|
1169 |
+
#include <ATen/ops/special_sinc_native.h>
|
1170 |
+
#include <ATen/ops/special_softmax_native.h>
|
1171 |
+
#include <ATen/ops/special_spherical_bessel_j0_native.h>
|
1172 |
+
#include <ATen/ops/special_xlog1py_native.h>
|
1173 |
+
#include <ATen/ops/special_xlogy_native.h>
|
1174 |
+
#include <ATen/ops/special_zeta_native.h>
|
1175 |
+
#include <ATen/ops/split_native.h>
|
1176 |
+
#include <ATen/ops/split_copy_native.h>
|
1177 |
+
#include <ATen/ops/split_with_sizes_native.h>
|
1178 |
+
#include <ATen/ops/split_with_sizes_copy_native.h>
|
1179 |
+
#include <ATen/ops/sqrt_native.h>
|
1180 |
+
#include <ATen/ops/square_native.h>
|
1181 |
+
#include <ATen/ops/squeeze_native.h>
|
1182 |
+
#include <ATen/ops/squeeze_copy_native.h>
|
1183 |
+
#include <ATen/ops/sspaddmm_native.h>
|
1184 |
+
#include <ATen/ops/stack_native.h>
|
1185 |
+
#include <ATen/ops/std_native.h>
|
1186 |
+
#include <ATen/ops/std_mean_native.h>
|
1187 |
+
#include <ATen/ops/stft_native.h>
|
1188 |
+
#include <ATen/ops/stride_native.h>
|
1189 |
+
#include <ATen/ops/sub_native.h>
|
1190 |
+
#include <ATen/ops/subtract_native.h>
|
1191 |
+
#include <ATen/ops/sum_native.h>
|
1192 |
+
#include <ATen/ops/sum_to_size_native.h>
|
1193 |
+
#include <ATen/ops/svd_native.h>
|
1194 |
+
#include <ATen/ops/swapaxes_native.h>
|
1195 |
+
#include <ATen/ops/swapdims_native.h>
|
1196 |
+
#include <ATen/ops/sym_constrain_range_native.h>
|
1197 |
+
#include <ATen/ops/sym_constrain_range_for_size_native.h>
|
1198 |
+
#include <ATen/ops/sym_numel_native.h>
|
1199 |
+
#include <ATen/ops/sym_size_native.h>
|
1200 |
+
#include <ATen/ops/sym_storage_offset_native.h>
|
1201 |
+
#include <ATen/ops/sym_stride_native.h>
|
1202 |
+
#include <ATen/ops/t_native.h>
|
1203 |
+
#include <ATen/ops/t_copy_native.h>
|
1204 |
+
#include <ATen/ops/take_native.h>
|
1205 |
+
#include <ATen/ops/take_along_dim_native.h>
|
1206 |
+
#include <ATen/ops/tan_native.h>
|
1207 |
+
#include <ATen/ops/tanh_native.h>
|
1208 |
+
#include <ATen/ops/tanh_backward_native.h>
|
1209 |
+
#include <ATen/ops/tensor_split_native.h>
|
1210 |
+
#include <ATen/ops/tensordot_native.h>
|
1211 |
+
#include <ATen/ops/thnn_conv2d_native.h>
|
1212 |
+
#include <ATen/ops/threshold_native.h>
|
1213 |
+
#include <ATen/ops/threshold_backward_native.h>
|
1214 |
+
#include <ATen/ops/tile_native.h>
|
1215 |
+
#include <ATen/ops/to_native.h>
|
1216 |
+
#include <ATen/ops/to_dense_native.h>
|
1217 |
+
#include <ATen/ops/to_dense_backward_native.h>
|
1218 |
+
#include <ATen/ops/to_mkldnn_native.h>
|
1219 |
+
#include <ATen/ops/to_mkldnn_backward_native.h>
|
1220 |
+
#include <ATen/ops/to_padded_tensor_native.h>
|
1221 |
+
#include <ATen/ops/to_sparse_native.h>
|
1222 |
+
#include <ATen/ops/to_sparse_bsc_native.h>
|
1223 |
+
#include <ATen/ops/to_sparse_bsr_native.h>
|
1224 |
+
#include <ATen/ops/to_sparse_csc_native.h>
|
1225 |
+
#include <ATen/ops/to_sparse_csr_native.h>
|
1226 |
+
#include <ATen/ops/topk_native.h>
|
1227 |
+
#include <ATen/ops/trace_native.h>
|
1228 |
+
#include <ATen/ops/trace_backward_native.h>
|
1229 |
+
#include <ATen/ops/transpose_native.h>
|
1230 |
+
#include <ATen/ops/transpose_copy_native.h>
|
1231 |
+
#include <ATen/ops/trapezoid_native.h>
|
1232 |
+
#include <ATen/ops/trapz_native.h>
|
1233 |
+
#include <ATen/ops/triangular_solve_native.h>
|
1234 |
+
#include <ATen/ops/tril_native.h>
|
1235 |
+
#include <ATen/ops/tril_indices_native.h>
|
1236 |
+
#include <ATen/ops/triplet_margin_loss_native.h>
|
1237 |
+
#include <ATen/ops/triu_native.h>
|
1238 |
+
#include <ATen/ops/triu_indices_native.h>
|
1239 |
+
#include <ATen/ops/true_divide_native.h>
|
1240 |
+
#include <ATen/ops/trunc_native.h>
|
1241 |
+
#include <ATen/ops/type_as_native.h>
|
1242 |
+
#include <ATen/ops/unbind_native.h>
|
1243 |
+
#include <ATen/ops/unbind_copy_native.h>
|
1244 |
+
#include <ATen/ops/unflatten_native.h>
|
1245 |
+
#include <ATen/ops/unflatten_dense_tensors_native.h>
|
1246 |
+
#include <ATen/ops/unfold_native.h>
|
1247 |
+
#include <ATen/ops/unfold_backward_native.h>
|
1248 |
+
#include <ATen/ops/unfold_copy_native.h>
|
1249 |
+
#include <ATen/ops/uniform_native.h>
|
1250 |
+
#include <ATen/ops/unique_consecutive_native.h>
|
1251 |
+
#include <ATen/ops/unique_dim_native.h>
|
1252 |
+
#include <ATen/ops/unique_dim_consecutive_native.h>
|
1253 |
+
#include <ATen/ops/unsafe_chunk_native.h>
|
1254 |
+
#include <ATen/ops/unsafe_split_native.h>
|
1255 |
+
#include <ATen/ops/unsafe_split_with_sizes_native.h>
|
1256 |
+
#include <ATen/ops/unsqueeze_native.h>
|
1257 |
+
#include <ATen/ops/unsqueeze_copy_native.h>
|
1258 |
+
#include <ATen/ops/upsample_bicubic2d_native.h>
|
1259 |
+
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
|
1260 |
+
#include <ATen/ops/upsample_bilinear2d_native.h>
|
1261 |
+
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
|
1262 |
+
#include <ATen/ops/upsample_linear1d_native.h>
|
1263 |
+
#include <ATen/ops/upsample_linear1d_backward_native.h>
|
1264 |
+
#include <ATen/ops/upsample_nearest1d_native.h>
|
1265 |
+
#include <ATen/ops/upsample_nearest1d_backward_native.h>
|
1266 |
+
#include <ATen/ops/upsample_nearest2d_native.h>
|
1267 |
+
#include <ATen/ops/upsample_nearest2d_backward_native.h>
|
1268 |
+
#include <ATen/ops/upsample_nearest3d_native.h>
|
1269 |
+
#include <ATen/ops/upsample_nearest3d_backward_native.h>
|
1270 |
+
#include <ATen/ops/upsample_trilinear3d_native.h>
|
1271 |
+
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
|
1272 |
+
#include <ATen/ops/value_selecting_reduction_backward_native.h>
|
1273 |
+
#include <ATen/ops/values_native.h>
|
1274 |
+
#include <ATen/ops/values_copy_native.h>
|
1275 |
+
#include <ATen/ops/vander_native.h>
|
1276 |
+
#include <ATen/ops/var_native.h>
|
1277 |
+
#include <ATen/ops/var_mean_native.h>
|
1278 |
+
#include <ATen/ops/vdot_native.h>
|
1279 |
+
#include <ATen/ops/view_native.h>
|
1280 |
+
#include <ATen/ops/view_as_native.h>
|
1281 |
+
#include <ATen/ops/view_as_complex_native.h>
|
1282 |
+
#include <ATen/ops/view_as_complex_copy_native.h>
|
1283 |
+
#include <ATen/ops/view_as_real_native.h>
|
1284 |
+
#include <ATen/ops/view_as_real_copy_native.h>
|
1285 |
+
#include <ATen/ops/view_copy_native.h>
|
1286 |
+
#include <ATen/ops/vsplit_native.h>
|
1287 |
+
#include <ATen/ops/vstack_native.h>
|
1288 |
+
#include <ATen/ops/where_native.h>
|
1289 |
+
#include <ATen/ops/xlogy_native.h>
|
1290 |
+
#include <ATen/ops/xor_native.h>
|
1291 |
+
#include <ATen/ops/zero_native.h>
|
1292 |
+
#include <ATen/ops/zeros_native.h>
|
1293 |
+
#include <ATen/ops/zeros_like_native.h>
|
1294 |
+
|
1295 |
+
|
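The per-operator `*_native.h` headers above are what the `NativeFunctions.h` umbrella is assembled from; builds that enable per-operator headers include only the specific ones they use. A minimal consumer-side sketch against the corresponding public per-operator API, assuming a working LibTorch installation (compile and link against libtorch/libc10); the umbrella `ATen/ATen.h` is used here for brevity:

// neg.cpp -- at::ones and at::neg are the public counterparts of the
// at::native::* declarations in ATen/ops/ones_native.h and neg_native.h.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::ones({2, 2});              // 2x2 tensor of ones
  at::Tensor n = at::neg(t);                    // elementwise negation
  std::cout << n.sum().item<float>() << "\n";   // prints -4
}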
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorIteratorInternal.h
ADDED
@@ -0,0 +1,72 @@
+#pragma once
+#include <ATen/native/TensorIterator.h>
+#include <c10/util/SmallBuffer.h>
+#include <c10/util/irange.h>
+
+namespace at {
+
+struct DimCounter {
+  DimCounter(IntArrayRef shape, Range range);
+
+  void increment(const std::array<int64_t, 2>& step);
+  bool is_done() const;
+  std::array<int64_t, 2> max_2d_step() const;
+
+  IntArrayRef shape;
+  Range range;
+  c10::SmallBuffer<int64_t, 4> values;
+  int64_t offset;
+};
+
+namespace internal {
+
+inline void get_data_ptrs(
+    char** ptrs,
+    ArrayRef<char*> base,
+    IntArrayRef strides,
+    IntArrayRef counter) {
+  const int64_t ntensors = base.size();
+  const int64_t ndim = counter.size();
+  std::copy(base.begin(), base.end(), ptrs);
+  for (const auto dim : c10::irange(ndim)) {
+    int64_t value = counter[dim];
+    for (const auto arg : c10::irange(ntensors)) {
+      ptrs[arg] += value * strides[dim * ntensors + arg];
+    }
+  }
+}
+
+inline void serial_for_each(
+    IntArrayRef shape,
+    IntArrayRef strides,
+    char** base_ptrs,
+    size_t ntensors,
+    typename TensorIteratorBase::loop2d_t loop,
+    Range range) {
+  const auto ndim = shape.size();
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+      strides.size() == ntensors * std::max(size_t{2}, ndim));
+
+  if (ndim <= 1) {
+    if (range.begin == 0) {
+      loop(base_ptrs, strides.data(), range.size(), 1);
+    } else {
+      c10::SmallBuffer<char*, 4> ptrs(ntensors);
+      get_data_ptrs(ptrs.data(), {base_ptrs, ntensors}, strides, {range.begin});
+      loop(ptrs.data(), strides.data(), range.size(), 1);
+    }
+  } else {
+    c10::SmallBuffer<char*, 4> ptrs(ntensors);
+    auto counter = DimCounter(shape, range);
+    while (!counter.is_done()) {
+      get_data_ptrs(
+          ptrs.data(), {base_ptrs, ntensors}, strides, counter.values);
+      auto step = counter.max_2d_step();
+      loop(ptrs.data(), strides.data(), step[0], step[1]);
+      counter.increment(step);
+    }
+  }
+}
+
+} // namespace internal
+} // namespace at
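In the header above, `serial_for_each` walks the iteration space with `DimCounter` and re-bases every operand's pointer via `get_data_ptrs`, whose stride table is laid out operand-major per dimension (`strides[dim * ntensors + arg]`). A self-contained sketch of just that addressing scheme follows; the names are illustrative, not part of ATen:

#include <cstdint>
#include <iostream>
#include <vector>

// Mirror of get_data_ptrs' addressing: strides are stored operand-major
// per dimension, so operand `arg`'s stride along `dim` sits at
// strides[dim * ntensors + arg]. Element size is folded into the strides.
void offset_ptrs(std::vector<char*>& ptrs,
                 const std::vector<char*>& base,
                 const std::vector<int64_t>& strides,
                 const std::vector<int64_t>& counter) {
  const size_t ntensors = base.size();
  ptrs = base;
  for (size_t dim = 0; dim < counter.size(); ++dim) {
    for (size_t arg = 0; arg < ntensors; ++arg) {
      ptrs[arg] += counter[dim] * strides[dim * ntensors + arg];
    }
  }
}

int main() {
  // Two operands over a 2-D (3x2 column-major-like) iteration space.
  float out[6] = {};
  float in[6] = {1, 2, 3, 4, 5, 6};
  std::vector<char*> base = {reinterpret_cast<char*>(out),
                             reinterpret_cast<char*>(in)};
  // dim-0 strides for both operands, then dim-1 strides for both.
  std::vector<int64_t> strides = {sizeof(float), sizeof(float),
                                  3 * sizeof(float), 3 * sizeof(float)};
  std::vector<char*> ptrs;
  offset_ptrs(ptrs, base, strides, /*counter=*/{1, 1});  // element (1, 1)
  std::cout << *reinterpret_cast<float*>(ptrs[1]) << "\n";  // prints 5
}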
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Utils.h
ADDED
@@ -0,0 +1,138 @@
+#pragma once
+
+#include <ATen/EmptyTensor.h>
+#include <ATen/Formatting.h>
+#include <ATen/core/ATenGeneral.h>
+#include <ATen/core/Generator.h>
+#include <c10/core/ScalarType.h>
+#include <c10/core/StorageImpl.h>
+#include <c10/core/UndefinedTensorImpl.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/Exception.h>
+#include <c10/util/accumulate.h>
+#include <c10/util/irange.h>
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <sstream>
+#include <typeinfo>
+
+#define AT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&) = delete;         \
+  void operator=(const TypeName&) = delete
+
+namespace at {
+
+TORCH_API int _crash_if_asan(int);
+
+// Converts a TensorList (i.e. ArrayRef<Tensor>) to a vector of TensorImpl*
+// NB: This is ONLY used by legacy TH bindings, and ONLY used by cat.
+// Once cat is ported entirely to ATen this can be deleted!
+static inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
+    ArrayRef<Tensor> tensors,
+    const char* name,
+    int pos,
+    c10::DeviceType device_type,
+    ScalarType scalar_type) {
+  std::vector<TensorImpl*> unwrapped;
+  unwrapped.reserve(tensors.size());
+  for (const auto i : c10::irange(tensors.size())) {
+    const auto& expr = tensors[i];
+    if (expr.layout() != Layout::Strided) {
+      AT_ERROR(
+          "Expected dense tensor but got ",
+          expr.layout(),
+          " for sequence element ",
+          i,
+          " in sequence argument at position #",
+          pos,
+          " '",
+          name,
+          "'");
+    }
+    if (expr.device().type() != device_type) {
+      AT_ERROR(
+          "Expected object of device type ",
+          device_type,
+          " but got device type ",
+          expr.device().type(),
+          " for sequence element ",
+          i,
+          " in sequence argument at position #",
+          pos,
+          " '",
+          name,
+          "'");
+    }
+    if (expr.scalar_type() != scalar_type) {
+      AT_ERROR(
+          "Expected object of scalar type ",
+          scalar_type,
+          " but got scalar type ",
+          expr.scalar_type(),
+          " for sequence element ",
+          i,
+          " in sequence argument at position #",
+          pos,
+          " '",
+          name,
+          "'");
+    }
+    unwrapped.emplace_back(expr.unsafeGetTensorImpl());
+  }
+  return unwrapped;
+}
+
+template <size_t N>
+std::array<int64_t, N> check_intlist(
+    ArrayRef<int64_t> list,
+    const char* name,
+    int pos) {
+  if (list.empty()) {
+    // TODO: is this necessary?  We used to treat nullptr-vs-not in IntList
+    // differently with strides as a way of faking optional.
+    list = {};
+  }
+  auto res = std::array<int64_t, N>();
+  if (list.size() == 1 && N > 1) {
+    res.fill(list[0]);
+    return res;
+  }
+  if (list.size() != N) {
+    AT_ERROR(
+        "Expected a list of ",
+        N,
+        " ints but got ",
+        list.size(),
+        " for argument #",
+        pos,
+        " '",
+        name,
+        "'");
+  }
+  std::copy_n(list.begin(), N, res.begin());
+  return res;
+}
+
+using at::detail::check_size_nonnegative;
+
+namespace detail {
+
+template <typename T>
+TORCH_API Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options);
+
+template <typename T>
+TORCH_API Tensor
+tensor_backend(ArrayRef<T> values, const TensorOptions& options);
+
+template <typename T>
+TORCH_API Tensor
+tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options);
+
+template <typename T>
+TORCH_API Tensor
+tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options);
+} // namespace detail
+
+} // namespace at
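`check_intlist<N>` above implements the usual ATen broadcast rule for integer-list arguments: a one-element list is splatted into all N slots, anything else must match N exactly. A standalone sketch of the same rule; this `check_intlist` is a local re-implementation, not the ATen symbol:

#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// One value broadcasts to all N slots; otherwise sizes must match.
template <size_t N>
std::array<int64_t, N> check_intlist(const std::vector<int64_t>& list) {
  std::array<int64_t, N> res{};
  if (list.size() == 1 && N > 1) {
    res.fill(list[0]);
    return res;
  }
  if (list.size() != N) {
    throw std::invalid_argument(
        "Expected a list of " + std::to_string(N) + " ints");
  }
  std::copy_n(list.begin(), N, res.begin());
  return res;
}

int main() {
  auto k = check_intlist<2>({3});     // kernel_size=3 means a 3x3 kernel
  auto s = check_intlist<2>({2, 1});  // explicit per-dimension values
  std::cout << k[0] << "x" << k[1] << ", stride " << s[0] << "," << s[1]
            << "\n";  // prints "3x3, stride 2,1"
}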
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/WrapDimUtilsMulti.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+#include <ATen/WrapDimUtils.h>
+#include <c10/core/TensorImpl.h>
+#include <c10/util/irange.h>
+#include <bitset>
+#include <sstream>
+
+namespace at {
+
+// This is in an extra file to work around strange interaction of
+// bitset on Windows with operator overloading
+
+constexpr size_t dim_bitset_size = 64;
+
+static inline std::bitset<dim_bitset_size> dim_list_to_bitset(
+    OptionalIntArrayRef opt_dims,
+    size_t ndims) {
+  TORCH_CHECK(
+      ndims <= dim_bitset_size,
+      "only tensors with up to ",
+      dim_bitset_size,
+      " dims are supported");
+  std::bitset<dim_bitset_size> seen;
+  if (opt_dims.has_value()) {
+    auto dims = opt_dims.value();
+    for (const auto i : c10::irange(dims.size())) {
+      size_t dim = maybe_wrap_dim(dims[i], static_cast<int64_t>(ndims));
+      TORCH_CHECK(
+          !seen[dim],
+          "dim ",
+          dim,
+          " appears multiple times in the list of dims");
+      seen[dim] = true;
+    }
+  } else {
+    for (size_t dim = 0; dim < ndims; dim++) {
+      seen[dim] = true;
+    }
+  }
+  return seen;
+}
+
+} // namespace at
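`dim_list_to_bitset` wraps negative dims, rejects duplicates, and treats an absent dim list as "all dims". A standalone sketch of the wrapping-plus-dedup behavior, with plain exceptions standing in for TORCH_CHECK:

#include <bitset>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Negative dims wrap (dim += ndims), duplicates are an error.
std::bitset<64> dims_to_bitset(const std::vector<int64_t>& dims,
                               size_t ndims) {
  std::bitset<64> seen;
  for (int64_t d : dims) {
    int64_t wrapped = d < 0 ? d + static_cast<int64_t>(ndims) : d;
    if (wrapped < 0 || wrapped >= static_cast<int64_t>(ndims))
      throw std::out_of_range("dim out of range");
    if (seen[wrapped])
      throw std::invalid_argument("dim appears multiple times");
    seen[wrapped] = true;
  }
  return seen;
}

int main() {
  // For a 4-d tensor, {-1, 0} selects dims 3 and 0.
  auto mask = dims_to_bitset({-1, 0}, 4);
  std::cout << mask.to_string().substr(60) << "\n";  // prints 1001
}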
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDAGuardImpl.h
ADDED
@@ -0,0 +1,211 @@
+#pragma once
+
+#include <c10/core/DeviceGuard.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/core/impl/GPUTrace.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+
+#include <c10/cuda/CUDACachingAllocator.h>
+#include <c10/cuda/CUDAException.h>
+#include <c10/cuda/CUDAFunctions.h>
+#include <c10/cuda/CUDAStream.h>
+
+#include <cuda_runtime_api.h>
+
+namespace c10 {
+namespace cuda {
+namespace impl {
+
+struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
+  static constexpr DeviceType static_type = DeviceType::CUDA;
+
+  CUDAGuardImpl() = default;
+  explicit CUDAGuardImpl(DeviceType t) {
+    TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA);
+  }
+  DeviceType type() const override {
+    return DeviceType::CUDA;
+  }
+  Device exchangeDevice(Device d) const override {
+    TORCH_INTERNAL_ASSERT(d.is_cuda());
+    int old_device_index = c10::cuda::ExchangeDevice(d.index());
+    return Device(DeviceType::CUDA, old_device_index);
+  }
+  Device getDevice() const override {
+    int device;
+    C10_CUDA_CHECK(c10::cuda::GetDevice(&device));
+    return Device(DeviceType::CUDA, device);
+  }
+  c10::optional<Device> uncheckedGetDevice() const noexcept {
+    int device;
+    const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device));
+    C10_CUDA_CHECK_WARN(err);
+    if (err != cudaSuccess) {
+      return c10::nullopt;
+    }
+    return Device(DeviceType::CUDA, device);
+  }
+  void setDevice(Device d) const override {
+    TORCH_INTERNAL_ASSERT(d.is_cuda());
+    C10_CUDA_CHECK(c10::cuda::SetDevice(d.index()));
+  }
+  void uncheckedSetDevice(Device d) const noexcept override {
+    C10_CUDA_CHECK_WARN(c10::cuda::MaybeSetDevice(d.index()));
+  }
+  Stream getStream(Device d) const noexcept override {
+    return getCurrentCUDAStream(d.index()).unwrap();
+  }
+  Stream getDefaultStream(Device d) const override {
+    return getDefaultCUDAStream(d.index());
+  }
+  Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
+      const override {
+    return getStreamFromPool(isHighPriority, d.index());
+  }
+  // NB: These do NOT set the current device
+  Stream exchangeStream(Stream s) const noexcept override {
+    CUDAStream cs(s);
+    auto old_stream = getCurrentCUDAStream(s.device().index());
+    setCurrentCUDAStream(cs);
+    return old_stream.unwrap();
+  }
+  DeviceIndex deviceCount() const noexcept override {
+    return device_count();
+  }
+
+  // Event-related functions
+  void createEvent(cudaEvent_t* cuda_event, const EventFlag flag) const {
+    // Maps PyTorch's Event::Flag to CUDA flag
+    auto cuda_flag = cudaEventDefault;
+    switch (flag) {
+      case EventFlag::PYTORCH_DEFAULT:
+      case EventFlag::CUDA_EVENT_DISABLE_TIMING:
+        cuda_flag = cudaEventDisableTiming;
+        break;
+      case EventFlag::BACKEND_DEFAULT:
+      case EventFlag::CUDA_EVENT_DEFAULT:
+        cuda_flag = cudaEventDefault;
+        break;
+      default:
+        TORCH_CHECK(false, "CUDA event received unknown flag");
+    }
+
+    C10_CUDA_CHECK(cudaEventCreateWithFlags(cuda_event, cuda_flag));
+    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+    if (C10_UNLIKELY(interp)) {
+      (*interp)->trace_gpu_event_creation(
+          reinterpret_cast<uintptr_t>(cuda_event));
+    }
+  }
+
+  void destroyEvent(void* event, const DeviceIndex device_index)
+      const noexcept override {
+    if (!event)
+      return;
+    auto cuda_event = static_cast<cudaEvent_t>(event);
+    int orig_device;
+    C10_CUDA_CHECK_WARN(c10::cuda::GetDevice(&orig_device));
+    C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(device_index));
+    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+    if (C10_UNLIKELY(interp)) {
+      (*interp)->trace_gpu_event_deletion(
+          reinterpret_cast<uintptr_t>(cuda_event));
+    }
+    C10_CUDA_CHECK_WARN(cudaEventDestroy(cuda_event));
+    C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(orig_device));
+  }
+
+  void record(
+      void** event,
+      const Stream& stream,
+      const DeviceIndex device_index,
+      const EventFlag flag) const override {
+    TORCH_CHECK(
+        device_index == -1 || device_index == stream.device_index(),
+        "Event device index ",
+        device_index,
+        " does not match recording stream's device index ",
+        stream.device_index(),
+        ".");
+
+    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(*event);
+    CUDAStream cuda_stream{stream};
+
+    // Moves to stream's device to record
+    const auto orig_device = getDevice();
+    setDevice(stream.device());
+
+    // Creates the event (lazily)
+    if (!cuda_event)
+      createEvent(&cuda_event, flag);
+    C10_CUDA_CHECK(cudaEventRecord(cuda_event, cuda_stream));
+    // Makes the void* point to the (possibly just allocated) CUDA event
+    *event = cuda_event;
+    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+    if (C10_UNLIKELY(interp)) {
+      (*interp)->trace_gpu_event_record(
+          reinterpret_cast<uintptr_t>(cuda_event),
+          reinterpret_cast<uintptr_t>(cuda_stream.stream()));
+    }
+
+    // Resets device
+    setDevice(orig_device);
+  }
+
+  void block(void* event, const Stream& stream) const override {
+    if (!event)
+      return;
+    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
+    CUDAStream cuda_stream{stream};
+    const auto orig_device = getDevice();
+    setDevice(stream.device());
+    C10_CUDA_CHECK(cudaStreamWaitEvent(
+        cuda_stream,
+        cuda_event,
+        /*flags (must be zero)=*/0));
+    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
+    if (C10_UNLIKELY(interp)) {
+      (*interp)->trace_gpu_event_wait(
+          reinterpret_cast<uintptr_t>(cuda_event),
+          reinterpret_cast<uintptr_t>(cuda_stream.stream()));
+    }
+    setDevice(orig_device);
+  }
+
+  // May be called from any device
+  bool queryEvent(void* event) const override {
+    if (!event)
+      return true;
+    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
+    const cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaEventQuery(cuda_event));
+    if (err != cudaErrorNotReady) {
+      C10_CUDA_CHECK(err);
+    } else {
+      // ignore and clear the error if not ready
+      (void)cudaGetLastError();
+    }
+    return (err == cudaSuccess);
+  }
+
+  // Stream-related functions
+  bool queryStream(const Stream& stream) const override {
+    CUDAStream cuda_stream{stream};
+    return cuda_stream.query();
+  }
+
+  void synchronizeStream(const Stream& stream) const override {
+    CUDAStream cuda_stream{stream};
+    cuda_stream.synchronize();
+  }
+
+  void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
+      const override {
+    CUDAStream cuda_stream{stream};
+    CUDACachingAllocator::recordStream(data_ptr, cuda_stream);
+  }
+};
+
+} // namespace impl
+} // namespace cuda
+} // namespace c10
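`CUDAGuardImpl` is the backend that lets `c10::DeviceGuard` save the current device, switch to another, and restore on scope exit. A minimal RAII sketch of that save/switch/restore contract against the raw CUDA runtime API (error handling omitted; this is an illustration, not the c10 implementation):

#include <cuda_runtime_api.h>
#include <cstdio>

// Remember the current device, switch, restore when the scope ends.
class DeviceGuard {
 public:
  explicit DeviceGuard(int device) {
    cudaGetDevice(&prev_);  // save, like getDevice()/exchangeDevice()
    cudaSetDevice(device);  // switch
  }
  ~DeviceGuard() {
    cudaSetDevice(prev_);   // restore on scope exit
  }
  DeviceGuard(const DeviceGuard&) = delete;
  DeviceGuard& operator=(const DeviceGuard&) = delete;

 private:
  int prev_ = 0;
};

int main() {
  int before = -1, inside = -1, after = -1;
  cudaGetDevice(&before);
  {
    DeviceGuard g(0);       // device 0 is current within this scope
    cudaGetDevice(&inside);
  }
  cudaGetDevice(&after);    // restored
  std::printf("%d -> %d -> %d\n", before, inside, after);
}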
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/impl/CUDATest.h
ADDED
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <c10/cuda/CUDAMacros.h>
+
+namespace c10 {
+namespace cuda {
+namespace impl {
+
+C10_CUDA_API int c10_cuda_test();
+
+}
+} // namespace cuda
+} // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/c10/cuda/impl/cuda_cmake_macros.h
ADDED
@@ -0,0 +1,6 @@
+#pragma once
+
+// Automatically generated header file for the C10 CUDA library. Do not
+// include this file directly. Instead, include c10/cuda/CUDAMacros.h
+
+#define C10_CUDA_BUILD_SHARED_LIBS
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h
ADDED
@@ -0,0 +1,174 @@
+//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
+//
+//===----------------------------------------------------------------------===//
+
+// ATen: modified from llvm::AlignOf
+// replaced LLVM_ALIGNAS with alignas
+
+#pragma once
+
+#include <cstddef>
+
+namespace c10 {
+
+/// \struct AlignedCharArray
+/// \brief Helper for building an aligned character array type.
+///
+/// This template is used to explicitly build up a collection of aligned
+/// character array types. We have to build these up using a macro and explicit
+/// specialization to cope with MSVC (at least till 2015) where only an
+/// integer literal can be used to specify an alignment constraint. Once built
+/// up here, we can then begin to indirect between these using normal C++
+/// template parameters.
+
+// MSVC requires special handling here.
+#ifndef _MSC_VER
+
+template <size_t Alignment, size_t Size>
+struct AlignedCharArray {
+  alignas(Alignment) char buffer[Size];
+};
+
+#else // _MSC_VER
+
+/// \brief Create a type with an aligned char buffer.
+template <size_t Alignment, size_t Size>
+struct AlignedCharArray;
+
+// We provide special variations of this template for the most common
+// alignments because __declspec(align(...)) doesn't actually work when it is
+// a member of a by-value function argument in MSVC, even if the alignment
+// request is something reasonably like 8-byte or 16-byte. Note that we can't
+// even include the declspec with the union that forces the alignment because
+// MSVC warns on the existence of the declspec despite the union member forcing
+// proper alignment.
+
+template <size_t Size>
+struct AlignedCharArray<1, Size> {
+  union {
+    char aligned;
+    char buffer[Size];
+  };
+};
+
+template <size_t Size>
+struct AlignedCharArray<2, Size> {
+  union {
+    short aligned;
+    char buffer[Size];
+  };
+};
+
+template <size_t Size>
+struct AlignedCharArray<4, Size> {
+  union {
+    int aligned;
+    char buffer[Size];
+  };
+};
+
+template <size_t Size>
+struct AlignedCharArray<8, Size> {
+  union {
+    double aligned;
+    char buffer[Size];
+  };
+};
+
+// The rest of these are provided with a __declspec(align(...)) and we simply
+// can't pass them by-value as function arguments on MSVC.
+
+#define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+  template <size_t Size>                          \
+  struct AlignedCharArray<x, Size> {              \
+    __declspec(align(x)) char buffer[Size];       \
+  };
+
+AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
+AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
+AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
+AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
+
+#undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#endif // _MSC_VER
+
+namespace detail {
+template <
+    typename T1,
+    typename T2 = char,
+    typename T3 = char,
+    typename T4 = char,
+    typename T5 = char,
+    typename T6 = char,
+    typename T7 = char,
+    typename T8 = char,
+    typename T9 = char,
+    typename T10 = char>
+class AlignerImpl {
+  T1 t1;
+  T2 t2;
+  T3 t3;
+  T4 t4;
+  T5 t5;
+  T6 t6;
+  T7 t7;
+  T8 t8;
+  T9 t9;
+  T10 t10;
+
+ public:
+  AlignerImpl() = delete;
+};
+
+template <
+    typename T1,
+    typename T2 = char,
+    typename T3 = char,
+    typename T4 = char,
+    typename T5 = char,
+    typename T6 = char,
+    typename T7 = char,
+    typename T8 = char,
+    typename T9 = char,
+    typename T10 = char>
+union SizerImpl {
+  char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
+      arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
+      arr9[sizeof(T9)], arr10[sizeof(T10)];
+};
+} // end namespace detail
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to ten types.
+///
+/// These types may be arrays, structs, or any other types. The goal is to
+/// expose a char array buffer member which can be used as suitable storage for
+/// a placement new of any of these types. Support for more than ten types can
+/// be added at the cost of more boilerplate.
+template <
+    typename T1,
+    typename T2 = char,
+    typename T3 = char,
+    typename T4 = char,
+    typename T5 = char,
+    typename T6 = char,
+    typename T7 = char,
+    typename T8 = char,
+    typename T9 = char,
+    typename T10 = char>
+struct AlignedCharArrayUnion
+    : AlignedCharArray<
+          alignof(detail::AlignerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>),
+          sizeof(::c10::detail::
+                     SizerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>)> {};
+} // end namespace c10
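`AlignedCharArrayUnion` exists to provide raw storage aligned and sized for any of up to ten types, as a placement-new target; the ten-parameter boilerplate predates variadic templates. With variadics the same idea is a few lines. A C++14 sketch with illustrative names (not the c10 types):

#include <algorithm>
#include <iostream>
#include <new>
#include <string>

// Char buffer aligned for every listed type and sized for the largest.
template <typename... Ts>
struct AlignedStorage {
  alignas(Ts...) char buffer[std::max({sizeof(Ts)...})];
};

int main() {
  AlignedStorage<int, double, std::string> storage;
  // Construct an object in the raw buffer; destroy it manually afterwards.
  auto* s = new (storage.buffer) std::string("placement new");
  std::cout << *s << " (buffer holds " << sizeof(storage.buffer)
            << " bytes)\n";
  s->~basic_string();  // placement-new'd objects need an explicit destructor
}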
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h
ADDED
@@ -0,0 +1,121 @@
+// Copyright 2023-present Facebook. All Rights Reserved.
+
+#pragma once
+
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <list>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <c10/macros/Macros.h>
+#include <c10/util/Optional.h>
+#include <c10/util/hash.h>
+
+#ifndef _WIN32
+#include <ctime>
+#endif
+#if defined(C10_IOS) && defined(C10_MOBILE)
+#include <sys/time.h> // for gettimeofday()
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
+#define C10_RDTSC
+#if defined(_MSC_VER)
+#include <intrin.h>
+#elif defined(__CUDACC__) || defined(__HIPCC__)
+#undef C10_RDTSC
+#elif defined(__clang__)
+// `__rdtsc` is available by default.
+// NB: This has to be first, because Clang will also define `__GNUC__`
+#elif defined(__GNUC__)
+#include <x86intrin.h>
+#else
+#undef C10_RDTSC
+#endif
+#endif
+
+namespace c10 {
+
+using time_t = int64_t;
+using steady_clock_t = std::conditional<
+    std::chrono::high_resolution_clock::is_steady,
+    std::chrono::high_resolution_clock,
+    std::chrono::steady_clock>::type;
+
+inline time_t getTimeSinceEpoch() {
+  auto now = std::chrono::system_clock::now().time_since_epoch();
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
+}
+
+inline time_t getTime(bool allow_monotonic = false) {
+#if defined(C10_IOS) && defined(C10_MOBILE)
+  // clock_gettime is only available on iOS 10.0 or newer. Unlike OS X, iOS
+  // can't rely on CLOCK_REALTIME, as it is defined no matter if clock_gettime
+  // is implemented or not
+  struct timeval now;
+  gettimeofday(&now, NULL);
+  return static_cast<time_t>(now.tv_sec) * 1000000000 +
+      static_cast<time_t>(now.tv_usec) * 1000;
+#elif defined(_WIN32) || defined(__MACH__)
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             steady_clock_t::now().time_since_epoch())
+      .count();
+#else
+  // clock_gettime is *much* faster than std::chrono implementation on Linux
+  struct timespec t {};
+  auto mode = CLOCK_REALTIME;
+  if (allow_monotonic) {
+    mode = CLOCK_MONOTONIC;
+  }
+  clock_gettime(mode, &t);
+  return static_cast<time_t>(t.tv_sec) * 1000000000 +
+      static_cast<time_t>(t.tv_nsec);
+#endif
+}
+
+// We often do not need to capture true wall times. If a fast mechanism such
+// as TSC is available we can use that instead and convert back to epoch time
+// during post processing. This greatly reduce the clock's contribution to
+// profiling.
+// http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
+// https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
+// TODO: We should use
+// `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
+inline auto getApproximateTime() {
+#if defined(C10_RDTSC)
+  return static_cast<uint64_t>(__rdtsc());
+#else
+  return getTime();
+#endif
+}
+
+using approx_time_t = decltype(getApproximateTime());
+static_assert(
+    std::is_same<approx_time_t, int64_t>::value ||
+        std::is_same<approx_time_t, uint64_t>::value,
+    "Expected either int64_t (`getTime`) or uint64_t (some TSC reads).");
+
+// Convert `getCount` results to Nanoseconds since unix epoch.
+class C10_API ApproximateClockToUnixTimeConverter final {
+ public:
+  ApproximateClockToUnixTimeConverter();
+  std::function<time_t(approx_time_t)> makeConverter();
+
+  struct UnixAndApproximateTimePair {
+    time_t t_;
+    approx_time_t approx_t_;
+  };
+  static UnixAndApproximateTimePair measurePair();
+
+ private:
+  static constexpr size_t replicates = 1001;
+  using time_pairs = std::array<UnixAndApproximateTimePair, replicates>;
+  time_pairs measurePairs();
+
+  time_pairs start_times_;
+};
+
+} // namespace c10
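The approximate clock trades wall-time accuracy on the hot path for a cheap TSC read, converting back to epoch nanoseconds in post-processing; that is what `ApproximateClockToUnixTimeConverter` does from its 1001 calibration pairs. A crude single-pair sketch of the conversion idea, assuming an x86 TSC via `__rdtsc` (a real converter takes many replicates and a more careful fit):

#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <x86intrin.h>

// Epoch nanoseconds from the system clock.
int64_t wall_ns() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::system_clock::now().time_since_epoch())
      .count();
}

// Calibrate ns-per-TSC-count once, then map raw counts to epoch ns.
std::function<int64_t(uint64_t)> make_converter() {
  const int64_t t0 = wall_ns();
  const uint64_t c0 = __rdtsc();
  while (wall_ns() - t0 < 10'000'000) { /* spin ~10 ms to calibrate */ }
  const int64_t t1 = wall_ns();
  const uint64_t c1 = __rdtsc();
  const double ns_per_count = double(t1 - t0) / double(c1 - c0);
  return [=](uint64_t count) {
    return t0 + int64_t(double(count - c0) * ns_per_count);
  };
}

int main() {
  auto to_epoch_ns = make_converter();
  uint64_t cheap_stamp = __rdtsc();  // fast read on the hot path
  std::cout << to_epoch_ns(cheap_stamp) << "\n";  // converted afterwards
}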
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h
ADDED
@@ -0,0 +1,343 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <c10/util/bit_cast.h>
+
+#include <limits>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
+#endif
+
+#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
+#if defined(CL_SYCL_LANGUAGE_VERSION)
+#include <CL/sycl.hpp> // for SYCL 1.2.1
+#else
+#include <sycl/sycl.hpp> // for SYCL 2020
+#endif
+#include <ext/oneapi/bfloat16.hpp>
+#endif
+
+namespace c10 {
+
+/// Constructors
+inline C10_HOST_DEVICE BFloat16::BFloat16(float value)
+    :
+#if defined(__CUDACC__) && !defined(USE_ROCM) && defined(__CUDA_ARCH__) && \
+    __CUDA_ARCH__ >= 800
+      x(__bfloat16_as_ushort(__float2bfloat16(value)))
+#elif defined(__SYCL_DEVICE_ONLY__) && \
+    defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
+      x(c10::bit_cast<uint16_t>(sycl::ext::oneapi::bfloat16(value)))
+#else
+      // RNE by default
+      x(detail::round_to_nearest_even(value))
+#endif
+{
+}
+
+/// Implicit conversions
+inline C10_HOST_DEVICE BFloat16::operator float() const {
+#if defined(__CUDACC__) && !defined(USE_ROCM)
+  return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
+#elif defined(__SYCL_DEVICE_ONLY__) && \
+    defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
+  return float(*reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x));
+#else
+  return detail::f32_from_bits(x);
+#endif
+}
+
+#if defined(__CUDACC__) && !defined(USE_ROCM)
+inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) {
+  x = *reinterpret_cast<const unsigned short*>(&value);
+}
+inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const {
+  return *reinterpret_cast<const __nv_bfloat16*>(&x);
+}
+#endif
+
+#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
+inline C10_HOST_DEVICE BFloat16::BFloat16(
+    const sycl::ext::oneapi::bfloat16& value) {
+  x = *reinterpret_cast<const unsigned short*>(&value);
+}
+inline C10_HOST_DEVICE BFloat16::operator sycl::ext::oneapi::bfloat16() const {
+  return *reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x);
+}
+#endif
+
+// CUDA intrinsics
+
+#if defined(__CUDACC__) || defined(__HIPCC__)
+inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) {
+#if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
+  return __ldg(reinterpret_cast<const __nv_bfloat16*>(ptr));
+#else
+  return *ptr;
+#endif
+}
+#endif
+
+/// Arithmetic
+
+inline C10_HOST_DEVICE BFloat16
+operator+(const BFloat16& a, const BFloat16& b) {
+  return static_cast<float>(a) + static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE BFloat16
+operator-(const BFloat16& a, const BFloat16& b) {
+  return static_cast<float>(a) - static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE BFloat16
+operator*(const BFloat16& a, const BFloat16& b) {
+  return static_cast<float>(a) * static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b)
+    __ubsan_ignore_float_divide_by_zero__ {
+  return static_cast<float>(a) / static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) {
+  return -static_cast<float>(a);
+}
+
+inline C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) {
+  a = a + b;
+  return a;
+}
+
+inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) {
+  a = a - b;
+  return a;
+}
+
+inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) {
+  a = a * b;
+  return a;
+}
+
+inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) {
+  a = a / b;
+  return a;
+}
+
+inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) {
+  a.x = a.x | b.x;
+  return a;
+}
+
+inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) {
+  a.x = a.x ^ b.x;
+  return a;
+}
+
+inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) {
+  a.x = a.x & b.x;
+  return a;
+}
+
+/// Arithmetic with floats
+
+inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) {
+  return static_cast<float>(a) + b;
+}
+inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) {
+  return static_cast<float>(a) - b;
+}
+inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) {
+  return static_cast<float>(a) * b;
+}
+inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) {
+  return static_cast<float>(a) / b;
+}
+
+inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) {
+  return a + static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) {
+  return a - static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) {
+  return a * static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) {
+  return a / static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) {
+  return a += static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) {
+  return a -= static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) {
+  return a *= static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) {
+  return a /= static_cast<float>(b);
+}
+
+/// Arithmetic with doubles
+
+inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) {
+  return static_cast<double>(a) + b;
+}
+inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) {
+  return static_cast<double>(a) - b;
+}
+inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) {
+  return static_cast<double>(a) * b;
+}
+inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) {
+  return static_cast<double>(a) / b;
+}
+
+inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) {
+  return a + static_cast<double>(b);
+}
+inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) {
+  return a - static_cast<double>(b);
+}
+inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) {
+  return a * static_cast<double>(b);
+}
+inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) {
+  return a / static_cast<double>(b);
+}
+
+/// Arithmetic with ints
+
+inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) {
+  return a + static_cast<BFloat16>(b);
+}
+inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) {
+  return a - static_cast<BFloat16>(b);
+}
+inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) {
+  return a * static_cast<BFloat16>(b);
+}
+inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) {
+  return a / static_cast<BFloat16>(b);
+}
+
+inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) {
+  return static_cast<BFloat16>(a) + b;
+}
+inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) {
+  return static_cast<BFloat16>(a) - b;
+}
+inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) {
+  return static_cast<BFloat16>(a) * b;
+}
+inline C10_HOST_DEVICE BFloat16 operator/(int a, BFloat16 b) {
+  return static_cast<BFloat16>(a) / b;
+}
+
+//// Arithmetic with int64_t
+
+inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) {
+  return a + static_cast<BFloat16>(b);
+}
+inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) {
+  return a - static_cast<BFloat16>(b);
+}
+inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) {
+  return a * static_cast<BFloat16>(b);
+}
+inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) {
+  return a / static_cast<BFloat16>(b);
+}
+
+inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) {
+  return static_cast<BFloat16>(a) + b;
+}
+inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) {
+  return static_cast<BFloat16>(a) - b;
+}
+inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) {
+  return static_cast<BFloat16>(a) * b;
+}
+inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) {
+  return static_cast<BFloat16>(a) / b;
+}
+
+// Overloading < and > operators, because std::max and std::min use them.
+
+inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) {
+  return float(lhs) > float(rhs);
+}
+
+inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) {
+  return float(lhs) < float(rhs);
+}
+
+} // namespace c10
+
+namespace std {
+
+template <>
+class numeric_limits<c10::BFloat16> {
+ public:
286 |
+
static constexpr bool is_signed = true;
|
287 |
+
static constexpr bool is_specialized = true;
|
288 |
+
static constexpr bool is_integer = false;
|
289 |
+
static constexpr bool is_exact = false;
|
290 |
+
static constexpr bool has_infinity = true;
|
291 |
+
static constexpr bool has_quiet_NaN = true;
|
292 |
+
static constexpr bool has_signaling_NaN = true;
|
293 |
+
static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
|
294 |
+
static constexpr auto has_denorm_loss =
|
295 |
+
numeric_limits<float>::has_denorm_loss;
|
296 |
+
static constexpr auto round_style = numeric_limits<float>::round_style;
|
297 |
+
static constexpr bool is_iec559 = false;
|
298 |
+
static constexpr bool is_bounded = true;
|
299 |
+
static constexpr bool is_modulo = false;
|
300 |
+
static constexpr int digits = 8;
|
301 |
+
static constexpr int digits10 = 2;
|
302 |
+
static constexpr int max_digits10 = 4;
|
303 |
+
static constexpr int radix = 2;
|
304 |
+
static constexpr int min_exponent = -125;
|
305 |
+
static constexpr int min_exponent10 = -37;
|
306 |
+
static constexpr int max_exponent = 128;
|
307 |
+
static constexpr int max_exponent10 = 38;
|
308 |
+
static constexpr auto traps = numeric_limits<float>::traps;
|
309 |
+
static constexpr auto tinyness_before =
|
310 |
+
numeric_limits<float>::tinyness_before;
|
311 |
+
|
312 |
+
static constexpr c10::BFloat16 min() {
|
313 |
+
return c10::BFloat16(0x0080, c10::BFloat16::from_bits());
|
314 |
+
}
|
315 |
+
static constexpr c10::BFloat16 lowest() {
|
316 |
+
return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits());
|
317 |
+
}
|
318 |
+
static constexpr c10::BFloat16 max() {
|
319 |
+
return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits());
|
320 |
+
}
|
321 |
+
static constexpr c10::BFloat16 epsilon() {
|
322 |
+
return c10::BFloat16(0x3C00, c10::BFloat16::from_bits());
|
323 |
+
}
|
324 |
+
static constexpr c10::BFloat16 round_error() {
|
325 |
+
return c10::BFloat16(0x3F00, c10::BFloat16::from_bits());
|
326 |
+
}
|
327 |
+
static constexpr c10::BFloat16 infinity() {
|
328 |
+
return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
|
329 |
+
}
|
330 |
+
static constexpr c10::BFloat16 quiet_NaN() {
|
331 |
+
return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits());
|
332 |
+
}
|
333 |
+
static constexpr c10::BFloat16 signaling_NaN() {
|
334 |
+
return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
|
335 |
+
}
|
336 |
+
static constexpr c10::BFloat16 denorm_min() {
|
337 |
+
return c10::BFloat16(0x0001, c10::BFloat16::from_bits());
|
338 |
+
}
|
339 |
+
};
|
340 |
+
|
341 |
+
} // namespace std
|
342 |
+
|
343 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
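For context: every BFloat16 operator above promotes to float (or double), computes there, and truncates on the way back, so BFloat16 arithmetic behaves like float arithmetic with a lossy store. A minimal usage sketch, illustration only and assuming a PyTorch installation whose include directory (as vendored above) is on the compiler's include path:

#include <c10/util/BFloat16.h>
#include <iostream>
#include <limits>

int main() {
  c10::BFloat16 a(1.5f), b(2.25f);
  c10::BFloat16 c = a * b;  // evaluated as float, truncated back to bfloat16
  float f = a + 0.5f;       // BFloat16 + float yields plain float
  std::cout << static_cast<float>(c) << " " << f << "\n";  // 3.375 2
  // The numeric_limits specialization above: 8 mantissa digits, epsilon 2^-7.
  std::cout << std::numeric_limits<c10::BFloat16>::digits << " "
            << static_cast<float>(
                   std::numeric_limits<c10::BFloat16>::epsilon())
            << "\n";  // 8 0.0078125
}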
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h
ADDED
@@ -0,0 +1,67 @@
#pragma once

#include <atomic>
#include <mutex>
#include <utility>

#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>

namespace c10 {

// custom c10 call_once implementation to avoid the deadlock in std::call_once.
// The implementation here is a simplified version of the one in folly, and
// likely has a much higher memory footprint.
template <typename Flag, typename F, typename... Args>
inline void call_once(Flag& flag, F&& f, Args&&... args) {
  if (C10_LIKELY(flag.test_once())) {
    return;
  }
  flag.call_once_slow(std::forward<F>(f), std::forward<Args>(args)...);
}

class once_flag {
 public:
#ifndef _WIN32
  // running into build error on MSVC. Can't seem to get a repro locally so I'm
  // just avoiding constexpr
  //
  // C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error:
  // defaulted default constructor cannot be constexpr because the
  // corresponding implicitly declared default constructor would not be
  // constexpr 1 error detected in the compilation of
  // "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu".
  constexpr
#endif
      once_flag() noexcept = default;
  once_flag(const once_flag&) = delete;
  once_flag& operator=(const once_flag&) = delete;

 private:
  template <typename Flag, typename F, typename... Args>
  friend void call_once(Flag& flag, F&& f, Args&&... args);

  template <typename F, typename... Args>
  void call_once_slow(F&& f, Args&&... args) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (init_.load(std::memory_order_relaxed)) {
      return;
    }
    c10::guts::invoke(f, std::forward<Args>(args)...);
    init_.store(true, std::memory_order_release);
  }

  bool test_once() {
    return init_.load(std::memory_order_acquire);
  }

  void reset_once() {
    init_.store(false, std::memory_order_release);
  }

 private:
  std::mutex mutex_;
  std::atomic<bool> init_{false};
};

} // namespace c10
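For context, a short usage sketch of the API above (illustration only, not part of the diff): c10::call_once pairs with c10::once_flag the way std::call_once pairs with std::once_flag, but callers after initialization take the lock-free acquire-load fast path in test_once() instead of entering the mutex-guarded slow path.

#include <c10/util/CallOnce.h>
#include <iostream>

static c10::once_flag g_init_flag;

void init_backend(int device_count) {
  std::cout << "initializing " << device_count << " devices\n";
}

void ensure_backend() {
  // Invokes init_backend exactly once even with concurrent callers;
  // subsequent calls return after a single atomic acquire load.
  c10::call_once(g_init_flag, init_backend, /*device_count=*/4);
}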
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/DimVector.h
ADDED
@@ -0,0 +1,16 @@
#pragma once

#include <c10/core/SymInt.h>
#include <c10/core/impl/SizesAndStrides.h>
#include <c10/util/SmallVector.h>
#include <cstdint>

namespace c10 {

constexpr size_t kDimVectorStaticSize = C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;

/// A container for sizes or strides
using DimVector = SmallVector<int64_t, kDimVectorStaticSize>;
using SymDimVector = SmallVector<c10::SymInt, kDimVectorStaticSize>;

} // namespace c10
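For context (illustration only, not from the diff): because DimVector is a SmallVector with inline capacity C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE, shapes up to that rank live entirely in the object itself and only higher-rank shapes spill to the heap.

#include <c10/util/DimVector.h>

// Typical tensor ranks fit in the inline buffer, so building and
// iterating a shape does not touch the heap.
int64_t numel_of(const c10::DimVector& shape) {
  int64_t n = 1;
  for (int64_t d : shape) {
    n *= d;  // SmallVector iterates like std::vector
  }
  return n;
}

c10::DimVector example_shape{2, 3, 4};  // numel_of(example_shape) == 24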
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Exception.h
ADDED
@@ -0,0 +1,715 @@
#ifndef C10_UTIL_EXCEPTION_H_
#define C10_UTIL_EXCEPTION_H_

#include <c10/macros/Macros.h>
#include <c10/util/StringUtil.h>

#include <cstddef>
#include <exception>
#include <string>
#include <variant>
#include <vector>

#if defined(_MSC_VER) && _MSC_VER <= 1900
#define __func__ __FUNCTION__
#endif

namespace c10 {

/// The primary ATen error class.
/// Provides a complete error message with source location information via
/// `what()`, and a more concise message via `what_without_backtrace()`.
/// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead.
///
/// NB: c10::Error is handled specially by the default torch to suppress the
/// backtrace, see torch/csrc/Exceptions.h
class C10_API Error : public std::exception {
  // The actual error message.
  std::string msg_;

  // Context for the message (in order of decreasing specificity). Context will
  // be automatically formatted appropriately, so it is not necessary to add
  // extra leading/trailing newlines to strings inside this vector
  std::vector<std::string> context_;

  // The C++ backtrace at the point when this exception was raised. This
  // may be empty if there is no valid backtrace. (We don't use optional
  // here to reduce the dependencies this file has.)
  std::string backtrace_;

  // These two are derived fields from msg_stack_ and backtrace_, but we need
  // fields for the strings so that we can return a const char* (as the
  // signature of std::exception requires). Currently, the invariant
  // is that these fields are ALWAYS populated consistently with respect
  // to msg_stack_ and backtrace_.
  std::string what_;
  std::string what_without_backtrace_;

  // This is a little debugging trick: you can stash a relevant pointer
  // in caller, and then when you catch the exception, you can compare
  // against pointers you have on hand to get more information about
  // where the exception came from. In Caffe2, this is used to figure
  // out which operator raised an exception.
  const void* caller_;

 public:
  // PyTorch-style Error constructor. NB: the implementation of this
  // is actually in Logging.cpp
  Error(SourceLocation source_location, std::string msg);

  // Caffe2-style error message
  Error(
      const char* file,
      const uint32_t line,
      const char* condition,
      const std::string& msg,
      const std::string& backtrace,
      const void* caller = nullptr);

  // Base constructor
  Error(std::string msg, std::string backtrace, const void* caller = nullptr);

  // Add some new context to the message stack. The last added context
  // will be formatted at the end of the context list upon printing.
  // WARNING: This method is O(n) in the size of the stack, so don't go
  // wild adding a ridiculous amount of context to error messages.
  void add_context(std::string msg);

  const std::string& msg() const {
    return msg_;
  }

  const std::vector<std::string>& context() const {
    return context_;
  }

  const std::string& backtrace() const {
    return backtrace_;
  }

  /// Returns the complete error message, including the source location.
  /// The returned pointer is invalidated if you call add_context() on
  /// this object.
  const char* what() const noexcept override {
    return what_.c_str();
  }

  const void* caller() const noexcept {
    return caller_;
  }

  /// Returns only the error message string, without source location.
  /// The returned pointer is invalidated if you call add_context() on
  /// this object.
  virtual const char* what_without_backtrace() const noexcept {
    return what_without_backtrace_.c_str();
  }

 private:
  void refresh_what();
  std::string compute_what(bool include_backtrace) const;
};

class C10_API Warning {
 public:
  class C10_API UserWarning {};
  class C10_API DeprecationWarning {};

  using warning_variant_t = std::variant<UserWarning, DeprecationWarning>;

  Warning(
      warning_variant_t type,
      const SourceLocation& source_location,
      std::string msg,
      bool verbatim);

  Warning(
      warning_variant_t type,
      SourceLocation source_location,
      const char* msg,
      bool verbatim);

  Warning(
      warning_variant_t type,
      SourceLocation source_location,
      ::c10::detail::CompileTimeEmptyString msg,
      bool verbatim);

  // Getters for members
  warning_variant_t type() const;
  const SourceLocation& source_location() const;
  const std::string& msg() const;
  bool verbatim() const;

 private:
  // The type of warning
  warning_variant_t type_;

  // Where the warning happened.
  SourceLocation source_location_;

  // The actual warning message.
  std::string msg_;

  // See note: [Verbatim Warnings]
  bool verbatim_;
};

using UserWarning = Warning::UserWarning;
using DeprecationWarning = Warning::DeprecationWarning;

// Issue a warning with a given message. Dispatched to the current
// warning handler.
void C10_API warn(const Warning& warning);

class C10_API WarningHandler {
 public:
  virtual ~WarningHandler() = default;
  /// The default warning handler. Prints the message to stderr.
  virtual void process(const Warning& warning);
};

namespace WarningUtils {

// Note: [Verbatim Warnings]
// Warnings originating in C++ code can appear out-of-place to Python users:
// a user runs a line in Python, but the warning references a line in C++.
// Some parts of PyTorch, like the JIT, are cognizant of this mismatch
// and take care to map warnings back to the user's program, but most
// of PyTorch simply throws a context-free warning. To allow warning
// handlers to add context where appropriate, warn takes the
// "verbatim" flag. When this is false a warning handler might append
// the C++ warning to a Python warning message that relates the warning
// back to the user's program. Callers who have already accounted for
// context in their warnings should set verbatim to true so their warnings
// appear without modification.

/// Sets the global warning handler. This is not thread-safe, so it should
/// generally be called once during initialization or while holding the GIL
/// for programs that use python.
/// User is responsible for keeping the WarningHandler alive until
/// it is not needed.
C10_API void set_warning_handler(WarningHandler* handler) noexcept(true);
/// Gets the global warning handler.
C10_API WarningHandler* get_warning_handler() noexcept(true);

class C10_API WarningHandlerGuard {
  WarningHandler* prev_handler_;

 public:
  WarningHandlerGuard(WarningHandler* new_handler)
      : prev_handler_(c10::WarningUtils::get_warning_handler()) {
    c10::WarningUtils::set_warning_handler(new_handler);
  }
  ~WarningHandlerGuard() {
    c10::WarningUtils::set_warning_handler(prev_handler_);
  }
};

/// The TORCH_WARN_ONCE macro is difficult to test for. Use
/// setWarnAlways(true) to turn it into TORCH_WARN, which can be
/// tested for more easily.
C10_API void set_warnAlways(bool) noexcept(true);
C10_API bool get_warnAlways() noexcept(true);

// A RAII guard that sets warn_always (not thread-local) on
// construction, and sets it back to the original value upon destruction.
struct C10_API WarnAlways {
 public:
  explicit WarnAlways(bool setting = true);
  ~WarnAlways();

 private:
  bool prev_setting;
};

} // namespace WarningUtils

// Like Error, but we always report the C++ backtrace, instead of only
// reporting when TORCH_SHOW_CPP_STACKTRACES
class C10_API ErrorAlwaysShowCppStacktrace : public Error {
  using Error::Error;
  const char* what_without_backtrace() const noexcept override {
    return what();
  }
};

// Used in ATen for out-of-bound indices that can reasonably only be detected
// lazily inside a kernel (See: advanced indexing). These turn into
// IndexError when they cross to Python.
class C10_API IndexError : public Error {
  using Error::Error;
};

// Used in ATen for invalid values. These turn into
// ValueError when they cross to Python.
class C10_API ValueError : public Error {
  using Error::Error;
};

// Used in ATen for invalid types. These turn into
// TypeError when they cross to Python.
class C10_API TypeError : public Error {
  using Error::Error;
};

// Used in ATen for functionality that is not implemented. These turn into
// NotImplementedError when they cross to Python.
class C10_API NotImplementedError : public Error {
  using Error::Error;
};

// Used in ATen for non-finite indices. These turn into
// ExitException when they cross to Python.
class C10_API EnforceFiniteError : public Error {
  using Error::Error;
};

// Used in Onnxifi backend lowering. These turn into
// ExitException when they cross to Python.
class C10_API OnnxfiBackendSystemError : public Error {
  using Error::Error;
};

// Used for numerical errors from the linalg module. These
// turn into LinAlgError when they cross into Python.
class C10_API LinAlgError : public Error {
  using Error::Error;
};

class C10_API OutOfMemoryError : public Error {
  using Error::Error;
};

// Base error type for all distributed errors.
// These turn into DistError when they cross into Python.
class C10_API DistError : public Error {
  using Error::Error;
};

// Used for collective communication library errors from the distributed
// module. These turn into DistBackendError when they cross into Python.
class C10_API DistBackendError : public DistError {
  using DistError::DistError;
};

// Used for errors originating from the store.
// These turn into DistStoreError when they cross into Python.
class C10_API DistStoreError : public DistError {
  using DistError::DistError;
};

// Used for errors originating from the TCP/IP stack and not from collective
// libraries. These turn into DistNetworkError when they cross into Python.
class C10_API DistNetworkError : public DistError {
  using DistError::DistError;
};

// A utility function to return an exception std::string by prepending its
// exception type before its what() content
C10_API std::string GetExceptionString(const std::exception& e);

} // namespace c10

// Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK
//
// Note: in the debug build with MSVC, __LINE__ might be of long type (a.k.a
// int32_t), which is different from the definition of `SourceLocation` that
// requires unsigned int (a.k.a uint32_t) and may cause a compile error with the
// message: error C2397: conversion from 'long' to 'uint32_t' requires a
// narrowing conversion. Here the static cast is used to pass the build. If this
// is used inside a lambda the __func__ macro expands to operator(), which isn't
// very useful, but hard to fix in a macro so suppressing the warning.
#define C10_THROW_ERROR(err_type, msg) \
  throw ::c10::err_type(               \
      {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)

#define C10_BUILD_ERROR(err_type, msg) \
  ::c10::err_type({__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)

// Private helper macro to work around MSVC misexpansion of nested macro
// invocations involving __VA_ARGS__. See
// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
#define C10_EXPAND_MSVC_WORKAROUND(x) x

// On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases
// where the unlikely expression may be a constant, use this macro to ensure
// return statement analysis keeps working (at the cost of not getting the
// likely/unlikely annotation on nvcc).
// https://github.com/pytorch/pytorch/issues/21418
//
// Currently, this is only used in the error reporting macros below. If you
// want to use it more generally, move me to Macros.h
//
// TODO: Brian Vaughan observed that we might be able to get this to work on
// nvcc by writing some sort of C++ overload that distinguishes constexpr inputs
// from non-constexpr. Since there isn't any evidence that losing C10_UNLIKELY
// in nvcc is causing us perf problems, this is not yet implemented, but this
// might be an interesting piece of C++ code for an intrepid bootcamper to
// write.
#if defined(__CUDACC__)
#define C10_UNLIKELY_OR_CONST(e) e
#else
#define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e)
#endif

// ----------------------------------------------------------------------------
// Error reporting macros
// ----------------------------------------------------------------------------

#ifdef STRIP_ERROR_MESSAGES
#define TORCH_RETHROW(e, ...) throw
#else
#define TORCH_RETHROW(e, ...)               \
  do {                                      \
    e.add_context(::c10::str(__VA_ARGS__)); \
    throw;                                  \
  } while (false)
#endif

// A utility macro to provide assert()-like functionality; that is, enforcement
// of internal invariants in code. It supports an arbitrary number of extra
// arguments (evaluated only on failure), which will be printed in the assert
// failure message using operator<< (this is useful to print some variables
// which may be useful for debugging.)
//
// Usage:
//   TORCH_INTERNAL_ASSERT(should_be_true);
//   TORCH_INTERNAL_ASSERT(x == 0, "x = ", x);
//
// Assuming no bugs in PyTorch, the conditions tested by this macro should
// always be true; e.g., it should be possible to disable all of these
// conditions without changing observable user behavior. If you would like to
// do error reporting for user input, please use TORCH_CHECK instead.
//
// NOTE: It is SAFE to use this macro in production code; on failure, this
// simply raises an exception, it does NOT unceremoniously quit the process
// (unlike assert()).
//
#ifdef STRIP_ERROR_MESSAGES
#define TORCH_INTERNAL_ASSERT(cond, ...)                              \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {                               \
    ::c10::detail::torchCheckFail(                                    \
        __func__,                                                     \
        __FILE__,                                                     \
        static_cast<uint32_t>(__LINE__),                              \
        #cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \
  }
#else
// It would be nice if we could build a combined string literal out of
// the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal
// as the first argument, but there doesn't seem to be any good way to
// do that while still supporting having a first argument that isn't a
// string literal.
#define TORCH_INTERNAL_ASSERT(cond, ...)                                         \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {                                          \
    ::c10::detail::torchInternalAssertFail(                                      \
        __func__,                                                                \
        __FILE__,                                                                \
        static_cast<uint32_t>(__LINE__),                                         \
        #cond                                                                    \
        " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \
            __LINE__) ", please report a bug to PyTorch. ",                      \
        c10::str(__VA_ARGS__));                                                  \
  }
#endif

// A utility macro to make it easier to test for error conditions from user
// input. Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra
// arguments (evaluated only on failure), which will be printed in the error
// message using operator<< (e.g., you can pass any object which has
// operator<< defined. Most objects in PyTorch have these definitions!)
//
// Usage:
//    TORCH_CHECK(should_be_true); // A default error message will be provided
//                                 // in this case; but we recommend writing an
//                                 // explicit error message, as it is more
//                                 // user friendly.
//    TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x);
//
// On failure, this macro will raise an exception. If this exception propagates
// to Python, it will convert into a Python RuntimeError.
//
// NOTE: It is SAFE to use this macro in production code; on failure, this
// simply raises an exception, it does NOT unceremoniously quit the process
// (unlike CHECK() from glog.)
//
#define TORCH_CHECK_WITH(error_t, cond, ...) \
  TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__)

#ifdef STRIP_ERROR_MESSAGES
#define TORCH_CHECK_MSG(cond, type, ...) \
  (#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__))
#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...)                \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {                               \
    C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
  }
#else
namespace c10 {
namespace detail {
template <typename... Args>
decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) {
  return ::c10::str(args...);
}
inline C10_API const char* torchCheckMsgImpl(const char* msg) {
  return msg;
}
// If there is just 1 user-provided C-string argument, use it.
inline C10_API const char* torchCheckMsgImpl(
    const char* /*msg*/,
    const char* args) {
  return args;
}
} // namespace detail
} // namespace c10

#define TORCH_CHECK_MSG(cond, type, ...)                   \
  (::c10::detail::torchCheckMsgImpl(                       \
      "Expected " #cond                                    \
      " to be true, but got false. "                       \
      "(Could this error message be improved? If so, "     \
      "please report an enhancement request to PyTorch.)", \
      ##__VA_ARGS__))
#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...)                  \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {                                 \
    C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
  }
#endif

namespace c10 {
namespace detail {

[[noreturn]] C10_API void torchCheckFail(
    const char* func,
    const char* file,
    uint32_t line,
    const std::string& msg);
[[noreturn]] C10_API void torchCheckFail(
    const char* func,
    const char* file,
    uint32_t line,
    const char* msg);

// The c10::str() call that creates userMsg can have 1 of 3 return
// types depending on the number and types of arguments passed to
// TORCH_INTERNAL_ASSERT. 0 arguments will get a
// CompileTimeEmptyString, 1 const char * will be passed straight
// through, and anything else will get converted to std::string.
[[noreturn]] C10_API void torchInternalAssertFail(
    const char* func,
    const char* file,
    uint32_t line,
    const char* condMsg,
    const char* userMsg);
[[noreturn]] inline C10_API void torchInternalAssertFail(
    const char* func,
    const char* file,
    uint32_t line,
    const char* condMsg,
    ::c10::detail::CompileTimeEmptyString /*userMsg*/) {
  torchCheckFail(func, file, line, condMsg);
}
[[noreturn]] C10_API void torchInternalAssertFail(
    const char* func,
    const char* file,
    uint32_t line,
    const char* condMsg,
    const std::string& userMsg);

} // namespace detail
} // namespace c10

#ifdef STRIP_ERROR_MESSAGES
#define TORCH_CHECK(cond, ...)                   \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {          \
    ::c10::detail::torchCheckFail(               \
        __func__,                                \
        __FILE__,                                \
        static_cast<uint32_t>(__LINE__),         \
        TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \
  }
#else
#define TORCH_CHECK(cond, ...)                     \
  if (C10_UNLIKELY_OR_CONST(!(cond))) {            \
    ::c10::detail::torchCheckFail(                 \
        __func__,                                  \
        __FILE__,                                  \
        static_cast<uint32_t>(__LINE__),           \
        TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \
  }
#endif

// A utility macro that does what `TORCH_CHECK` does if compiled in the host
// code, otherwise does nothing. Supposed to be used in the code shared between
// host and device code as an alternative for `TORCH_CHECK`.
#if defined(__CUDACC__) || defined(__HIPCC__)
#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...)
#else
#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__)
#endif

// Debug only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug
// build, and does nothing in release build. It is appropriate to use
// in situations where you want to add an assert to a hotpath, but it is
// too expensive to run this assert on production builds.
#ifdef NDEBUG
// Optimized version - generates no code.
#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
  while (false)                               \
  C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
#else
#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
  C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
#endif

// TODO: We're going to get a lot of similar looking string literals
// this way; check if this actually affects binary size.

// Like TORCH_CHECK, but raises LinAlgError instead of Error.
#define TORCH_CHECK_LINALG(cond, ...) \
  TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__)

// Like TORCH_CHECK, but raises IndexErrors instead of Errors.
#define TORCH_CHECK_INDEX(cond, ...) \
  TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__)

// Like TORCH_CHECK, but raises ValueErrors instead of Errors.
#define TORCH_CHECK_VALUE(cond, ...) \
  TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__)

// Like TORCH_CHECK, but raises TypeErrors instead of Errors.
#define TORCH_CHECK_TYPE(cond, ...) \
  TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__)

// Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors.
#define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \
  TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__)

#define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) \
  TORCH_CHECK_WITH_MSG(                                   \
      ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__)

#ifdef STRIP_ERROR_MESSAGES
#define WARNING_MESSAGE_STRING(...) \
  ::c10::detail::CompileTimeEmptyString {}
#else
#define WARNING_MESSAGE_STRING(...) ::c10::str(__VA_ARGS__)
#endif

// Report a warning to the user. Accepts an arbitrary number of extra
// arguments which are concatenated into the warning message using operator<<
//
#ifdef DISABLE_WARN
#define _TORCH_WARN_WITH(...) ((void)0);
#else
#define _TORCH_WARN_WITH(warning_t, ...)                     \
  ::c10::warn(::c10::Warning(                                \
      warning_t(),                                           \
      {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
      WARNING_MESSAGE_STRING(__VA_ARGS__),                   \
      false));
#endif

#define TORCH_WARN(...) _TORCH_WARN_WITH(::c10::UserWarning, __VA_ARGS__);

#define TORCH_WARN_DEPRECATION(...) \
  _TORCH_WARN_WITH(::c10::DeprecationWarning, __VA_ARGS__);

// Report a warning to the user only once. Accepts an arbitrary number of extra
// arguments which are concatenated into the warning message using operator<<
//
#define _TORCH_WARN_ONCE(...)                                             \
  C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \
      [&] {                                                               \
        TORCH_WARN(__VA_ARGS__);                                          \
        return true;                                                      \
      }()

#ifdef DISABLE_WARN
#define TORCH_WARN_ONCE(...) ((void)0);
#else
#define TORCH_WARN_ONCE(...)                   \
  if (::c10::WarningUtils::get_warnAlways()) { \
    TORCH_WARN(__VA_ARGS__);                   \
  } else {                                     \
    _TORCH_WARN_ONCE(__VA_ARGS__);             \
  }
#endif

// Report an error with a specific argument
// NOTE: using the argument name in TORCH_CHECK's message is preferred
#define TORCH_CHECK_ARG(cond, argN, ...) \
  TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__)

// ----------------------------------------------------------------------------
// Deprecated macros
// ----------------------------------------------------------------------------

namespace c10 {
namespace detail {

/*
// Deprecation disabled until we fix sites in our codebase
C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
instead.")
*/
inline void deprecated_AT_ERROR() {}

/*
// Deprecation disabled until we fix sites in our codebase
C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an
internal invariant failure, use " \
                       "TORCH_INTERNAL_ASSERT instead; if you mean to do user
error checking, use " \ "TORCH_CHECK. See
https://github.com/pytorch/pytorch/issues/20287 for more details.")
*/
inline void deprecated_AT_ASSERT() {}

/*
// Deprecation disabled until we fix sites in our codebase
C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an
internal invariant failure, use " \
                       "TORCH_INTERNAL_ASSERT instead; if you mean to do user
error checking, use " \ "TORCH_CHECK. See
https://github.com/pytorch/pytorch/issues/20287 for more details.")
*/
inline void deprecated_AT_ASSERTM() {}

} // namespace detail
} // namespace c10

// Deprecated alias; this alias was deprecated because people kept mistakenly
// using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
// instead. See https://github.com/pytorch/pytorch/issues/20287 for more
// details.
#define AT_ASSERT(...)                                              \
  do {                                                              \
    ::c10::detail::deprecated_AT_ASSERT();                          \
    C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \
  } while (false)

// Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro
// supports both 0-ary and variadic calls, so having a separate
// message-accepting macro is not necessary.
//
// NB: we MUST include cond explicitly here, as MSVC will miscompile the macro
// expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround
// can be seen at
// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
#define AT_ASSERTM(cond, ...)                                             \
  do {                                                                    \
    ::c10::detail::deprecated_AT_ASSERTM();                               \
    C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \
  } while (false)

// Deprecated alias; this alias was deprecated because it represents extra API
// surface that makes it hard for people to understand what macro to use.
// Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to
// unconditionally fail at a line of code.
#define AT_ERROR(...)                                                        \
  do {                                                                       \
    ::c10::detail::deprecated_AT_ERROR();                                    \
    C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
  } while (false)

#endif // C10_UTIL_EXCEPTION_H_
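For context, a short sketch of the macros above in use (illustration only, assuming the c10 headers and library are available): TORCH_CHECK guards user input and throws c10::Error, while TORCH_INTERNAL_ASSERT guards invariants that should never fail.

#include <c10/util/Exception.h>
#include <iostream>

void set_ratio(double r) {
  // User-input validation: throws c10::Error (which becomes a Python
  // RuntimeError if it crosses the language boundary).
  TORCH_CHECK(r >= 0.0 && r <= 1.0, "Expected ratio in [0, 1], but got ", r);
  // Internal invariant: should be impossible after the check above.
  TORCH_INTERNAL_ASSERT(r == r, "ratio became NaN, r = ", r);
}

int main() {
  try {
    set_ratio(1.5);
  } catch (const c10::Error& e) {
    // what() includes the source location; this is the concise form.
    std::cerr << e.what_without_backtrace() << "\n";
  }
}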
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwned.h
ADDED
@@ -0,0 +1,143 @@
#pragma once

#include <c10/util/in_place.h>

namespace c10 {

// See example implementation in TensorBase.h and TensorBody.h.
// Synopsis:
//
// repr_type -- type to use to store an owned T in ExclusivelyOwned.
//
// pointer_type -- pointer-esque type to return from
// ExclusivelyOwned's get() and operator*() methods.
//
// const_pointer_type -- similar to pointer_type, used for the const methods.
//
// static repr_type nullRepr() -- return a null instance of repr_type.
//
// template <class... Args>
// static repr_type createInPlace(Args&&... args) -- used by the in-place
// ExclusivelyOwned constructor.
//
// static repr_type moveToRepr(T&& x) -- move the given x into an
// instance of repr_type. used by the ExclusivelyOwned(T&&)
// constructor.
//
// static void destroyOwned(repr_type x) -- free memory for a
// known-exclusively-owned instance of x. Replaces calling repr_type's
// destructor. Being able to implement this more efficiently than
// repr_type's destructor is the main reason to use ExclusivelyOwned
// for a type.
//
// static T take(repr_type&) -- move out of the given repr_type into an owned T.
//
// static pointer_type getImpl(const repr_type&) -- return a pointer
// to the given repr_type. May take repr_type by value if that is more
// efficient.
template <typename T>
struct ExclusivelyOwnedTraits;

/// ExclusivelyOwned is a smart-pointer-like wrapper around an
/// exclusively-owned instance of some type T that normally has
/// mandatory reference counting (currently just Tensor). If you have
/// an isolated piece of code that knows that it has sole ownership of
/// an object of one of these types (i.e., because you created it
/// directly or using a factory function) and that object will not
/// escape from that isolated piece of code, then moving the object
/// into an ExclusivelyOwned will avoid an atomic reference count
/// decrement at destruction time.
///
/// If you directly create the Tensor in the first
/// place, you can use the in_place constructor of ExclusivelyOwned to
/// additionally avoid doing any stores to initialize the refcount &
/// weakcount.
template <typename T>
class ExclusivelyOwned {
  using EOT = ExclusivelyOwnedTraits<T>;
  union {
    char dummy_;
    typename ExclusivelyOwnedTraits<T>::repr_type repr_;
  };

 public:
  ExclusivelyOwned() : repr_(EOT::nullRepr()) {}

  explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {}

  template <class... Args>
  explicit ExclusivelyOwned(in_place_t, Args&&... args)
      : repr_(EOT::createInPlace(std::forward<Args>(args)...)) {}

  ExclusivelyOwned(const ExclusivelyOwned&) = delete;

  ExclusivelyOwned(ExclusivelyOwned&& rhs) noexcept
      : repr_(std::move(rhs.repr_)) {
    rhs.repr_ = EOT::nullRepr();
  }

  ExclusivelyOwned& operator=(const ExclusivelyOwned&) = delete;

  ExclusivelyOwned& operator=(ExclusivelyOwned&& rhs) noexcept {
    EOT::destroyOwned(repr_);
    repr_ = std::move(rhs.repr_);
    rhs.repr_ = EOT::nullRepr();
    return *this;
  }

  ExclusivelyOwned& operator=(T&& rhs) noexcept {
    EOT::destroyOwned(repr_);
    repr_ = EOT::moveToRepr(std::move(rhs));
    return *this;
  }

  ~ExclusivelyOwned() {
    EOT::destroyOwned(repr_);
    // Don't bother to call the destructor of repr_, since we already
    // did specialized destruction for the exclusively-owned case in
    // destroyOwned!
  }

  // We don't provide this because it would require us to be able to
  // differentiate an owned-but-empty T from a lack of T. This is
  // particularly problematic for Tensor, which wants to use an
  // undefined Tensor as its null state.
  explicit operator bool() const noexcept = delete;

  operator T() && {
    return take();
  }

  // NOTE: the equivalent operation on MaybeOwned is a moving
  // operator*. For ExclusivelyOwned, take() and operator*() may well
  // have different return types, so they are different functions.
  T take() && {
    return EOT::take(repr_);
  }

  typename EOT::const_pointer_type operator->() const {
    return get();
  }

  typename EOT::const_pointer_type get() const {
    return EOT::getImpl(repr_);
  }

  typename EOT::pointer_type operator->() {
    return get();
  }

  typename EOT::pointer_type get() {
    return EOT::getImpl(repr_);
  }

  std::remove_pointer_t<typename EOT::const_pointer_type>& operator*() const {
    return *get();
  }

  std::remove_pointer_t<typename EOT::pointer_type>& operator*() {
    return *get();
  }
};

} // namespace c10
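For context, a usage sketch (illustration only, assuming ATen is available): a tensor that is created locally and never escapes can be parked in ExclusivelyOwned so that its destruction skips the atomic refcount decrement.

#include <ATen/ATen.h>
#include <c10/util/ExclusivelyOwned.h>

float checksum(int64_t n) {
  // We created the tensor and it never escapes this function, so
  // exclusive ownership holds by construction.
  c10::ExclusivelyOwned<at::Tensor> t(at::ones({n, n}));
  float s = t->sum().item<float>();  // operator-> reaches the owned Tensor
  return s;  // the destructor frees the TensorImpl with no atomic decrement
}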
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h
ADDED
@@ -0,0 +1,74 @@
#pragma once

#include <c10/core/TensorImpl.h>

#include <utility>

namespace c10 {
// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and
// at::TensorBase.
template <typename TensorType>
struct ExclusivelyOwnedTensorTraits {
  using repr_type = TensorType;
  using pointer_type = TensorType*;
  using const_pointer_type = const TensorType*;

  static repr_type nullRepr() {
    return TensorType();
  }

  template <class... Args>
  static repr_type createInPlace(Args&&... args) {
    return TensorType(std::forward<Args>(args)...);
  }

  static repr_type moveToRepr(TensorType&& x) {
    return std::move(x);
  }

  static void destroyOwned(TensorType& x) {
    TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl();
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        toDestroy != nullptr, "Tensor somehow got null TensorImpl?");
    // May be 0 because UndefinedTensorImpl doesn't get its refcount
    // incremented.
    const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton();
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined),
        "ExclusivelyOwned<Tensor> destroyed with isUndefined ",
        isUndefined,
        " and refcount ",
        toDestroy->refcount_,
        ", expected 1 or, if isUndefined, 0!");
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        toDestroy->weakcount_ == 1 ||
            (toDestroy->weakcount_ == 0 &&
             toDestroy == UndefinedTensorImpl::singleton()),
        "ExclusivelyOwned<Tensor> destroyed with isUndefined ",
        isUndefined,
        " and weakcount ",
        toDestroy->weakcount_,
        ", expected 1 or, if isUndefined, 0!");
    if (!isUndefined) {
#ifndef NDEBUG
      // Needed to pass the debug assertions in ~intrusive_ptr_target.
      toDestroy->refcount_ = 0;
      toDestroy->weakcount_ = 0;
#endif
      delete toDestroy;
    }
  }

  static TensorType take(TensorType& x) {
    return std::move(x);
  }

  static pointer_type getImpl(repr_type& x) {
    return &x;
  }

  static const_pointer_type getImpl(const repr_type& x) {
    return &x;
  }
};
} // namespace c10
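To make the traits contract concrete, here is a hypothetical specialization for an invented single-owner handle type; Handle, Impl, and release_impl() are illustration-only names, not c10 API. It mirrors the tensor traits above: a default-constructed value is the null state, and destroyOwned() exploits exclusivity to delete directly instead of running shared-teardown logic.

#include <c10/util/ExclusivelyOwned.h>
#include <utility>

struct Impl { int payload = 0; };          // hypothetical shared state
struct Handle {
  Impl* impl = nullptr;
  Impl* release_impl() { return std::exchange(impl, nullptr); }
};

namespace c10 {
template <>
struct ExclusivelyOwnedTraits<Handle> {
  using repr_type = Handle;
  using pointer_type = Handle*;
  using const_pointer_type = const Handle*;

  static repr_type nullRepr() { return Handle{}; }

  template <class... Args>
  static repr_type createInPlace(Args&&... args) {
    return Handle{std::forward<Args>(args)...};
  }

  static repr_type moveToRepr(Handle&& x) { return std::move(x); }

  // Exclusive ownership means no other references can exist, so we can
  // free the payload directly; delete on the null state is a no-op, so
  // nullRepr() needs no special casing.
  static void destroyOwned(Handle& x) { delete x.release_impl(); }

  static Handle take(Handle& x) { return std::move(x); }
  static pointer_type getImpl(repr_type& x) { return &x; }
  static const_pointer_type getImpl(const repr_type& x) { return &x; }
};
} // namespace c10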
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h
ADDED
@@ -0,0 +1,29 @@
#ifndef C10_UTIL_FBCODEMAPS_H_
#define C10_UTIL_FBCODEMAPS_H_

// Map typedefs so that we can use folly's F14 maps in fbcode without
// taking a folly dependency.

#ifdef FBCODE_CAFFE2
#include <folly/container/F14Map.h>
#include <folly/container/F14Set.h>
#else
#include <unordered_map>
#include <unordered_set>
#endif

namespace c10 {
#ifdef FBCODE_CAFFE2
template <typename Key, typename Value>
using FastMap = folly::F14FastMap<Key, Value>;
template <typename Key>
using FastSet = folly::F14FastSet<Key>;
#else
template <typename Key, typename Value>
using FastMap = std::unordered_map<Key, Value>;
template <typename Key>
using FastSet = std::unordered_set<Key>;
#endif
} // namespace c10

#endif // C10_UTIL_FBCODEMAPS_H_
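For context (illustration only, not from the diff): code written against these aliases compiles unchanged in both configurations, picking up folly's F14 containers inside fbcode builds and the standard containers everywhere else.

#include <c10/util/FbcodeMaps.h>
#include <string>

c10::FastMap<std::string, int> hit_counts;
c10::FastSet<int> seen_ids;

void record(const std::string& key, int id) {
  ++hit_counts[key];    // same interface whether F14FastMap or unordered_map
  seen_ids.insert(id);
}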
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h
ADDED
@@ -0,0 +1,274 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <cstring>
+#include <limits>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
+#endif
+
+namespace c10 {
+
+/// Constructors
+
+inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value)
+    : x(detail::fp8e4m3fn_from_fp32_value(value)) {}
+
+/// Implicit conversions
+
+inline C10_HOST_DEVICE Float8_e4m3fn::operator float() const {
+  return detail::fp8e4m3fn_to_fp32_value(x);
+}
+
+/// Special values helper
+
+inline C10_HOST_DEVICE bool Float8_e4m3fn::isnan() const {
+  return (x & 0b01111111) == 0b01111111;
+}
+
+/// Arithmetic
+
+inline C10_HOST_DEVICE Float8_e4m3fn
+operator+(const Float8_e4m3fn& a, const Float8_e4m3fn& b) {
+  return static_cast<float>(a) + static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn
+operator-(const Float8_e4m3fn& a, const Float8_e4m3fn& b) {
+  return static_cast<float>(a) - static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn
+operator*(const Float8_e4m3fn& a, const Float8_e4m3fn& b) {
+  return static_cast<float>(a) * static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn operator/(
+    const Float8_e4m3fn& a,
+    const Float8_e4m3fn& b) __ubsan_ignore_float_divide_by_zero__ {
+  return static_cast<float>(a) / static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn operator-(const Float8_e4m3fn& a) {
+  return -static_cast<float>(a);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn& operator+=(
+    Float8_e4m3fn& a,
+    const Float8_e4m3fn& b) {
+  a = a + b;
+  return a;
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn& operator-=(
+    Float8_e4m3fn& a,
+    const Float8_e4m3fn& b) {
+  a = a - b;
+  return a;
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn& operator*=(
+    Float8_e4m3fn& a,
+    const Float8_e4m3fn& b) {
+  a = a * b;
+  return a;
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn& operator/=(
+    Float8_e4m3fn& a,
+    const Float8_e4m3fn& b) {
+  a = a / b;
+  return a;
+}
+
+/// Arithmetic with floats
+
+inline C10_HOST_DEVICE float operator+(Float8_e4m3fn a, float b) {
+  return static_cast<float>(a) + b;
+}
+inline C10_HOST_DEVICE float operator-(Float8_e4m3fn a, float b) {
+  return static_cast<float>(a) - b;
+}
+inline C10_HOST_DEVICE float operator*(Float8_e4m3fn a, float b) {
+  return static_cast<float>(a) * b;
+}
+inline C10_HOST_DEVICE float operator/(Float8_e4m3fn a, float b)
+    __ubsan_ignore_float_divide_by_zero__ {
+  return static_cast<float>(a) / b;
+}
+
+inline C10_HOST_DEVICE float operator+(float a, Float8_e4m3fn b) {
+  return a + static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float operator-(float a, Float8_e4m3fn b) {
+  return a - static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float operator*(float a, Float8_e4m3fn b) {
+  return a * static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float operator/(float a, Float8_e4m3fn b)
+    __ubsan_ignore_float_divide_by_zero__ {
+  return a / static_cast<float>(b);
+}
+
+inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e4m3fn& b) {
+  return a += static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e4m3fn& b) {
+  return a -= static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e4m3fn& b) {
+  return a *= static_cast<float>(b);
+}
+inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e4m3fn& b) {
+  return a /= static_cast<float>(b);
+}
+
+/// Arithmetic with doubles
+
+inline C10_HOST_DEVICE double operator+(Float8_e4m3fn a, double b) {
+  return static_cast<double>(a) + b;
+}
+inline C10_HOST_DEVICE double operator-(Float8_e4m3fn a, double b) {
+  return static_cast<double>(a) - b;
+}
+inline C10_HOST_DEVICE double operator*(Float8_e4m3fn a, double b) {
+  return static_cast<double>(a) * b;
+}
+inline C10_HOST_DEVICE double operator/(Float8_e4m3fn a, double b)
+    __ubsan_ignore_float_divide_by_zero__ {
+  return static_cast<double>(a) / b;
+}
+
+inline C10_HOST_DEVICE double operator+(double a, Float8_e4m3fn b) {
+  return a + static_cast<double>(b);
+}
+inline C10_HOST_DEVICE double operator-(double a, Float8_e4m3fn b) {
+  return a - static_cast<double>(b);
+}
+inline C10_HOST_DEVICE double operator*(double a, Float8_e4m3fn b) {
+  return a * static_cast<double>(b);
+}
+inline C10_HOST_DEVICE double operator/(double a, Float8_e4m3fn b)
+    __ubsan_ignore_float_divide_by_zero__ {
+  return a / static_cast<double>(b);
+}
+
+/// Arithmetic with ints
+
+inline C10_HOST_DEVICE Float8_e4m3fn operator+(Float8_e4m3fn a, int b) {
+  return a + static_cast<Float8_e4m3fn>(b);
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator-(Float8_e4m3fn a, int b) {
+  return a - static_cast<Float8_e4m3fn>(b);
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator*(Float8_e4m3fn a, int b) {
+  return a * static_cast<Float8_e4m3fn>(b);
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator/(Float8_e4m3fn a, int b) {
+  return a / static_cast<Float8_e4m3fn>(b);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn operator+(int a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) + b;
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator-(int a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) - b;
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator*(int a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) * b;
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator/(int a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) / b;
+}
+
+//// Arithmetic with int64_t
+
+inline C10_HOST_DEVICE Float8_e4m3fn operator+(Float8_e4m3fn a, int64_t b) {
+  return a + static_cast<Float8_e4m3fn>(b);
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator-(Float8_e4m3fn a, int64_t b) {
+  return a - static_cast<Float8_e4m3fn>(b);
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator*(Float8_e4m3fn a, int64_t b) {
+  return a * static_cast<Float8_e4m3fn>(b);
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator/(Float8_e4m3fn a, int64_t b) {
+  return a / static_cast<Float8_e4m3fn>(b);
+}
+
+inline C10_HOST_DEVICE Float8_e4m3fn operator+(int64_t a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) + b;
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator-(int64_t a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) - b;
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator*(int64_t a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) * b;
+}
+inline C10_HOST_DEVICE Float8_e4m3fn operator/(int64_t a, Float8_e4m3fn b) {
+  return static_cast<Float8_e4m3fn>(a) / b;
+}
+
+/// NOTE: we do not define comparisons directly and instead rely on the implicit
+/// conversion from c10::Float8_e4m3fn to float.
+
+} // namespace c10
+
+namespace std {
+
+template <>
+class numeric_limits<c10::Float8_e4m3fn> {
+ public:
+  static constexpr bool is_specialized = true;
+  static constexpr bool is_signed = true;
+  static constexpr bool is_integer = false;
+  static constexpr bool is_exact = false;
+  static constexpr bool has_infinity = false;
+  static constexpr bool has_quiet_NaN = true;
+  static constexpr bool has_signaling_NaN = false;
+  static constexpr auto has_denorm = true;
+  static constexpr auto has_denorm_loss = true;
+  static constexpr auto round_style = numeric_limits<float>::round_style;
+  static constexpr bool is_iec559 = false;
+  static constexpr bool is_bounded = true;
+  static constexpr bool is_modulo = false;
+  static constexpr int digits = 4;
+  static constexpr int digits10 = 0;
+  static constexpr int max_digits10 = 3;
+  static constexpr int radix = 2;
+  static constexpr int min_exponent = -5;
+  static constexpr int min_exponent10 = -1;
+  static constexpr int max_exponent = 8;
+  static constexpr int max_exponent10 = 2;
+  static constexpr auto traps = numeric_limits<float>::traps;
+  static constexpr auto tinyness_before = false;
+
+  static constexpr c10::Float8_e4m3fn min() {
+    return c10::Float8_e4m3fn(0x08, c10::Float8_e4m3fn::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fn lowest() {
+    return c10::Float8_e4m3fn(0xFE, c10::Float8_e4m3fn::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fn max() {
+    return c10::Float8_e4m3fn(0x7E, c10::Float8_e4m3fn::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fn epsilon() {
+    return c10::Float8_e4m3fn(0x20, c10::Float8_e4m3fn::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fn round_error() {
+    return c10::Float8_e4m3fn(0x30, c10::Float8_e4m3fn::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fn quiet_NaN() {
+    return c10::Float8_e4m3fn(0x7F, c10::Float8_e4m3fn::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fn denorm_min() {
+    return c10::Float8_e4m3fn(0x01, c10::Float8_e4m3fn::from_bits());
+  }
+};
+
+} // namespace std
+
+C10_CLANG_DIAGNOSTIC_POP()
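
Note: every operator above widens to float, computes, and narrows on assignment, so results are rounded to the nearest representable e4m3fn value, and out-of-range results land on the NaN encoding (the format has no infinity). A hedged sketch of that behavior (hypothetical test, not part of the diff; assumes the c10 headers are available; all calls used here are header-inline):

#include <c10/util/Float8_e4m3fn.h>
#include <iostream>

int main() {
  c10::Float8_e4m3fn a(1.0f), b(2.0f);
  // Operands widen to float; the sum narrows back to 8 bits on assignment.
  c10::Float8_e4m3fn c = a + b;
  std::cout << static_cast<float>(c) << "\n"; // prints 3
  // 1000.0f exceeds the finite range (max 448), so conversion yields NaN
  // rather than infinity: e4m3fn has no inf encoding.
  c10::Float8_e4m3fn big(1000.0f);
  std::cout << big.isnan() << "\n"; // prints 1
  return 0;
}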
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h
ADDED
@@ -0,0 +1,247 @@
+#pragma once
+
+/// Defines the Float8_e4m3fn type (8-bit floating-point) including conversions
+/// to standard C types and basic arithmetic operations. Note that arithmetic
+/// operations are implemented by converting to floating point and
+/// performing the operation in float32.
+/// Binary configuration:
+/// s eeee mmm
+/// 1 sign bit
+/// 4 exponent bits
+/// 3 mantissa bits
+/// bias = 7
+///
+/// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
+/// and inspired by Half implementation from pytorch/c10/util/Half.h
+
+#include <c10/macros/Macros.h>
+#include <c10/util/C++17.h>
+#include <c10/util/TypeSafeSignMath.h>
+#include <c10/util/floating_point_utils.h>
+#include <type_traits>
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#include <cmath>
+#include <cstdint>
+#elif !defined(__OPENCL_VERSION__)
+#include <math.h>
+#include <stdint.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#include <climits>
+#include <cstdint>
+#include <cstring>
+#include <iosfwd>
+#include <limits>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <utility>
+
+#include <typeinfo> // operator typeid
+
+namespace c10 {
+
+namespace detail {
+
+/*
+ * Convert an 8-bit floating-point number in fp8 E4M3FN format, in bit
+ * representation, to a 32-bit floating-point number in IEEE single-precision
+ * format, in bit representation.
+ *
+ * @note The implementation doesn't use any floating-point operations.
+ */
+inline C10_HOST_DEVICE float fp8e4m3fn_to_fp32_value(uint8_t input) {
+  /*
+   * Extend the fp8 E4M3FN number to 32 bits and shift to the
+   * upper part of the 32-bit word:
+   *      +---+----+---+-----------------------------+
+   *      | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
+   *      +---+----+---+-----------------------------+
+   * Bits  31  27-30 24-26          0-23
+   *
+   * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
+   * - zero bits.
+   */
+  const uint32_t w = (uint32_t)input << 24;
+  /*
+   * Extract the sign of the input number into the high bit of the 32-bit word:
+   *
+   *      +---+----------------------------------+
+   *      | S |0000000 00000000 00000000 00000000|
+   *      +---+----------------------------------+
+   * Bits  31                 0-31
+   */
+  const uint32_t sign = w & UINT32_C(0x80000000);
+  /*
+   * Extract mantissa and biased exponent of the input number into the bits
+   * 0-30 of the 32-bit word:
+   *
+   *      +---+----+---+-----------------------------+
+   *      | S |EEEE|MMM|0000 0000 0000 0000 0000 0000|
+   *      +---+----+---+-----------------------------+
+   * Bits  31  27-30 24-26          0-23
+   */
+  const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
+  /*
+   * Renorm shift is the number of bits to shift mantissa left to make the
+   * number normalized. If the initial number is normalized, some of its high
+   * 5 bits (sign == 0 and 4-bit exponent) equals one. In this case
+   * renorm_shift == 0. If the number is denormalized, renorm_shift > 0. Note
+   * that if we shift denormalized nonsign by renorm_shift, the unit bit of
+   * mantissa will shift into exponent, turning the biased exponent into 1, and
+   * making mantissa normalized (i.e. without leading 1).
+   */
+#if defined(__CUDA_ARCH__)
+  uint32_t renorm_shift = __clz(nonsign);
+#elif defined(__SYCL_DEVICE_ONLY__)
+  // Note: zero is not a supported input into `__builtin_clz`
+  uint32_t renorm_shift =
+      nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
+#elif defined(_MSC_VER)
+  unsigned long nonsign_bsr;
+  _BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
+  uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
+#else
+  // Note: zero is not a supported input into `__builtin_clz`
+  uint32_t renorm_shift =
+      nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT;
+#endif
+  renorm_shift = renorm_shift > 4 ? renorm_shift - 4 : 0;
+  /*
+   * Iff the fp8e4m3fn number has all exponent and mantissa bits set to 1,
+   * the addition overflows it into bit 31, and the subsequent shift turns the
+   * high 9 bits into 1. Thus inf_nan_mask == 0x7F800000 if the fp8e4m3fn number
+   * is NaN, 0x00000000 otherwise
+   */
+  const int32_t inf_nan_mask =
+      ((int32_t)(nonsign + 0x01000000) >> 8) & INT32_C(0x7F800000);
+  /*
+   * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
+   * into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
+   * broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
+   * 0xFFFFFFFF if the input number was zero (+0.0 or -0.0),
+   * 0x00000000 otherwise
+   */
+  const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
+  /*
+   * 1. Shift nonsign left by renorm_shift to normalize it (if the input
+   * was denormal)
+   * 2. Shift nonsign right by 4 so the exponent (4 bits originally)
+   * becomes an 8-bit field and the 3-bit mantissa shifts into the 3 high
+   * bits of the 23-bit mantissa of the IEEE single-precision number.
+   * 3. Add 0x78 to the exponent (starting at bit 23) to compensate for the
+   * difference in exponent bias (0x7F for the single-precision number less
+   * 0x07 for the fp8e4m3fn number).
+   * 4. Subtract renorm_shift from the exponent (starting at bit 23) to
+   * account for renormalization. As renorm_shift is less than 0x78, this
+   * can be combined with step 3.
+   * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
+   * input was NaN or infinity.
+   * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
+   * into zero if the input was zero.
+   * 7. Combine with the sign of the input number.
+   */
+  uint32_t result = sign |
+      ((((nonsign << renorm_shift >> 4) + ((0x78 - renorm_shift) << 23)) |
+        inf_nan_mask) &
+       ~zero_mask);
+  return fp32_from_bits(result);
+}
+
+/*
+ * Convert a 32-bit floating-point number in IEEE single-precision format to an
+ * 8-bit floating-point number in fp8 E4M3FN format, in bit representation.
+ */
+inline C10_HOST_DEVICE uint8_t fp8e4m3fn_from_fp32_value(float f) {
+  /*
+   * Binary representation of 480.0f, which is the first value
+   * not representable in fp8e4m3fn range:
+   * 0 1111 111 - fp8e4m3fn
+   * 0 10000111 11100000000000000000000 - fp32
+   */
+  constexpr uint32_t fp8_max = UINT32_C(1087) << 20;
+
+  /*
+   * A mask for converting fp32 numbers lower than fp8e4m3fn normal range
+   * into denorm representation
+   * magic number: ((127 - 7) + (23 - 3) + 1)
+   */
+  constexpr uint32_t denorm_mask = UINT32_C(141) << 23;
+
+  uint32_t f_bits = fp32_to_bits(f);
+
+  uint8_t result = 0u;
+
+  /*
+   * Extract the sign of the input number into the high bit of the 32-bit word:
+   *
+   *      +---+----------------------------------+
+   *      | S |0000000 00000000 00000000 00000000|
+   *      +---+----------------------------------+
+   * Bits  31                 0-31
+   */
+  const uint32_t sign = f_bits & UINT32_C(0x80000000);
+
+  /*
+   * Set sign bit to 0
+   */
+  f_bits ^= sign;
+
+  if (f_bits >= fp8_max) {
+    // NaN - all exponent and mantissa bits set to 1
+    result = 0x7f;
+  } else {
+    if (f_bits < (UINT32_C(121) << 23)) {
+      // Input number is smaller than 2^(-6), which is the smallest
+      // fp8e4m3fn normal number
+      f_bits =
+          fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
+      result = static_cast<uint8_t>(f_bits - denorm_mask);
+    } else {
+      // resulting mantissa is odd
+      uint8_t mant_odd = (f_bits >> 20) & 1;
+
+      // update exponent, rounding bias part 1
+      f_bits += ((uint32_t)(7 - 127) << 23) + 0x7FFFF;
+
+      // rounding bias part 2
+      f_bits += mant_odd;
+
+      // take the bits!
+      result = static_cast<uint8_t>(f_bits >> 20);
+    }
+  }
+
+  result |= static_cast<uint8_t>(sign >> 24);
+  return result;
+}
+
+} // namespace detail
+
+struct alignas(1) Float8_e4m3fn {
+  uint8_t x;
+
+  struct from_bits_t {};
+  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
+    return from_bits_t();
+  }
+
+  Float8_e4m3fn() = default;
+
+  constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t)
+      : x(bits) {}
+  inline C10_HOST_DEVICE Float8_e4m3fn(float value);
+  inline C10_HOST_DEVICE operator float() const;
+  inline C10_HOST_DEVICE bool isnan() const;
+};
+
+C10_API std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value);
+
+} // namespace c10
+
+#include <c10/util/Float8_e4m3fn-inl.h> // IWYU pragma: keep
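
Note: to make the s/eeee/mmm layout and bias = 7 concrete, a few bit patterns worked out from the conversion routine above (hypothetical asserts, not part of the diff; the helper name is the one defined in this header and is header-inline):

#include <c10/util/Float8_e4m3fn.h>
#include <cassert>

int main() {
  using c10::detail::fp8e4m3fn_from_fp32_value;
  // 1.0f: sign 0, unbiased exponent 0 -> biased 7 (0b0111), mantissa 000.
  assert(fp8e4m3fn_from_fp32_value(1.0f) == 0x38);
  // 448.0f = 1.75 * 2^8 is the largest finite value: 0 1111 110.
  assert(fp8e4m3fn_from_fp32_value(448.0f) == 0x7E);
  // Anything at or above 480.0f maps to the NaN pattern 0 1111 111.
  assert(fp8e4m3fn_from_fp32_value(480.0f) == 0x7F);
  return 0;
}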
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h
ADDED
@@ -0,0 +1,90 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <limits>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
+#endif
+
+namespace c10 {
+
+/// Constructors
+
+C10_HOST_DEVICE inline Float8_e4m3fnuz::Float8_e4m3fnuz(float value)
+    : x(detail::fp8e4m3fnuz_from_fp32_value(value)) {}
+
+/// Implicit conversions
+
+C10_HOST_DEVICE inline Float8_e4m3fnuz::operator float() const {
+  return detail::fp8e4m3fnuz_to_fp32_value(x);
+}
+
+/// Special values helper
+
+C10_HOST_DEVICE inline bool Float8_e4m3fnuz::isnan() const {
+  return x == 0b10000000;
+}
+
+} // namespace c10
+
+namespace std {
+
+template <>
+class numeric_limits<c10::Float8_e4m3fnuz> {
+ public:
+  static constexpr bool is_specialized = true;
+  static constexpr bool is_signed = true;
+  static constexpr bool is_integer = false;
+  static constexpr bool is_exact = false;
+  static constexpr bool has_infinity = false;
+  static constexpr bool has_quiet_NaN = true;
+  static constexpr bool has_signaling_NaN = false;
+  static constexpr auto has_denorm = true;
+  static constexpr auto has_denorm_loss = true;
+  static constexpr auto round_style = numeric_limits<float>::round_style;
+  static constexpr bool is_iec559 = false;
+  static constexpr bool is_bounded = true;
+  static constexpr bool is_modulo = false;
+  static constexpr int digits = 4;
+  static constexpr int digits10 = 0;
+  static constexpr int max_digits10 = 3;
+  static constexpr int radix = 2;
+  static constexpr int min_exponent = -6;
+  static constexpr int min_exponent10 = -1;
+  static constexpr int max_exponent = 8;
+  static constexpr int max_exponent10 = 2;
+  static constexpr auto traps = numeric_limits<float>::traps;
+  static constexpr auto tinyness_before = false;
+
+  static constexpr c10::Float8_e4m3fnuz min() {
+    return c10::Float8_e4m3fnuz(0x08, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz lowest() {
+    return c10::Float8_e4m3fnuz(0xFF, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz max() {
+    return c10::Float8_e4m3fnuz(0x7F, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz epsilon() {
+    return c10::Float8_e4m3fnuz(0x28, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz round_error() {
+    return c10::Float8_e4m3fnuz(0x38, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz infinity() {
+    // NaN (no infinities)
+    return c10::Float8_e4m3fnuz(0x80, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz quiet_NaN() {
+    return c10::Float8_e4m3fnuz(0x80, c10::Float8_e4m3fnuz::from_bits());
+  }
+  static constexpr c10::Float8_e4m3fnuz denorm_min() {
+    return c10::Float8_e4m3fnuz(0x01, c10::Float8_e4m3fnuz::from_bits());
+  }
+};
+
+} // namespace std
+
+C10_CLANG_DIAGNOSTIC_POP()
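
Note: fnuz formats repurpose the would-be negative-zero pattern as the single NaN, which is why isnan() above reduces to one equality test against 0x80. A hedged sketch (hypothetical test, not part of the diff; only the bit-level constructor and isnan() are exercised, so no link against the out-of-line conversion function is needed):

#include <c10/util/Float8_e4m3fnuz.h>
#include <cassert>

int main() {
  // In fnuz formats the "negative zero" pattern 0x80 is the one
  // and only NaN, so isnan() is a single equality test.
  c10::Float8_e4m3fnuz nan_val(0x80, c10::Float8_e4m3fnuz::from_bits());
  assert(nan_val.isnan());
  // Plain zero (0x00) is not NaN; there is no -0 in this format.
  c10::Float8_e4m3fnuz zero(0x00, c10::Float8_e4m3fnuz::from_bits());
  assert(!zero.isnan());
  return 0;
}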
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h
ADDED
@@ -0,0 +1,155 @@
+#pragma once
+
+/// Defines the Float8_e4m3fnuz type (8-bit floating-point) including
+/// conversions to standard C types and basic arithmetic operations. Note that
+/// arithmetic operations are implemented by converting to floating point and
+/// performing the operation in float32.
+///
+/// Binary configuration remains the same as Float8_e4m3fn:
+/// s eeee mmm
+/// 1 sign bit
+/// 4 exponent bits
+/// 3 mantissa bits
+///
+/// The key differences versus Float8_e4m3fn are:
+/// bias = 8
+/// no infinities or negative zero
+/// NaN only when sign bit is 1, rest all 0s
+///
+/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
+/// the existing Float8_e4m3fn implementation.
+
+#include <c10/macros/Macros.h>
+#include <c10/util/C++17.h>
+#include <c10/util/TypeSafeSignMath.h>
+#include <c10/util/floating_point_utils.h>
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#include <cstdint>
+#elif !defined(__OPENCL_VERSION__)
+#include <math.h>
+#include <stdint.h>
+#endif
+
+#include <iosfwd>
+#include <ostream>
+
+namespace c10 {
+
+namespace detail {
+
+/*
+ * Convert an 8-bit floating-point number in fp8 E4M3FNUZ format, in bit
+ * representation, to a 32-bit floating-point number in IEEE single-precision
+ * format, in bit representation.
+ *
+ * @note The implementation doesn't use any floating-point operations.
+ */
+#if defined(__CUDA_ARCH__) || defined(__HIP__)
+C10_HOST_DEVICE C10_API inline float fp8e4m3fnuz_to_fp32_value(uint8_t) {
+  CUDA_KERNEL_ASSERT(false && "e4m3fnuz is not supported by CUDA or HIP");
+  return -1.0;
+}
+#else
+C10_API float fp8e4m3fnuz_to_fp32_value(uint8_t input);
+#endif
+
+/*
+ * Convert a 32-bit floating-point number in IEEE single-precision format to an
+ * 8-bit floating-point number in fp8 E4M3FNUZ format, in bit representation.
+ */
+C10_HOST_DEVICE inline uint8_t fp8e4m3fnuz_from_fp32_value(float f) {
+  /*
+   * Binary representation of 256.0f, which is the first value not
+   * representable (i.e. the first value which would overflow into the sign
+   * bit, resulting in a NaN) in fp8e4m3fnuz range:
+   * 1 0000 000 - fp8e4m3fnuz
+   * 0 10000111 00000000000000000000000 - fp32
+   */
+  constexpr uint32_t fnuz_max = UINT32_C(0x87) << 23;
+
+  /*
+   * A mask for converting fp32 numbers lower than fp8e4m3fnuz normal range
+   * into denormalized representation.
+   * magic number: ((127 - 8) + (23 - 3) + 1)
+   */
+  constexpr uint32_t denorm_mask = UINT32_C(0x8C) << 23;
+
+  uint32_t f_bits = fp32_to_bits(f);
+
+  uint32_t result = 0u;
+
+  /*
+   * Extract the sign of the input number into the high bit of the 32-bit word:
+   *
+   *      +---+----------------------------------+
+   *      | S |0000000 00000000 00000000 00000000|
+   *      +---+----------------------------------+
+   * Bits  31                 0-31
+   */
+  const uint32_t sign = f_bits & UINT32_C(0x80000000);
+
+  /*
+   * Set sign bit to 0
+   */
+  f_bits ^= sign;
+
+  if (f_bits >= fnuz_max) {
+    // NaN -- sign bit set to 1, rest 0s.
+    return 0x80;
+  }
+
+  if (f_bits < (UINT32_C(0x78) << 23) /* 2^-7 in float32 */) {
+    // Input exponent is less than -7, the smallest e4m3fnuz exponent, so the
+    // number will become subnormal.
+    f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
+    result = static_cast<uint8_t>(f_bits - denorm_mask);
+    if (result == 0) {
+      // fnuz types don't have negative zero.
+      return 0;
+    }
+  } else {
+    // resulting mantissa is odd
+    uint8_t mant_odd = (f_bits >> 20) & 1;
+
+    // update exponent, rounding bias part 1
+    f_bits += ((uint32_t)(8 - 127) << 23) + 0x7FFFF;
+
+    // rounding bias part 2
+    f_bits += mant_odd;
+
+    // take the bits!
+    result = static_cast<uint8_t>(f_bits >> 20);
+  }
+
+  result |= sign >> 24;
+
+  return result;
+}
+
+} // namespace detail
+
+struct alignas(1) Float8_e4m3fnuz {
+  uint8_t x;
+
+  struct from_bits_t {};
+  static constexpr C10_HOST_DEVICE from_bits_t from_bits() {
+    return from_bits_t();
+  }
+
+  Float8_e4m3fnuz() = default;
+
+  constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t)
+      : x(bits) {}
+  inline C10_HOST_DEVICE Float8_e4m3fnuz(float value);
+  inline C10_HOST_DEVICE operator float() const;
+  inline C10_HOST_DEVICE bool isnan() const;
+};
+
+C10_API std::ostream& operator<<(
+    std::ostream& out,
+    const Float8_e4m3fnuz& value);
+
+} // namespace c10
+
+#include <c10/util/Float8_e4m3fnuz-inl.h> // IWYU pragma: keep
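
Note: raising the bias from 7 to 8 shifts every e4m3fnuz encoding by one exponent step relative to e4m3fn, and the overflow and negative-zero paths in fp8e4m3fnuz_from_fp32_value above behave as follows (hypothetical asserts worked out from the code, not part of the diff; the encoder is header-inline):

#include <c10/util/Float8_e4m3fnuz.h>
#include <cassert>

int main() {
  using c10::detail::fp8e4m3fnuz_from_fp32_value;
  // 1.0f: unbiased exponent 0 -> biased 8 (0b1000), mantissa 000 -> 0x40.
  // (The same value encodes as 0x38 in e4m3fn, whose bias is 7.)
  assert(fp8e4m3fnuz_from_fp32_value(1.0f) == 0x40);
  // 256.0f and above overflow into the single NaN pattern 0x80.
  assert(fp8e4m3fnuz_from_fp32_value(256.0f) == 0x80);
  // -0.0f is canonicalized to +0: fnuz formats have no negative zero.
  assert(fp8e4m3fnuz_from_fp32_value(-0.0f) == 0x00);
  return 0;
}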
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h
ADDED
@@ -0,0 +1,154 @@
+#pragma once
+
+/// Defines the Float8_e5m2fnuz type (8-bit floating-point) including
+/// conversions to standard C types and basic arithmetic operations. Note that
+/// arithmetic operations are implemented by converting to floating point and
+/// performing the operation in float32.
+///
+/// Binary configuration remains the same as e5m2:
+/// s eeeee mm
+/// 1 sign bit
+/// 5 exponent bits
+/// 2 mantissa bits
+///
+/// The key differences that e5m2fnuz brings are:
+/// bias = 16
+/// no infinities or negative zero
+/// NaN only when sign bit is 1, rest all 0s
+///
+/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
+/// the existing Float8_e4m3fn implementation.
+
+#include <c10/macros/Macros.h>
+#include <c10/util/C++17.h>
+#include <c10/util/TypeSafeSignMath.h>
+#include <c10/util/floating_point_utils.h>
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#include <cstdint>
+#elif !defined(__OPENCL_VERSION__)
+#include <math.h>
+#include <stdint.h>
+#endif
+
+#include <iosfwd>
+#include <ostream>
+
+namespace c10 {
+
+namespace detail {
+
+/*
+ * Convert an 8-bit floating-point number in fp8 E5M2FNUZ format, in bit
+ * representation, to a 32-bit floating-point number in IEEE single-precision
+ * format, in bit representation.
+ *
+ * @note The implementation doesn't use any floating-point operations.
+ */
+#if defined(__CUDA_ARCH__) || defined(__HIP__)
+C10_HOST_DEVICE C10_API inline float fp8e5m2fnuz_to_fp32_value(uint8_t) {
+  CUDA_KERNEL_ASSERT(false && "e5m2fnuz is not supported by CUDA or HIP");
+  return -1.0;
+}
+#else
+C10_API float fp8e5m2fnuz_to_fp32_value(uint8_t input);
+#endif
+
+/*
+ * Convert a 32-bit floating-point number in IEEE single-precision format to an
+ * 8-bit floating-point number in fp8 E5M2FNUZ format, in bit representation.
+ */
+C10_HOST_DEVICE inline uint8_t fp8e5m2fnuz_from_fp32_value(float f) {
+  /*
+   * Binary representation of 65536.0f, which is the first value not
+   * representable (i.e. the first value which would overflow into the sign
+   * bit, resulting in a NaN) in fp8e5m2fnuz range:
+   * 1 00000 00 - fp8e5m2fnuz
+   * 0 10001111 00000000000000000000000 - fp32
+   */
+  constexpr uint32_t fnuz_max = UINT32_C(0x8F) << 23;
+
+  /*
+   * A mask for converting fp32 numbers lower than fp8e5m2fnuz normal range
+   * into denormalized representation.
+   * magic number: ((127 - 16) + (23 - 2) + 1)
+   */
+  constexpr uint32_t denorm_mask = UINT32_C(0x85) << 23;
+
+  uint32_t f_bits = fp32_to_bits(f);
+
+  uint32_t result = 0u;
+
+  /*
+   * Extract the sign of the input number into the high bit of the 32-bit word:
+   *
+   *      +---+----------------------------------+
+   *      | S |0000000 00000000 00000000 00000000|
+   *      +---+----------------------------------+
+   * Bits  31                 0-31
+   */
+  const uint32_t sign = f_bits & UINT32_C(0x80000000);
+
+  /*
+   * Set sign bit to 0
+   */
+  f_bits ^= sign;
+
+  if (f_bits >= fnuz_max) {
+    // NaN -- sign bit set to 1, rest 0s
+    return 0x80;
+  }
+
+  if (f_bits < (UINT32_C(0x70) << 23) /* 2^-15 in float32 */) {
+    // Input exponent is less than -15, the smallest e5m2fnuz exponent, so the
+    // number will become subnormal.
+    f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
+    result = static_cast<uint8_t>(f_bits - denorm_mask);
+    if (result == 0) {
+      // fnuz types don't have negative zero.
+      return 0;
+    }
+  } else {
+    // resulting mantissa is odd
+    uint8_t mant_odd = (f_bits >> 21) & 1;
+
+    // update exponent, rounding bias part 1
+    f_bits += ((uint32_t)(16 - 127) << 23) + 0xFFFFF;
+
+    // rounding bias part 2
+    f_bits += mant_odd;
+
+    // take the bits!
+    result = static_cast<uint8_t>(f_bits >> 21);
+  }
+
+  result |= sign >> 24;
+  return result;
+}
+
+} // namespace detail
+
+struct alignas(1) Float8_e5m2fnuz {
+  uint8_t x;
+
+  struct from_bits_t {};
+  static constexpr C10_HOST_DEVICE from_bits_t from_bits() {
+    return from_bits_t();
+  }
+
+  Float8_e5m2fnuz() = default;
+
+  constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
+      : x(bits) {}
+  inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
+  inline C10_HOST_DEVICE operator float() const;
+  inline C10_HOST_DEVICE bool isnan() const;
+};
+
+C10_API std::ostream& operator<<(
+    std::ostream& out,
+    const Float8_e5m2fnuz& value);
+
+} // namespace c10
+
+#include <c10/util/Float8_e5m2fnuz-inl.h> // IWYU pragma: keep
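
Note: the e5m2fnuz encoder above is the same algorithm with 2 mantissa bits instead of 3, so the round-to-nearest-even adjustment operates at bit 21 rather than bit 20. A hedged sketch (hypothetical asserts derived from the code above, not part of the diff):

#include <c10/util/Float8_e5m2fnuz.h>
#include <cassert>

int main() {
  using c10::detail::fp8e5m2fnuz_from_fp32_value;
  // 1.0f: unbiased exponent 0 -> biased 16 (0b10000), mantissa 00 -> 0x40.
  assert(fp8e5m2fnuz_from_fp32_value(1.0f) == 0x40);
  // Round-to-nearest-even at 2 mantissa bits: 1.125f sits halfway between
  // 1.0 (0x40) and 1.25 (0x41) and rounds to the even encoding 0x40.
  assert(fp8e5m2fnuz_from_fp32_value(1.125f) == 0x40);
  // 65536.0f overflows into the single NaN pattern 0x80.
  assert(fp8e5m2fnuz_from_fp32_value(65536.0f) == 0x80);
  return 0;
}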
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/FunctionRef.h
ADDED
@@ -0,0 +1,72 @@
+//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some templates that are useful if you are working with the
+// STL at all.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+// c10: modified from llvm::function_ref
+// c10: added more SFINAE to enable use in overloaded functions
+
+#pragma once
+
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+namespace c10 {
+
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
+template <typename Fn>
+class function_ref;
+
+template <typename Ret, typename... Params>
+class function_ref<Ret(Params...)> {
+  Ret (*callback)(intptr_t callable, Params... params) = nullptr;
+  intptr_t callable{};
+
+  template <typename Callable>
+  static Ret callback_fn(intptr_t callable, Params... params) {
+    return (*reinterpret_cast<Callable*>(callable))(
+        std::forward<Params>(params)...);
+  }
+
+ public:
+  function_ref() = default;
+  function_ref(std::nullptr_t) {}
+
+  template <typename Callable>
+  function_ref(
+      Callable&& callable,
+      typename std::enable_if<!std::is_same<
+          typename std::remove_reference<Callable>::type,
+          function_ref>::value>::type* = nullptr,
+      typename std::enable_if<std::is_convertible<
+          typename std::invoke_result_t<Callable, Params...>,
+          Ret>::value>::type* = nullptr)
+      : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+        callable(reinterpret_cast<intptr_t>(&callable)) {}
+
+  Ret operator()(Params... params) const {
+    return callback(callable, std::forward<Params>(params)...);
+  }
+
+  operator bool() const {
+    return callback;
+  }
+};
+
+} // namespace c10
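
Note: function_ref above erases the callable into a raw pointer plus a static trampoline, so it is trivially copyable and allocation-free, but it must not outlive the callable it references. A hedged usage sketch (hypothetical example, not part of the diff):

#include <c10/util/FunctionRef.h>
#include <iostream>

// Takes any callable int -> int without templating the call site and
// without the heap allocation a std::function might make.
int apply_twice(c10::function_ref<int(int)> f, int x) {
  return f(f(x));
}

int main() {
  int scale = 3;
  // The lambda outlives the call, so passing a reference to it is safe;
  // storing the function_ref past this statement would dangle.
  std::cout << apply_twice([&](int v) { return v * scale; }, 2) << "\n"; // 18
  return 0;
}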
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Half.h
ADDED
@@ -0,0 +1,506 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
/// Defines the Half type (half-precision floating-point) including conversions
|
4 |
+
/// to standard C types and basic arithmetic operations. Note that arithmetic
|
5 |
+
/// operations are implemented by converting to floating point and
|
6 |
+
/// performing the operation in float32, instead of using CUDA half intrinsics.
|
7 |
+
/// Most uses of this type within ATen are memory bound, including the
|
8 |
+
/// element-wise kernels, and the half intrinsics aren't efficient on all GPUs.
|
9 |
+
/// If you are writing a compute bound kernel, you can use the CUDA half
|
10 |
+
/// intrinsics directly on the Half type from device code.
|
11 |
+
|
12 |
+
#include <c10/macros/Macros.h>
|
13 |
+
#include <c10/util/C++17.h>
|
14 |
+
#include <c10/util/TypeSafeSignMath.h>
|
15 |
+
#include <c10/util/complex.h>
|
16 |
+
#include <c10/util/floating_point_utils.h>
|
17 |
+
#include <type_traits>
|
18 |
+
|
19 |
+
#if defined(__cplusplus) && (__cplusplus >= 201103L)
|
20 |
+
#include <cmath>
|
21 |
+
#include <cstdint>
|
22 |
+
#elif !defined(__OPENCL_VERSION__)
|
23 |
+
#include <math.h>
|
24 |
+
#include <stdint.h>
|
25 |
+
#endif
|
26 |
+
|
27 |
+
#ifdef _MSC_VER
|
28 |
+
#include <intrin.h>
|
29 |
+
#endif
|
30 |
+
|
31 |
+
#include <complex>
|
32 |
+
#include <cstdint>
|
33 |
+
#include <cstring>
|
34 |
+
#include <iosfwd>
|
35 |
+
#include <limits>
|
36 |
+
#include <sstream>
|
37 |
+
#include <stdexcept>
|
38 |
+
#include <string>
|
39 |
+
#include <utility>
|
40 |
+
|
41 |
+
#ifdef __CUDACC__
|
42 |
+
#include <cuda_fp16.h>
|
43 |
+
#endif
|
44 |
+
|
45 |
+
#ifdef __HIPCC__
|
46 |
+
#include <hip/hip_fp16.h>
|
47 |
+
#endif
|
48 |
+
|
49 |
+
#if defined(CL_SYCL_LANGUAGE_VERSION)
|
50 |
+
#include <CL/sycl.hpp> // for SYCL 1.2.1
|
51 |
+
#elif defined(SYCL_LANGUAGE_VERSION)
|
52 |
+
#include <sycl/sycl.hpp> // for SYCL 2020
|
53 |
+
#endif
|
54 |
+
|
55 |
+
#include <typeinfo> // operator typeid
|
56 |
+
|
57 |
+
namespace c10 {
|
58 |
+
|
59 |
+
namespace detail {
|
60 |
+
|
61 |
+
/*
|
62 |
+
* Convert a 16-bit floating-point number in IEEE half-precision format, in bit
|
63 |
+
* representation, to a 32-bit floating-point number in IEEE single-precision
|
64 |
+
* format, in bit representation.
|
65 |
+
*
|
66 |
+
* @note The implementation doesn't use any floating-point operations.
|
67 |
+
*/
|
68 |
+
inline uint32_t fp16_ieee_to_fp32_bits(uint16_t h) {
|
69 |
+
/*
|
70 |
+
* Extend the half-precision floating-point number to 32 bits and shift to the
|
71 |
+
* upper part of the 32-bit word:
|
72 |
+
* +---+-----+------------+-------------------+
|
73 |
+
* | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
|
74 |
+
* +---+-----+------------+-------------------+
|
75 |
+
* Bits 31 26-30 16-25 0-15
|
76 |
+
*
|
77 |
+
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
|
78 |
+
* - zero bits.
|
79 |
+
*/
|
80 |
+
const uint32_t w = (uint32_t)h << 16;
|
81 |
+
/*
|
82 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
83 |
+
*
|
84 |
+
* +---+----------------------------------+
|
85 |
+
* | S |0000000 00000000 00000000 00000000|
|
86 |
+
* +---+----------------------------------+
|
87 |
+
* Bits 31 0-31
|
88 |
+
*/
|
89 |
+
const uint32_t sign = w & UINT32_C(0x80000000);
|
90 |
+
/*
|
91 |
+
* Extract mantissa and biased exponent of the input number into the bits 0-30
|
92 |
+
* of the 32-bit word:
|
93 |
+
*
|
94 |
+
* +---+-----+------------+-------------------+
|
95 |
+
* | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
|
96 |
+
* +---+-----+------------+-------------------+
|
97 |
+
* Bits 30 27-31 17-26 0-16
|
98 |
+
*/
|
99 |
+
const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
|
100 |
+
/*
|
101 |
+
* Renorm shift is the number of bits to shift mantissa left to make the
|
102 |
+
* half-precision number normalized. If the initial number is normalized, some
|
103 |
+
* of its high 6 bits (sign == 0 and 5-bit exponent) equals one. In this case
|
104 |
+
* renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note
|
105 |
+
* that if we shift denormalized nonsign by renorm_shift, the unit bit of
|
106 |
+
* mantissa will shift into exponent, turning the biased exponent into 1, and
|
107 |
+
* making mantissa normalized (i.e. without leading 1).
|
108 |
+
*/
|
109 |
+
#ifdef _MSC_VER
|
110 |
+
unsigned long nonsign_bsr;
|
111 |
+
_BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
|
112 |
+
uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
|
113 |
+
#else
|
114 |
+
uint32_t renorm_shift = __builtin_clz(nonsign);
|
115 |
+
#endif
|
116 |
+
renorm_shift = renorm_shift > 5 ? renorm_shift - 5 : 0;
|
117 |
+
/*
|
118 |
+
* Iff half-precision number has exponent of 15, the addition overflows
|
119 |
+
* it into bit 31, and the subsequent shift turns the high 9 bits
|
120 |
+
* into 1. Thus inf_nan_mask == 0x7F800000 if the half-precision number
|
121 |
+
* had exponent of 15 (i.e. was NaN or infinity) 0x00000000 otherwise
|
122 |
+
*/
|
123 |
+
const int32_t inf_nan_mask =
|
124 |
+
((int32_t)(nonsign + 0x04000000) >> 8) & INT32_C(0x7F800000);
|
125 |
+
/*
|
126 |
+
* Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
|
127 |
+
* into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
|
128 |
+
* broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
|
129 |
+
* 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h)
|
130 |
+
* 0x00000000 otherwise
|
131 |
+
*/
|
132 |
+
const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
|
133 |
+
/*
|
134 |
+
* 1. Shift nonsign left by renorm_shift to normalize it (if the input
|
135 |
+
* was denormal)
|
136 |
+
* 2. Shift nonsign right by 3 so the exponent (5 bits originally)
|
137 |
+
* becomes an 8-bit field and 10-bit mantissa shifts into the 10 high
|
138 |
+
* bits of the 23-bit mantissa of IEEE single-precision number.
|
139 |
+
* 3. Add 0x70 to the exponent (starting at bit 23) to compensate the
|
140 |
+
* different in exponent bias (0x7F for single-precision number less 0xF
|
141 |
+
* for half-precision number).
|
142 |
+
* 4. Subtract renorm_shift from the exponent (starting at bit 23) to
|
143 |
+
* account for renormalization. As renorm_shift is less than 0x70, this
|
144 |
+
* can be combined with step 3.
|
145 |
+
* 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
|
146 |
+
* input was NaN or infinity.
|
147 |
+
* 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
|
148 |
+
* into zero if the input was zero.
|
149 |
+
* 7. Combine with the sign of the input number.
|
150 |
+
*/
|
151 |
+
return sign |
|
152 |
+
((((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) |
|
153 |
+
inf_nan_mask) &
|
154 |
+
~zero_mask);
|
155 |
+
}
|
156 |
+
|
157 |
+
/*
|
158 |
+
* Convert a 16-bit floating-point number in IEEE half-precision format, in bit
|
159 |
+
* representation, to a 32-bit floating-point number in IEEE single-precision
|
160 |
+
* format.
|
161 |
+
*
|
162 |
+
* @note The implementation relies on IEEE-like (no assumption about rounding
|
163 |
+
* mode and no operations on denormals) floating-point operations and bitcasts
|
164 |
+
* between integer and floating-point variables.
|
165 |
+
*/
|
166 |
+
C10_HOST_DEVICE inline float fp16_ieee_to_fp32_value(uint16_t h) {
|
167 |
+
/*
|
168 |
+
* Extend the half-precision floating-point number to 32 bits and shift to the
|
169 |
+
* upper part of the 32-bit word:
|
170 |
+
* +---+-----+------------+-------------------+
|
171 |
+
* | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
|
172 |
+
* +---+-----+------------+-------------------+
|
173 |
+
* Bits 31 26-30 16-25 0-15
|
174 |
+
*
|
175 |
+
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
|
176 |
+
* - zero bits.
|
177 |
+
*/
|
178 |
+
const uint32_t w = (uint32_t)h << 16;
|
179 |
+
/*
|
180 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
181 |
+
*
|
182 |
+
* +---+----------------------------------+
|
183 |
+
* | S |0000000 00000000 00000000 00000000|
|
184 |
+
* +---+----------------------------------+
|
185 |
+
* Bits 31 0-31
|
186 |
+
*/
|
187 |
+
const uint32_t sign = w & UINT32_C(0x80000000);
|
188 |
+
/*
|
189 |
+
* Extract mantissa and biased exponent of the input number into the high bits
|
190 |
+
* of the 32-bit word:
|
191 |
+
*
|
192 |
+
* +-----+------------+---------------------+
|
193 |
+
* |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000|
|
194 |
+
* +-----+------------+---------------------+
|
195 |
+
* Bits 27-31 17-26 0-16
|
196 |
+
*/
|
197 |
+
const uint32_t two_w = w + w;
|
198 |
+
|
199 |
+
/*
|
200 |
+
* Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become
|
201 |
+
* mantissa and exponent of a single-precision floating-point number:
|
202 |
+
*
|
203 |
+
* S|Exponent | Mantissa
|
204 |
+
* +-+---+-----+------------+----------------+
|
205 |
+
* |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000|
|
206 |
+
* +-+---+-----+------------+----------------+
|
207 |
+
* Bits | 23-31 | 0-22
|
208 |
+
*
|
209 |
+
* Next, there are some adjustments to the exponent:
|
210 |
+
* - The exponent needs to be corrected by the difference in exponent bias
|
211 |
+
* between single-precision and half-precision formats (0x7F - 0xF = 0x70)
|
212 |
+
* - Inf and NaN values in the inputs should become Inf and NaN values after
|
213 |
+
* conversion to the single-precision number. Therefore, if the biased
|
214 |
+
* exponent of the half-precision input was 0x1F (max possible value), the
|
215 |
+
* biased exponent of the single-precision output must be 0xFF (max possible
|
216 |
+
* value). We do this correction in two steps:
|
217 |
+
* - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset
|
218 |
+
* below) rather than by 0x70 suggested by the difference in the exponent bias
|
219 |
+
* (see above).
|
220 |
+
* - Then we multiply the single-precision result of exponent adjustment by
|
221 |
+
* 2**(-112) to reverse the effect of exponent adjustment by 0xE0 less the
|
222 |
+
* necessary exponent adjustment by 0x70 due to difference in exponent bias.
|
223 |
+
* The floating-point multiplication hardware would ensure than Inf and
|
224 |
+
* NaN would retain their value on at least partially IEEE754-compliant
|
225 |
+
* implementations.
|
226 |
+
*
|
227 |
+
* Note that the above operations do not handle denormal inputs (where biased
|
228 |
+
* exponent == 0). However, they also do not operate on denormal inputs, and
|
229 |
+
* do not produce denormal results.
|
230 |
+
*/
|
231 |
+
constexpr uint32_t exp_offset = UINT32_C(0xE0) << 23;
|
232 |
+
// const float exp_scale = 0x1.0p-112f;
|
233 |
+
constexpr uint32_t scale_bits = (uint32_t)15 << 23;
|
234 |
+
float exp_scale_val = 0;
|
235 |
+
std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
|
236 |
+
const float exp_scale = exp_scale_val;
|
237 |
+
const float normalized_value =
|
238 |
+
fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
|
239 |
+
|
240 |
+
/*
|
241 |
+
* Convert denormalized half-precision inputs into single-precision results
|
242 |
+
* (always normalized). Zero inputs are also handled here.
|
243 |
+
*
|
244 |
+
* In a denormalized number the biased exponent is zero, and mantissa has
|
245 |
+
* on-zero bits. First, we shift mantissa into bits 0-9 of the 32-bit word.
|
246 |
+
*
|
247 |
+
* zeros | mantissa
|
248 |
+
* +---------------------------+------------+
|
249 |
+
* |0000 0000 0000 0000 0000 00|MM MMMM MMMM|
|
250 |
+
* +---------------------------+------------+
|
251 |
+
* Bits 10-31 0-9
|
252 |
+
*
|
253 |
+
* Now, remember that denormalized half-precision numbers are represented as:
|
254 |
+
* FP16 = mantissa * 2**(-24).
|
255 |
+
* The trick is to construct a normalized single-precision number with the
|
256 |
+
* same mantissa and thehalf-precision input and with an exponent which would
|
257 |
+
* scale the corresponding mantissa bits to 2**(-24). A normalized
|
258 |
+
* single-precision floating-point number is represented as: FP32 = (1 +
|
259 |
+
* mantissa * 2**(-23)) * 2**(exponent - 127) Therefore, when the biased
|
260 |
+
* exponent is 126, a unit change in the mantissa of the input denormalized
|
261 |
+
* half-precision number causes a change of the constructed single-precision
|
262 |
+
* number by 2**(-24), i.e. the same amount.
|
263 |
+
*
|
264 |
+
* The last step is to adjust the bias of the constructed single-precision
|
265 |
+
* number. When the input half-precision number is zero, the constructed
|
266 |
+
* single-precision number has the value of FP32 = 1 * 2**(126 - 127) =
|
267 |
+
* 2**(-1) = 0.5 Therefore, we need to subtract 0.5 from the constructed
|
268 |
+
* single-precision number to get the numerical equivalent of the input
|
269 |
+
* half-precision number.
|
270 |
+
*/
|
271 |
+
constexpr uint32_t magic_mask = UINT32_C(126) << 23;
|
272 |
+
constexpr float magic_bias = 0.5f;
|
273 |
+
const float denormalized_value =
|
274 |
+
fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
|
275 |
+
|
276 |
+
/*
|
277 |
+
* - Choose either results of conversion of input as a normalized number, or
|
278 |
+
* as a denormalized number, depending on the input exponent. The variable
|
279 |
+
* two_w contains input exponent in bits 27-31, therefore if its smaller than
|
280 |
+
* 2**27, the input is either a denormal number, or zero.
|
281 |
+
* - Combine the result of conversion of exponent and mantissa with the sign
|
282 |
+
* of the input number.
|
283 |
+
*/
|
284 |
+
constexpr uint32_t denormalized_cutoff = UINT32_C(1) << 27;
|
285 |
+
const uint32_t result = sign |
|
286 |
+
(two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value)
|
287 |
+
: fp32_to_bits(normalized_value));
|
288 |
+
return fp32_from_bits(result);
|
289 |
+
}
|
+
+/*
+ * Convert a 32-bit floating-point number in IEEE single-precision format to a
+ * 16-bit floating-point number in IEEE half-precision format, in bit
+ * representation.
+ *
+ * @note The implementation relies on IEEE-like (no assumption about rounding
+ * mode and no operations on denormals) floating-point operations and bitcasts
+ * between integer and floating-point variables.
+ */
+inline uint16_t fp16_ieee_from_fp32_value(float f) {
+  // const float scale_to_inf = 0x1.0p+112f;
+  // const float scale_to_zero = 0x1.0p-110f;
+  constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
+  constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
+  float scale_to_inf_val = 0, scale_to_zero_val = 0;
+  std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
+  std::memcpy(
+      &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
+  const float scale_to_inf = scale_to_inf_val;
+  const float scale_to_zero = scale_to_zero_val;
+
+#if defined(_MSC_VER) && _MSC_VER == 1916
+  float base = ((signbit(f) != 0 ? -f : f) * scale_to_inf) * scale_to_zero;
+#else
+  float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+#endif
+
+  const uint32_t w = fp32_to_bits(f);
+  const uint32_t shl1_w = w + w;
+  const uint32_t sign = w & UINT32_C(0x80000000);
+  uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+  if (bias < UINT32_C(0x71000000)) {
+    bias = UINT32_C(0x71000000);
+  }
+
+  base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+  const uint32_t bits = fp32_to_bits(base);
+  const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+  const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+  const uint32_t nonsign = exp_bits + mantissa_bits;
+  return static_cast<uint16_t>(
+      (sign >> 16) |
+      (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign));
+}
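A quick sanity check for the encoder is to round-trip values that are exactly representable in fp16. A sketch, assuming the usual c10/util/Half.h include path and the fp16_ieee_to_fp32_value decoder defined earlier in this header:

#include <c10/util/Half.h> // assumed include path
#include <cassert>
#include <cstdint>

void check_roundtrip() {
  // 0.333251953125f is the fp16 value nearest 1/3, exact in both formats.
  for (float v : {0.0f, -0.0f, 1.0f, 0.333251953125f}) {
    uint16_t h = c10::detail::fp16_ieee_from_fp32_value(v);
    assert(c10::detail::fp16_ieee_to_fp32_value(h) == v);
  }
}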
+
+} // namespace detail
+
+struct alignas(2) Half {
+  unsigned short x;
+
+  struct from_bits_t {};
+  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
+    return from_bits_t();
+  }
+
+  // HIP wants __host__ __device__ tag, CUDA does not
+#if defined(USE_ROCM)
+  C10_HOST_DEVICE Half() = default;
+#else
+  Half() = default;
+#endif
+
+  constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t) : x(bits){};
+  inline C10_HOST_DEVICE Half(float value);
+  inline C10_HOST_DEVICE operator float() const;
+
+#if defined(__CUDACC__) || defined(__HIPCC__)
+  inline C10_HOST_DEVICE Half(const __half& value);
+  inline C10_HOST_DEVICE operator __half() const;
+#endif
+#ifdef SYCL_LANGUAGE_VERSION
+  inline C10_HOST_DEVICE Half(const sycl::half& value);
+  inline C10_HOST_DEVICE operator sycl::half() const;
+#endif
+};
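The from_bits_t tag exists to separate "reinterpret these 16 bits" from "convert this number"; without it, Half(0x3C00) would read as a numeric conversion from the integer 15360. A sketch (include path assumed):

#include <c10/util/Half.h> // assumed include path

void half_ctor_demo() {
  c10::Half a(1.0f);                           // numeric conversion
  c10::Half b(0x3C00, c10::Half::from_bits()); // raw bits; 0x3C00 is fp16 1.0
  // a.x == b.x == 0x3C00
}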
+
+// TODO : move to complex.h
+template <>
+struct alignas(4) complex<Half> {
+  Half real_;
+  Half imag_;
+
+  // Constructors
+  complex() = default;
+  // Half constructor is not constexpr so the following constructor can't
+  // be constexpr
+  C10_HOST_DEVICE explicit inline complex(const Half& real, const Half& imag)
+      : real_(real), imag_(imag) {}
+  C10_HOST_DEVICE inline complex(const c10::complex<float>& value)
+      : real_(value.real()), imag_(value.imag()) {}
+
+  // Conversion operator
+  inline C10_HOST_DEVICE operator c10::complex<float>() const {
+    return {real_, imag_};
+  }
+
+  constexpr C10_HOST_DEVICE Half real() const {
+    return real_;
+  }
+  constexpr C10_HOST_DEVICE Half imag() const {
+    return imag_;
+  }
+
+  C10_HOST_DEVICE complex<Half>& operator+=(const complex<Half>& other) {
+    real_ = static_cast<float>(real_) + static_cast<float>(other.real_);
+    imag_ = static_cast<float>(imag_) + static_cast<float>(other.imag_);
+    return *this;
+  }
+
+  C10_HOST_DEVICE complex<Half>& operator-=(const complex<Half>& other) {
+    real_ = static_cast<float>(real_) - static_cast<float>(other.real_);
+    imag_ = static_cast<float>(imag_) - static_cast<float>(other.imag_);
+    return *this;
+  }
+
+  C10_HOST_DEVICE complex<Half>& operator*=(const complex<Half>& other) {
+    auto a = static_cast<float>(real_);
+    auto b = static_cast<float>(imag_);
+    auto c = static_cast<float>(other.real());
+    auto d = static_cast<float>(other.imag());
+    real_ = a * c - b * d;
+    imag_ = a * d + b * c;
+    return *this;
+  }
+};
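Note the pattern in the compound operators: each component is widened to float, combined there, and narrowed back to Half on store, so each result component incurs exactly one fp16 rounding. A sketch (include paths assumed):

#include <c10/util/Half.h>    // assumed include paths
#include <c10/util/complex.h>

void complex_half_demo() {
  // (1 + 2i) * (3 + 4i) = -5 + 10i; both components are fp16-exact.
  c10::complex<c10::Half> z(c10::Half(1.0f), c10::Half(2.0f));
  z *= c10::complex<c10::Half>(c10::Half(3.0f), c10::Half(4.0f));
  // z.real() == -5.0f and z.imag() == 10.0f after narrowing back to Half
}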
+
+// In some versions of MSVC, there will be a compiler error when building.
+// C4146: unary minus operator applied to unsigned type, result still unsigned
+// C4804: unsafe use of type 'bool' in operation
+// It can be addressed by disabling the following warning.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4146)
+#pragma warning(disable : 4804)
+#pragma warning(disable : 4018)
+#endif
+
+// The overflow checks may involve float to int conversion which may
+// trigger precision loss warning. Re-enable the warning once the code
+// is fixed. See T58053069.
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
+#endif
+
+// bool can be converted to any type.
+// Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build:
+// `error: comparison of constant '255' with boolean expression is always false`
+// for `f > limit::max()` below
+template <typename To, typename From>
+typename std::enable_if<std::is_same<From, bool>::value, bool>::type overflows(
+    From /*f*/) {
+  return false;
+}
+
+// skip isnan and isinf check for integral types
+template <typename To, typename From>
+typename std::enable_if<
+    std::is_integral<From>::value && !std::is_same<From, bool>::value,
+    bool>::type
+overflows(From f) {
+  using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
+  if (!limit::is_signed && std::numeric_limits<From>::is_signed) {
+    // allow for negative numbers to wrap using two's complement arithmetic.
+    // For example, with uint8, this allows for `a - b` to be treated as
+    // `a + 255 * b`.
+    return greater_than_max<To>(f) ||
+        (c10::is_negative(f) && -static_cast<uint64_t>(f) > limit::max());
+  } else {
+    return c10::less_than_lowest<To>(f) || greater_than_max<To>(f);
+  }
+}
+
+template <typename To, typename From>
+typename std::enable_if<std::is_floating_point<From>::value, bool>::type
+overflows(From f) {
+  using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
+  if (limit::has_infinity && std::isinf(static_cast<double>(f))) {
+    return false;
+  }
+  if (!limit::has_quiet_NaN && (f != f)) {
+    return true;
+  }
+  return f < limit::lowest() || f > limit::max();
+}
+
+C10_CLANG_DIAGNOSTIC_POP()
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+template <typename To, typename From>
+typename std::enable_if<is_complex<From>::value, bool>::type overflows(From f) {
+  // casts from complex to real are considered to overflow if the
+  // imaginary component is non-zero
+  if (!is_complex<To>::value && f.imag() != 0) {
+    return true;
+  }
+  // Check for overflow componentwise
+  // (Technically, the imag overflow check is guaranteed to be false
+  // when !is_complex<To>, but any optimizer worth its salt will be
+  // able to figure it out.)
+  return overflows<
+             typename scalar_value_type<To>::type,
+             typename From::value_type>(f.real()) ||
+      overflows<
+             typename scalar_value_type<To>::type,
+             typename From::value_type>(f.imag());
+}
+
+C10_API std::ostream& operator<<(std::ostream& out, const Half& value);
+
+} // namespace c10
+
+#include <c10/util/Half-inl.h> // IWYU pragma: keep
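A sketch of what the three overloads report; it assumes the std::numeric_limits<c10::Half> specialization that c10 provides elsewhere, and that scalar_value_type<T>::type is T for these non-complex types:

#include <c10/util/Half.h> // assumed include path
#include <cassert>
#include <cstdint>

void overflows_demo() {
  assert(c10::overflows<uint8_t>(300));     // > 255: a true overflow
  assert(!c10::overflows<uint8_t>(-1));     // allowed: wraps to 255 (see comment above)
  assert(c10::overflows<c10::Half>(1e30f)); // far beyond fp16's max finite value, 65504
}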
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h
ADDED
@@ -0,0 +1,78 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <cstddef>
+#include <functional>
+#include <utility>
+
+namespace c10 {
+
+/**
+ * This template simplifies generation of simple classes that wrap an id
+ * in a typesafe way. Namely, you can use it to create a very lightweight
+ * type that only offers equality comparators and hashing. Example:
+ *
+ *   struct MyIdType final : IdWrapper<MyIdType, uint32_t> {
+ *     constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {}
+ *   };
+ *
+ * Then in the global top level namespace:
+ *
+ *   C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);
+ *
+ * That's it - equality operators and hash functions are automatically defined
+ * for you, given the underlying type supports it.
+ */
+template <class ConcreteType, class UnderlyingType>
+class IdWrapper {
+ public:
+  using underlying_type = UnderlyingType;
+  using concrete_type = ConcreteType;
+
+ protected:
+  constexpr explicit IdWrapper(underlying_type id) noexcept(
+      noexcept(underlying_type(std::declval<underlying_type>())))
+      : id_(id) {}
+
+  constexpr underlying_type underlyingId() const
+      noexcept(noexcept(underlying_type(std::declval<underlying_type>()))) {
+    return id_;
+  }
+
+ private:
+  friend size_t hash_value(const concrete_type& v) {
+    return std::hash<underlying_type>()(v.id_);
+  }
+
+  // TODO Making operator== noexcept if underlying type is noexcept equality
+  // comparable doesn't work with GCC 4.8.
+  // Fix this once we don't need GCC 4.8 anymore.
+  friend constexpr bool operator==(
+      const concrete_type& lhs,
+      const concrete_type& rhs) noexcept {
+    return lhs.id_ == rhs.id_;
+  }
+
+  // TODO Making operator!= noexcept if operator== is noexcept doesn't work with
+  // GCC 4.8.
+  // Fix this once we don't need GCC 4.8 anymore.
+  friend constexpr bool operator!=(
+      const concrete_type& lhs,
+      const concrete_type& rhs) noexcept {
+    return !(lhs == rhs);
+  }
+
+  underlying_type id_;
+};
+
+} // namespace c10
+
+#define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \
+  namespace std {                                \
+  template <>                                    \
+  struct hash<ClassName> {                       \
+    size_t operator()(ClassName x) const {       \
+      return hash_value(x);                      \
+    }                                            \
+  };                                             \
+  }
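Following the recipe in the doc comment, a hypothetical StreamId (the name is illustrative) gets equality and hashing for free and drops straight into unordered containers:

#include <c10/util/IdWrapper.h> // assumed include path
#include <cstdint>
#include <unordered_set>

struct StreamId final : c10::IdWrapper<StreamId, uint32_t> {
  constexpr explicit StreamId(uint32_t id) : IdWrapper(id) {}
};
C10_DEFINE_HASH_FOR_IDWRAPPER(StreamId);

void id_demo() {
  std::unordered_set<StreamId> seen;
  seen.insert(StreamId(42));
  // seen.count(StreamId(42)) == 1, and StreamId cannot be mixed up with
  // other uint32_t-backed id types or raw integers.
}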
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Load.h
ADDED
@@ -0,0 +1,38 @@
+#pragma once
+#include <c10/macros/Macros.h>
+#include <cstring>
+
+namespace c10 {
+namespace detail {
+
+template <typename T>
+struct LoadImpl {
+  C10_HOST_DEVICE static T apply(const void* src) {
+    return *reinterpret_cast<const T*>(src);
+  }
+};
+
+template <>
+struct LoadImpl<bool> {
+  C10_HOST_DEVICE static bool apply(const void* src) {
+    static_assert(sizeof(bool) == sizeof(char));
+    // NOTE: [Loading boolean values]
+    // Protect against invalid boolean values by loading as a byte
+    // first, then converting to bool (see gh-54789).
+    return *reinterpret_cast<const unsigned char*>(src);
+  }
+};
+
+} // namespace detail
+
+template <typename T>
+C10_HOST_DEVICE T load(const void* src) {
+  return c10::detail::LoadImpl<T>::apply(src);
+}
+
+template <typename scalar_t>
+C10_HOST_DEVICE scalar_t load(const scalar_t* src) {
+  return c10::detail::LoadImpl<scalar_t>::apply(src);
+}
+
+} // namespace c10
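A sketch of why the bool specialization matters (see gh-54789 above): a byte that is neither 0 nor 1 would be undefined behavior to read through a bool*, but reading it as a byte and converting normalizes any non-zero value to true:

#include <c10/util/Load.h> // assumed include path
#include <cassert>

void load_demo() {
  unsigned char byte = 0x02; // e.g. from uninitialized or foreign storage
  assert(c10::load<bool>(&byte) == true);
}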
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Logging.h
ADDED
@@ -0,0 +1,375 @@
+#ifndef C10_UTIL_LOGGING_H_
+#define C10_UTIL_LOGGING_H_
+
+#include <climits>
+#include <exception>
+#include <functional>
+#include <limits>
+#include <map>
+#include <sstream>
+
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+#include <c10/util/Flags.h>
+#include <c10/util/StringUtil.h>
+
+// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off
+// logging at compile time so no logging message below that level is produced
+// at all. The value should be between INT_MIN and CAFFE_FATAL.
+#ifndef CAFFE2_LOG_THRESHOLD
+// If we have not defined the compile time log threshold, we keep all the
+// log cases.
+#define CAFFE2_LOG_THRESHOLD INT_MIN
+#endif // CAFFE2_LOG_THRESHOLD
+
+// Below are different implementations for glog and non-glog cases.
+#ifdef C10_USE_GLOG
+#include <c10/util/logging_is_google_glog.h>
+#else // !C10_USE_GLOG
+#include <c10/util/logging_is_not_google_glog.h>
+#endif // C10_USE_GLOG
+
+C10_DECLARE_int(caffe2_log_level);
+C10_DECLARE_bool(caffe2_use_fatal_for_enforce);
+
+// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's
+// not available - just short-circuit to the always working one.
+// We define the C10_ name to avoid confusing other files
+#ifdef LOG_EVERY_MS
+#define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms)
+#else
+#define C10_LOG_EVERY_MS(severity, ms) LOG(severity)
+#endif
+
+// Same for LOG_FIRST_N
+#ifdef LOG_FIRST_N
+#define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n)
+#else
+#define C10_LOG_FIRST_N(severity, n) LOG(severity)
+#endif
+
+// Same for LOG_EVERY_N
+#ifdef LOG_EVERY_N
+#define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
+#else
+#define C10_LOG_EVERY_N(severity, n) LOG(severity)
+#endif
+
+namespace c10 {
+
+using std::string;
+
+// Functions that we use for initialization.
+C10_API bool InitCaffeLogging(int* argc, char** argv);
+C10_API void UpdateLoggingLevelsFromFlags();
+
+[[noreturn]] C10_API void ThrowEnforceNotMet(
+    const char* file,
+    const int line,
+    const char* condition,
+    const std::string& msg,
+    const void* caller = nullptr);
+
+[[noreturn]] C10_API void ThrowEnforceNotMet(
+    const char* file,
+    const int line,
+    const char* condition,
+    const char* msg,
+    const void* caller = nullptr);
+
+[[noreturn]] C10_API inline void ThrowEnforceNotMet(
+    const char* file,
+    const int line,
+    const char* condition,
+    detail::CompileTimeEmptyString /*msg*/,
+    const void* caller = nullptr) {
+  ThrowEnforceNotMet(file, line, condition, "", caller);
+}
+
+[[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
+    const char* file,
+    const int line,
+    const char* condition,
+    const std::string& msg,
+    const void* caller = nullptr);
+
+[[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
+    const char* file,
+    const int line,
+    const char* condition,
+    const char* msg,
+    const void* caller = nullptr);
+
+[[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet(
+    const char* file,
+    const int line,
+    const char* condition,
+    detail::CompileTimeEmptyString /*msg*/,
+    const void* caller = nullptr) {
+  ThrowEnforceFiniteNotMet(file, line, condition, "", caller);
+}
+
+constexpr bool IsUsingGoogleLogging() {
+#ifdef C10_USE_GLOG
+  return true;
+#else
+  return false;
+#endif
+}
+
+/**
+ * A utility to allow one to show log info to stderr after the program starts.
+ *
+ * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level
+ * to smaller than INFO. You are recommended to only use this in a few sparse
+ * cases, such as when you want to write a tutorial or something. Normally, use
+ * the commandline flags to set the log level.
+ */
+C10_API void ShowLogInfoToStderr();
+
+C10_API void SetStackTraceFetcher(std::function<string(void)> fetcher);
+
+using EnforceNotMet = ::c10::Error;
+
+#define CAFFE_ENFORCE(condition, ...)                               \
+  do {                                                              \
+    if (C10_UNLIKELY(!(condition))) {                               \
+      ::c10::ThrowEnforceNotMet(                                    \
+          __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
+    }                                                               \
+  } while (false)
+
+#define CAFFE_ENFORCE_FINITE(condition, ...)                        \
+  do {                                                              \
+    if (C10_UNLIKELY(!(condition))) {                               \
+      ::c10::ThrowEnforceFiniteNotMet(                              \
+          __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
+    }                                                               \
+  } while (false)
+
+#define CAFFE_ENFORCE_WITH_CALLER(condition, ...)                         \
+  do {                                                                    \
+    if (C10_UNLIKELY(!(condition))) {                                     \
+      ::c10::ThrowEnforceNotMet(                                          \
+          __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); \
+    }                                                                     \
+  } while (false)
+
+#define CAFFE_THROW(...) \
+  ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__))
+
+/**
+ * Rich logging messages
+ *
+ * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that
+ * capture input argument values and add them to the exception message. E.g.
+ * `CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")`
+ * would evaluate both foo and bar only once and if the results are not equal -
+ * include them in the exception message.
+ *
+ * Some of the basic checker functions like Equals or Greater are already
+ * defined below. Other headers might define customized checkers by adding
+ * functions to the caffe2::enforce_detail namespace. For example:
+ *
+ *   namespace caffe2 { namespace enforce_detail {
+ *   inline EnforceFailMessage IsVector(const vector<int64_t>& shape) {
+ *     if (shape.size() == 1) { return EnforceOK(); }
+ *     return c10::str("Shape ", shape, " is not a vector");
+ *   }
+ *   }}
+ *
+ * With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`
+ *
+ * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided
+ * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in
+ * user-provided input.
+ */
+
+namespace enforce_detail {
+
+template <typename T1, typename T2>
+std::string enforceFailMsgImpl(const T1& x, const T2& y) {
+  return c10::str(x, " vs ", y);
+}
+
+template <typename T1, typename T2, typename... Args>
+std::string enforceFailMsgImpl(const T1& x, const T2& y, const Args&... args) {
+  return c10::str(x, " vs ", y, ". ", args...);
+}
+
+// GCC7 is getting an internal compiler error on the new
+// implementation, so keep the old one (which evaluates the error
+// message eagerly and therefore is undesirable for general use
+// compared to the new one) around for it.
+#if defined(__GNUG__) && __GNUC__ <= 7 && !defined(__clang__)
+template <typename Pred, typename T1, typename T2, typename... Args>
+void enforceThatImpl(
+    Pred p,
+    const T1& lhs,
+    const T2& rhs,
+    const char* file,
+    int line,
+    const char* expr,
+    const void* caller,
+    const Args&... args) {
+  if (C10_UNLIKELY(!(p(lhs, rhs)))) {
+    ::c10::ThrowEnforceNotMet(
+        file,
+        line,
+        expr,
+        ::c10::enforce_detail::enforceFailMsgImpl(lhs, rhs, args...),
+        caller);
+  }
+}
+
+#define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) \
+  ::c10::enforce_detail::enforceThatImpl(                \
+      op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__)
+
+#define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) \
+  ::c10::enforce_detail::enforceThatImpl(                            \
+      op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__)
+
+#else
+template <typename Pred, typename T1, typename T2, typename GetFailMsgFunc>
+void enforceThatImpl(
+    Pred p,
+    const T1& lhs,
+    const T2& rhs,
+    const char* file,
+    int line,
+    const char* expr,
+    const void* caller,
+    GetFailMsgFunc getFailMsg) {
+  if (C10_UNLIKELY(!(p(lhs, rhs)))) {
+    ::c10::ThrowEnforceNotMet(file, line, expr, getFailMsg(lhs, rhs), caller);
+  }
+}
+
+#define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...)  \
+  ::c10::enforce_detail::enforceThatImpl(                 \
+      op,                                                 \
+      (lhs),                                              \
+      (rhs),                                              \
+      __FILE__,                                           \
+      __LINE__,                                           \
+      expr,                                               \
+      nullptr,                                            \
+      [&](const auto& arg1, const auto& arg2) {           \
+        return ::c10::enforce_detail::enforceFailMsgImpl( \
+            arg1, arg2, ##__VA_ARGS__);                   \
+      })
+
+#define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) \
+  ::c10::enforce_detail::enforceThatImpl(                            \
+      op,                                                            \
+      (lhs),                                                         \
+      (rhs),                                                         \
+      __FILE__,                                                      \
+      __LINE__,                                                      \
+      expr,                                                          \
+      this,                                                          \
+      [&](const auto& arg1, const auto& arg2) {                      \
+        return ::c10::enforce_detail::enforceFailMsgImpl(            \
+            arg1, arg2, ##__VA_ARGS__);                              \
+      })
+#endif
+
+} // namespace enforce_detail
+
+#define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) \
+  CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__)
+
+#define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) \
+  CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_EQ(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP(std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_NE(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP(std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_LE(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP(std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_LT(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP(std::less<void>(), <, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_GE(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP(std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_GT(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP(std::greater<void>(), >, x, y, ##__VA_ARGS__)
+
+#define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) \
+  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(                          \
+      cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(          \
+      std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(          \
+      std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(          \
+      std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less<void>(), <, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(          \
+      std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
+#define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) \
+  CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(          \
+      std::greater<void>(), >, x, y, ##__VA_ARGS__)
+
+/**
+ * Very lightweight logging for the first time API usage. It's beneficial for
+ * tracking of individual functionality usage in larger applications.
+ *
+ * In order to ensure light-weightedness of logging, we utilize the static
+ * variable trick - LogAPIUsage will be invoked only once and further
+ * invocations will just do an atomic check.
+ *
+ * Example:
+ *   // Logs caller info with an arbitrary text event, if there is a usage.
+ *   C10_LOG_API_USAGE_ONCE("my_api");
+ */
+#define C10_LOG_API_USAGE_ONCE(...)                        \
+  C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = \
+      ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__);
+
+// API usage logging capabilities
+C10_API void SetAPIUsageLogger(std::function<void(const std::string&)> logger);
+C10_API void LogAPIUsage(const std::string& context);
+
+C10_API void SetAPIUsageMetadataLogger(
+    std::function<void(
+        const std::string&,
+        const std::map<std::string, std::string>& metadata_map)> logger);
+C10_API void LogAPIUsageMetadata(
+    const std::string& context,
+    const std::map<std::string, std::string>& metadata_map);
+
+// PyTorch ddp usage logging capabilities
+// DDPLoggingData holds data that can be logged in applications
+// for analysis and debugging. Data structure is defined in
+// c10 directory so that it can be easily imported by both c10
+// and torch files.
+struct DDPLoggingData {
+  // logging fields that are string types.
+  std::map<std::string, std::string> strs_map;
+  // logging fields that are int64_t types.
+  std::map<std::string, int64_t> ints_map;
+};
+
+C10_API void SetPyTorchDDPUsageLogger(
+    std::function<void(const DDPLoggingData&)> logger);
+C10_API void LogPyTorchDDPUsage(const DDPLoggingData& ddpData);
+
+namespace detail {
+// Return value is needed to do the static variable initialization trick
+C10_API bool LogAPIUsageFakeReturn(const std::string& context);
+} // namespace detail
+
+// Initializes the c10 logger.
+C10_API void initLogging();
+
+// Sets the rank, which will be included in log messages
+C10_API void SetGlobalRank(int64_t rank);
+
+} // namespace c10
+
+#endif // C10_UTIL_LOGGING_H_
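A sketch of the enforce macros in use (include path assumed). On failure they throw c10::Error, aliased above as EnforceNotMet; CAFFE_ENFORCE_EQ renders both operands into the message:

#include <c10/util/Logging.h> // assumed include path
#include <cstdint>

void enforce_demo(int64_t got, int64_t expected) {
  // Failure message reads like: "<got> vs <expected>. size mismatch"
  CAFFE_ENFORCE_EQ(got, expected, "size mismatch");
  CAFFE_ENFORCE(got >= 0, "got must be non-negative, was ", got);
}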
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h
ADDED
@@ -0,0 +1,233 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+#include <c10/util/in_place.h>
+
+#include <memory>
+#include <type_traits>
+
+namespace c10 {
+
+/// MaybeOwnedTraits<T> describes how to borrow from T. Here is how we
+/// can implement borrowing from an arbitrary type T using a raw
+/// pointer to const:
+template <typename T>
+struct MaybeOwnedTraitsGenericImpl {
+  using owned_type = T;
+  using borrow_type = const T*;
+
+  static borrow_type createBorrow(const owned_type& from) {
+    return &from;
+  }
+
+  static void assignBorrow(borrow_type& lhs, borrow_type rhs) {
+    lhs = rhs;
+  }
+
+  static void destroyBorrow(borrow_type& /*toDestroy*/) {}
+
+  static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
+    return *borrow;
+  }
+
+  static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
+    return borrow;
+  }
+
+  static bool debugBorrowIsValid(const borrow_type& borrow) {
+    return borrow != nullptr;
+  }
+};
+
+/// It is possible to eliminate the extra layer of indirection for
+/// borrows for some types that we control. For examples, see
+/// intrusive_ptr.h and TensorBody.h.
+
+template <typename T>
+struct MaybeOwnedTraits;
+
+// Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing
+// MaybeOwned to be used for any type right away.
+template <typename T>
+struct MaybeOwnedTraits<std::shared_ptr<T>>
+    : public MaybeOwnedTraitsGenericImpl<std::shared_ptr<T>> {};
+
+/// A smart pointer around either a borrowed or owned T. When
+/// constructed with borrowed(), the caller MUST ensure that the
+/// borrowed-from argument outlives this MaybeOwned<T>. Compare to
+/// Rust's std::borrow::Cow
+/// (https://doc.rust-lang.org/std/borrow/enum.Cow.html), but note
+/// that it is probably not suitable for general use because C++ has
+/// no borrow checking. Included here to support
+/// Tensor::expect_contiguous.
+template <typename T>
+class MaybeOwned final {
+  using borrow_type = typename MaybeOwnedTraits<T>::borrow_type;
+  using owned_type = typename MaybeOwnedTraits<T>::owned_type;
+
+  bool isBorrowed_;
+  union {
+    borrow_type borrow_;
+    owned_type own_;
+  };
+
+  /// Don't use this; use borrowed() instead.
+  explicit MaybeOwned(const owned_type& t)
+      : isBorrowed_(true), borrow_(MaybeOwnedTraits<T>::createBorrow(t)) {}
+
+  /// Don't use this; use owned() instead.
+  explicit MaybeOwned(T&& t) noexcept(
+      std::is_nothrow_move_constructible<T>::value)
+      : isBorrowed_(false), own_(std::move(t)) {}
+
+  /// Don't use this; use owned() instead.
+  template <class... Args>
+  explicit MaybeOwned(in_place_t, Args&&... args)
+      : isBorrowed_(false), own_(std::forward<Args>(args)...) {}
+
+ public:
+  explicit MaybeOwned() : isBorrowed_(true), borrow_() {}
+
+  // Copying a borrow yields another borrow of the original, as with a
+  // T*. Copying an owned T yields another owned T for safety: no
+  // chains of borrowing by default! (Note you could get that behavior
+  // with MaybeOwned<T>::borrowed(*rhs) if you wanted it.)
+  MaybeOwned(const MaybeOwned& rhs) : isBorrowed_(rhs.isBorrowed_) {
+    if (C10_LIKELY(rhs.isBorrowed_)) {
+      MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
+    } else {
+      new (&own_) T(rhs.own_);
+    }
+  }
+
+  MaybeOwned& operator=(const MaybeOwned& rhs) {
+    if (this == &rhs) {
+      return *this;
+    }
+    if (C10_UNLIKELY(!isBorrowed_)) {
+      if (rhs.isBorrowed_) {
+        own_.~T();
+        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
+        isBorrowed_ = true;
+      } else {
+        own_ = rhs.own_;
+      }
+    } else {
+      if (C10_LIKELY(rhs.isBorrowed_)) {
+        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
+      } else {
+        MaybeOwnedTraits<T>::destroyBorrow(borrow_);
+        new (&own_) T(rhs.own_);
+        isBorrowed_ = false;
+      }
+    }
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_);
+    return *this;
+  }
+
+  MaybeOwned(MaybeOwned&& rhs) noexcept(
+      std::is_nothrow_move_constructible_v<T>&&
+          std::is_nothrow_move_assignable_v<borrow_type>)
+      : isBorrowed_(rhs.isBorrowed_) {
+    if (C10_LIKELY(rhs.isBorrowed_)) {
+      MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
+    } else {
+      new (&own_) T(std::move(rhs.own_));
+    }
+  }
+
+  MaybeOwned& operator=(MaybeOwned&& rhs) noexcept(
+      std::is_nothrow_move_assignable_v<T>&& std::is_nothrow_move_assignable_v<
+          borrow_type>&& std::is_nothrow_move_constructible_v<T>&&
+          std::is_nothrow_destructible_v<T>&&
+              std::is_nothrow_destructible_v<borrow_type>) {
+    if (this == &rhs) {
+      return *this;
+    }
+    if (C10_UNLIKELY(!isBorrowed_)) {
+      if (rhs.isBorrowed_) {
+        own_.~T();
+        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
+        isBorrowed_ = true;
+      } else {
+        own_ = std::move(rhs.own_);
+      }
+    } else {
+      if (C10_LIKELY(rhs.isBorrowed_)) {
+        MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
+      } else {
+        MaybeOwnedTraits<T>::destroyBorrow(borrow_);
+        new (&own_) T(std::move(rhs.own_));
+        isBorrowed_ = false;
+      }
+    }
+    return *this;
+  }
+
+  static MaybeOwned borrowed(const T& t) {
+    return MaybeOwned(t);
+  }
+
+  static MaybeOwned owned(T&& t) noexcept(
+      std::is_nothrow_move_constructible<T>::value) {
+    return MaybeOwned(std::move(t));
+  }
+
+  template <class... Args>
+  static MaybeOwned owned(in_place_t, Args&&... args) {
+    return MaybeOwned(in_place, std::forward<Args>(args)...);
+  }
+
+  ~MaybeOwned() noexcept(std::is_nothrow_destructible_v<T>&&
+                             std::is_nothrow_destructible_v<borrow_type>) {
+    if (C10_UNLIKELY(!isBorrowed_)) {
+      own_.~T();
+    } else {
+      MaybeOwnedTraits<T>::destroyBorrow(borrow_);
+    }
+  }
+
+  // This is an implementation detail! You should know what you're doing
+  // if you are testing this. If you just want to guarantee ownership move
+  // this into a T
+  bool unsafeIsBorrowed() const {
+    return isBorrowed_;
+  }
+
+  const T& operator*() const& {
+    if (isBorrowed_) {
+      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+          MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
+    }
+    return C10_LIKELY(isBorrowed_)
+        ? MaybeOwnedTraits<T>::referenceFromBorrow(borrow_)
+        : own_;
+  }
+
+  const T* operator->() const {
+    if (isBorrowed_) {
+      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+          MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
+    }
+    return C10_LIKELY(isBorrowed_)
+        ? MaybeOwnedTraits<T>::pointerFromBorrow(borrow_)
+        : &own_;
+  }
+
+  // If borrowed, copy the underlying T. If owned, move from
+  // it. borrowed/owned state remains the same, and either we
+  // reference the same borrow as before or we are an owned moved-from
+  // T.
+  T operator*() && {
+    if (isBorrowed_) {
+      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+          MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
+      return MaybeOwnedTraits<T>::referenceFromBorrow(borrow_);
+    } else {
+      return std::move(own_);
+    }
+  }
+};
+
+} // namespace c10
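A sketch of the borrowed/owned split, using the MaybeOwned<std::shared_ptr<T>> instantiation enabled above. Borrowing stores a raw pointer (no refcount traffic), so the borrowed-from object must outlive the MaybeOwned:

#include <c10/util/MaybeOwned.h> // assumed include path
#include <memory>

void maybe_owned_demo() {
  auto sp = std::make_shared<int>(7);
  auto b = c10::MaybeOwned<std::shared_ptr<int>>::borrowed(sp); // sp must outlive b
  auto o = c10::MaybeOwned<std::shared_ptr<int>>::owned(std::make_shared<int>(8));
  int sum = **b + **o; // operator* yields const shared_ptr&, hence the double deref
  (void)sum;           // 15
}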
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Optional.h
ADDED
@@ -0,0 +1,47 @@
+#ifndef C10_UTIL_OPTIONAL_H_
+#define C10_UTIL_OPTIONAL_H_
+
+#include <optional>
+#include <type_traits>
+
+// Macros.h is not needed, but it does namespace shenanigans that lots
+// of downstream code seems to rely on. Feel free to remove it and fix
+// up builds.
+#include <c10/macros/Macros.h>
+#include <c10/util/Metaprogramming.h>
+
+namespace c10 {
+using std::bad_optional_access;
+using std::in_place;
+using std::in_place_t;
+using std::make_optional;
+using std::nullopt;
+using std::nullopt_t;
+using std::optional;
+
+namespace detail_ {
+// the call to convert<A>(b) has return type A and converts b to type A iff
+// decltype(b) is implicitly convertible to A
+template <class U>
+constexpr U convert(U v) {
+  return v;
+}
+} // namespace detail_
+template <class T, class F>
+constexpr T value_or_else(const optional<T>& v, F&& func) {
+  static_assert(
+      std::is_convertible<typename std::invoke_result_t<F>, T>::value,
+      "func parameter must be a callable that returns a type convertible to the value stored in the optional");
+  return v.has_value() ? *v : detail_::convert<T>(std::forward<F>(func)());
+}
+
+template <class T, class F>
+constexpr T value_or_else(optional<T>&& v, F&& func) {
+  static_assert(
+      std::is_convertible<typename std::invoke_result_t<F>, T>::value,
+      "func parameter must be a callable that returns a type convertible to the value stored in the optional");
+  // Note: originally written as constexpr_move(std::move(v).contained_val()),
+  // an accessor from the pre-std::optional implementation; with std::optional
+  // the same move-out is spelled std::move(*v).
+  return v.has_value() ? std::move(*v)
+                       : detail_::convert<T>(std::forward<F>(func)());
+}
+} // namespace c10
+#endif // C10_UTIL_OPTIONAL_H_
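A sketch of value_or_else; unlike std::optional::value_or, the fallback is lazy, so it only runs (and only allocates) when the optional is empty:

#include <c10/util/Optional.h> // assumed include path
#include <string>

std::string config_name(const c10::optional<std::string>& maybe_name) {
  return c10::value_or_else(maybe_name, [] { return std::string("default"); });
}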
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Registry.h
ADDED
@@ -0,0 +1,327 @@
1 |
+
#ifndef C10_UTIL_REGISTRY_H_
|
2 |
+
#define C10_UTIL_REGISTRY_H_
|
3 |
+
|
4 |
+
/**
|
5 |
+
* Simple registry implementation that uses static variables to
|
6 |
+
* register object creators during program initialization time.
|
7 |
+
*/
|
8 |
+
|
9 |
+
// NB: This Registry works poorly when you have other namespaces.
|
10 |
+
// Make all macro invocations from inside the at namespace.
|
11 |
+
|
12 |
+
#include <algorithm>
|
13 |
+
#include <cstdio>
|
14 |
+
#include <cstdlib>
|
15 |
+
#include <functional>
|
16 |
+
#include <memory>
|
17 |
+
#include <mutex>
|
18 |
+
#include <stdexcept>
|
19 |
+
#include <string>
|
20 |
+
#include <unordered_map>
|
21 |
+
#include <vector>
|
22 |
+
|
23 |
+
#include <c10/macros/Macros.h>
|
24 |
+
#include <c10/util/Type.h>
|
25 |
+
|
26 |
+
namespace c10 {
|
27 |
+
|
28 |
+
template <typename KeyType>
|
29 |
+
inline std::string KeyStrRepr(const KeyType& /*key*/) {
|
30 |
+
return "[key type printing not supported]";
|
31 |
+
}
|
32 |
+
|
33 |
+
template <>
|
34 |
+
inline std::string KeyStrRepr(const std::string& key) {
|
35 |
+
return key;
|
36 |
+
}
|
37 |
+
|
38 |
+
enum RegistryPriority {
|
39 |
+
REGISTRY_FALLBACK = 1,
|
40 |
+
REGISTRY_DEFAULT = 2,
|
41 |
+
REGISTRY_PREFERRED = 3,
|
42 |
+
};
|
43 |
+
|
44 |
+
/**
|
45 |
+
* @brief A template class that allows one to register classes by keys.
|
46 |
+
*
|
47 |
+
* The keys are usually a std::string specifying the name, but can be anything
|
48 |
+
* that can be used in a std::map.
|
49 |
+
*
|
50 |
+
* You should most likely not use the Registry class explicitly, but use the
|
51 |
+
* helper macros below to declare specific registries as well as registering
|
52 |
+
* objects.
|
53 |
+
*/
|
54 |
+
template <class SrcType, class ObjectPtrType, class... Args>
|
55 |
+
class Registry {
|
56 |
+
public:
|
57 |
+
typedef std::function<ObjectPtrType(Args...)> Creator;
|
58 |
+
|
59 |
+
Registry(bool warning = true)
|
60 |
+
: registry_(), priority_(), terminate_(true), warning_(warning) {}
|
61 |
+
|
62 |
+
void Register(
|
63 |
+
const SrcType& key,
|
64 |
+
Creator creator,
|
65 |
+
const RegistryPriority priority = REGISTRY_DEFAULT) {
|
66 |
+
std::lock_guard<std::mutex> lock(register_mutex_);
|
67 |
+
// The if statement below is essentially the same as the following line:
|
68 |
+
// TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key
|
69 |
+
// << " registered twice.";
|
70 |
+
// However, TORCH_CHECK_EQ depends on google logging, and since registration
|
71 |
+
// is carried out at static initialization time, we do not want to have an
|
72 |
+
// explicit dependency on glog's initialization function.
|
73 |
+
if (registry_.count(key) != 0) {
|
74 |
+
auto cur_priority = priority_[key];
|
75 |
+
if (priority > cur_priority) {
|
76 |
+
#ifdef DEBUG
|
77 |
+
std::string warn_msg =
|
78 |
+
"Overwriting already registered item for key " + KeyStrRepr(key);
|
79 |
+
fprintf(stderr, "%s\n", warn_msg.c_str());
|
80 |
+
#endif
|
81 |
+
registry_[key] = creator;
|
82 |
+
priority_[key] = priority;
|
83 |
+
} else if (priority == cur_priority) {
|
84 |
+
std::string err_msg =
|
85 |
+
"Key already registered with the same priority: " + KeyStrRepr(key);
|
86 |
+
fprintf(stderr, "%s\n", err_msg.c_str());
|
87 |
+
if (terminate_) {
|
88 |
+
std::exit(1);
|
89 |
+
} else {
|
90 |
+
throw std::runtime_error(err_msg);
|
91 |
+
}
|
92 |
+
} else if (warning_) {
|
93 |
+
std::string warn_msg =
|
94 |
+
"Higher priority item already registered, skipping registration of " +
|
95 |
+
KeyStrRepr(key);
|
96 |
+
fprintf(stderr, "%s\n", warn_msg.c_str());
|
97 |
+
}
|
98 |
+
} else {
|
99 |
+
registry_[key] = creator;
|
100 |
+
priority_[key] = priority;
|
101 |
+
}
|
102 |
+
}
|
103 |
+
|
104 |
+
void Register(
|
105 |
+
const SrcType& key,
|
106 |
+
Creator creator,
|
107 |
+
const std::string& help_msg,
|
108 |
+
const RegistryPriority priority = REGISTRY_DEFAULT) {
|
109 |
+
Register(key, creator, priority);
|
110 |
+
help_message_[key] = help_msg;
|
111 |
+
}
|
112 |
+
|
113 |
+
inline bool Has(const SrcType& key) {
|
114 |
+
return (registry_.count(key) != 0);
|
115 |
+
}
|
116 |
+
|
117 |
+
ObjectPtrType Create(const SrcType& key, Args... args) {
|
118 |
+
auto it = registry_.find(key);
|
119 |
+
if (it == registry_.end()) {
|
120 |
+
// Returns nullptr if the key is not registered.
|
121 |
+
return nullptr;
|
122 |
+
}
|
123 |
+
return it->second(args...);
|
124 |
+
}
|
125 |
+
|
126 |
+
/**
|
127 |
+
* Returns the keys currently registered as a std::vector.
|
128 |
+
*/
|
129 |
+
std::vector<SrcType> Keys() const {
|
130 |
+
std::vector<SrcType> keys;
|
131 |
+
keys.reserve(registry_.size());
|
132 |
+
for (const auto& it : registry_) {
|
133 |
+
keys.push_back(it.first);
|
134 |
+
}
|
135 |
+
return keys;
|
136 |
+
}
|
137 |
+
|
138 |
+
inline const std::unordered_map<SrcType, std::string>& HelpMessage() const {
|
139 |
+
return help_message_;
|
140 |
+
}
|
141 |
+
|
142 |
+
const char* HelpMessage(const SrcType& key) const {
|
143 |
+
auto it = help_message_.find(key);
|
144 |
+
if (it == help_message_.end()) {
|
145 |
+
return nullptr;
|
146 |
+
}
|
147 |
+
return it->second.c_str();
|
148 |
+
}
|
149 |
+
|
150 |
+
// Used for testing, if terminate is unset, Registry throws instead of
|
151 |
+
// calling std::exit
|
152 |
+
void SetTerminate(bool terminate) {
|
153 |
+
terminate_ = terminate;
|
154 |
+
}
|
155 |
+
|
156 |
+
private:
|
157 |
+
std::unordered_map<SrcType, Creator> registry_;
|
158 |
+
std::unordered_map<SrcType, RegistryPriority> priority_;
|
159 |
+
bool terminate_;
|
160 |
+
const bool warning_;
|
161 |
+
std::unordered_map<SrcType, std::string> help_message_;
|
162 |
+
std::mutex register_mutex_;
|
163 |
+
|
164 |
+
C10_DISABLE_COPY_AND_ASSIGN(Registry);
|
165 |
+
};
|
166 |
+
|
167 |
+
template <class SrcType, class ObjectPtrType, class... Args>
|
168 |
+
class Registerer {
|
169 |
+
public:
|
170 |
+
explicit Registerer(
|
171 |
+
const SrcType& key,
|
172 |
+
Registry<SrcType, ObjectPtrType, Args...>* registry,
|
173 |
+
typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
|
174 |
+
const std::string& help_msg = "") {
|
175 |
+
registry->Register(key, creator, help_msg);
|
176 |
+
}
|
177 |
+
|
178 |
+
explicit Registerer(
|
179 |
+
const SrcType& key,
|
180 |
+
const RegistryPriority priority,
|
181 |
+
Registry<SrcType, ObjectPtrType, Args...>* registry,
|
182 |
+
typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
|
183 |
+
const std::string& help_msg = "") {
|
184 |
+
registry->Register(key, creator, help_msg, priority);
|
185 |
+
}
|
186 |
+
|
187 |
+
template <class DerivedType>
|
188 |
+
static ObjectPtrType DefaultCreator(Args... args) {
|
189 |
+
return ObjectPtrType(new DerivedType(args...));
|
190 |
+
}
|
191 |
+
};
|
192 |
+
|
193 |
+
/**
|
194 |
+
* C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function
|
195 |
+
* declaration, as well as creating a convenient typename for its corresponding
|
196 |
+
* registerer.
|
197 |
+
*/
|
198 |
+
// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE
|
199 |
+
// as import and DEFINE as export, because these registry macros will be used
|
200 |
+
// in downstream shared libraries as well, and one cannot use *_API - the API
|
201 |
+
// macro will be defined on a per-shared-library basis. Semantically, when one
|
202 |
+
// declares a typed registry it is always going to be IMPORT, and when one
|
203 |
+
// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE),
|
204 |
+
// the instantiation unit is always going to be exported.
|
205 |
+
//
|
206 |
+
// The only unique condition is when in the same file one does DECLARE and
|
207 |
+
// DEFINE - in Windows compilers, this generates a warning that dllimport and
|
208 |
+
// dllexport are mixed, but the warning is fine and linker will be properly
|
209 |
+
// exporting the symbol. Same thing happens in the gflags flag declaration and
|
210 |
+
// definition caes.
|
211 |
+
#define C10_DECLARE_TYPED_REGISTRY( \
|
212 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
213 |
+
C10_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
214 |
+
RegistryName(); \
|
215 |
+
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
216 |
+
Registerer##RegistryName
|
217 |
+
|
218 |
+
#define TORCH_DECLARE_TYPED_REGISTRY( \
|
219 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
220 |
+
TORCH_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
221 |
+
RegistryName(); \
|
222 |
+
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
223 |
+
Registerer##RegistryName
|
224 |
+
|
225 |
+
#define C10_DEFINE_TYPED_REGISTRY( \
|
226 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
227 |
+
C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
228 |
+
RegistryName() { \
|
229 |
+
static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
230 |
+
registry = new ::c10:: \
|
231 |
+
Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>(); \
|
232 |
+
return registry; \
|
233 |
+
}
|
234 |
+
|
235 |
+
#define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \
|
236 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
237 |
+
C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
238 |
+
RegistryName() { \
|
239 |
+
static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
240 |
+
registry = \
|
241 |
+
new ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>( \
|
242 |
+
false); \
|
243 |
+
return registry; \
|
244 |
+
}
|
245 |
+
|
246 |
+
// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated
|
247 |
+
// creator with comma in its templated arguments.
|
248 |
+
#define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) \
|
249 |
+
static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
|
250 |
+
key, RegistryName(), ##__VA_ARGS__);
|
251 |
+
|
252 |
+
#define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \
|
253 |
+
RegistryName, key, priority, ...) \
|
254 |
+
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key, priority, RegistryName(), ##__VA_ARGS__);

#define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...)                    \
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key,                                                                  \
      RegistryName(),                                                       \
      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                \
      ::c10::demangle_type<__VA_ARGS__>());

#define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY(                             \
    RegistryName, key, priority, ...)                                       \
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key,                                                                  \
      priority,                                                             \
      RegistryName(),                                                       \
      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                \
      ::c10::demangle_type<__VA_ARGS__>());

// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use
// std::string as the key type, because that is the most commonly used case.
#define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DECLARE_TYPED_REGISTRY(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
  TORCH_DECLARE_TYPED_REGISTRY(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DEFINE_TYPED_REGISTRY(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) \
  C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DECLARE_TYPED_REGISTRY(                                      \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
  TORCH_DECLARE_TYPED_REGISTRY(                                      \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DEFINE_TYPED_REGISTRY(                                      \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

#define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( \
    RegistryName, ObjectType, ...)                  \
  C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(        \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string
// as the key
// type, because that is the most commonly used case.
#define C10_REGISTER_CREATOR(RegistryName, key, ...) \
  C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__)

#define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) \
  C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY(                                  \
      RegistryName, #key, priority, __VA_ARGS__)

#define C10_REGISTER_CLASS(RegistryName, key, ...) \
  C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__)

#define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) \
  C10_REGISTER_TYPED_CLASS_WITH_PRIORITY(                                  \
      RegistryName, #key, priority, __VA_ARGS__)

} // namespace c10

#endif // C10_UTIL_REGISTRY_H_
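The string-keyed forms above reduce to the typed registry machinery. A minimal sketch of the usual declare/define/register/create flow, assuming the registry's `Create(key, args...)` lookup; `BackendBase`, `MyBackend`, and `make_backend` are invented names for illustration:

```cpp
#include <c10/util/Registry.h>
#include <memory>

// Hypothetical base class for objects stored in the registry.
struct BackendBase {
  virtual ~BackendBase() = default;
};

C10_DECLARE_REGISTRY(BackendRegistry, BackendBase); // usually in a header
C10_DEFINE_REGISTRY(BackendRegistry, BackendBase);  // in exactly one .cpp

struct MyBackend : BackendBase {};
// #key stringizes the second argument, so this registers under "MyBackend".
C10_REGISTER_CLASS(BackendRegistry, MyBackend, MyBackend);

std::unique_ptr<BackendBase> make_backend() {
  // Runtime lookup by string key; unknown keys yield a null pointer.
  return BackendRegistry()->Create("MyBackend");
}
```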
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ScopeExit.h
ADDED
@@ -0,0 +1,53 @@
#pragma once

#include <type_traits>
#include <utility>

namespace c10 {

/**
 * Mostly copied from https://llvm.org/doxygen/ScopeExit_8h_source.html
 */
template <typename Callable>
class scope_exit {
  Callable ExitFunction;
  bool Engaged = true; // False once moved-from or release()d.

 public:
  template <typename Fp>
  // constructor accepting a forwarding reference can hide the
  // move constructor
  // @lint-ignore CLANGTIDY
  explicit scope_exit(Fp&& F) : ExitFunction(std::forward<Fp>(F)) {}

  scope_exit(scope_exit&& Rhs) noexcept
      : ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
    Rhs.release();
  }
  scope_exit(const scope_exit&) = delete;
  scope_exit& operator=(scope_exit&&) = delete;
  scope_exit& operator=(const scope_exit&) = delete;

  void release() {
    Engaged = false;
  }

  ~scope_exit() {
    if (Engaged) {
      ExitFunction();
    }
  }
};

// Keeps the callable object that is passed in, and execute it at the
// destruction of the returned object (usually at the scope exit where the
// returned object is kept).
//
// Interface is specified by p0052r2.
template <typename Callable>
scope_exit<typename std::decay<Callable>::type> make_scope_exit(Callable&& F) {
  return scope_exit<typename std::decay<Callable>::type>(
      std::forward<Callable>(F));
}

} // namespace c10
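A small sketch of `make_scope_exit` as a RAII cleanup guard; the file-handle scenario is invented for illustration:

```cpp
#include <c10/util/ScopeExit.h>
#include <cstdio>

void read_header(const char* path) {
  std::FILE* f = std::fopen(path, "rb");
  if (!f) {
    return;
  }
  // fclose runs on every exit path below; call guard.release() to cancel.
  auto guard = c10::make_scope_exit([&] { std::fclose(f); });
  char buf[64];
  std::fread(buf, 1, sizeof(buf), f);
  // ... parse buf ...
}
```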
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h
ADDED
@@ -0,0 +1,88 @@
#pragma once
#include <array>
#include <type_traits>

/** Helper class for allocating temporary fixed size arrays with SBO.
 *
 * This is intentionally much simpler than SmallVector, to improve performance
 * at the expense of many features:
 * - No zero-initialization for numeric types
 * - No resizing after construction
 * - No copy/move
 * - No non-trivial types
 */

namespace c10 {

template <typename T, size_t N>
class SmallBuffer {
  static_assert(
      std::is_trivial<T>::value,
      "SmallBuffer is intended for POD types");

  std::array<T, N> storage_;
  size_t size_{};
  T* data_{};

 public:
  SmallBuffer(size_t size) : size_(size) {
    if (size > N) {
      data_ = new T[size];
    } else {
      data_ = &storage_[0];
    }
  }

  SmallBuffer(const SmallBuffer&) = delete;
  SmallBuffer& operator=(const SmallBuffer&) = delete;

  // move constructor is needed in function return
  SmallBuffer(SmallBuffer&& rhs) noexcept : size_{rhs.size_} {
    rhs.size_ = 0;
    if (size_ > N) {
      data_ = rhs.data_;
      rhs.data_ = nullptr;
    } else {
      storage_ = std::move(rhs.storage_);
      data_ = &storage_[0];
    }
  }

  SmallBuffer& operator=(SmallBuffer&&) = delete;

  ~SmallBuffer() {
    if (size_ > N) {
      delete[] data_;
    }
  }

  T& operator[](int64_t idx) {
    return data()[idx];
  }
  const T& operator[](int64_t idx) const {
    return data()[idx];
  }
  T* data() {
    return data_;
  }
  const T* data() const {
    return data_;
  }
  size_t size() const {
    return size_;
  }
  T* begin() {
    return data_;
  }
  const T* begin() const {
    return data_;
  }
  T* end() {
    return data_ + size_;
  }
  const T* end() const {
    return data_ + size_;
  }
};

} // namespace c10
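A sketch of the intended pattern: scratch storage that stays on the stack for small sizes and only heap-allocates past `N`. The stride computation is invented for illustration:

```cpp
#include <c10/util/SmallBuffer.h>
#include <cstddef>
#include <cstdint>

int64_t sum_indices(size_t ndim) {
  // Stays in the inline std::array for ndim <= 8; new[]s otherwise.
  c10::SmallBuffer<int64_t, 8> scratch(ndim);
  for (size_t i = 0; i < ndim; ++i) {
    scratch[i] = static_cast<int64_t>(i);
  }
  int64_t total = 0;
  for (int64_t v : scratch) { // begin()/end() enable range-for
    total += v;
  }
  return total;
}
```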
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h
ADDED
@@ -0,0 +1,202 @@
#ifndef C10_UTIL_STRINGUTIL_H_
#define C10_UTIL_STRINGUTIL_H_

#include <c10/macros/Macros.h>
#include <c10/util/string_utils.h>
#include <c10/util/string_view.h>

#include <cstddef>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>

C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
#endif

namespace c10 {

namespace detail {

// Obtains the base name from a full path.
C10_API std::string StripBasename(const std::string& full_path);

C10_API std::string ExcludeFileExtension(const std::string& full_path);

struct CompileTimeEmptyString {
  operator const std::string&() const {
    static const std::string empty_string_literal;
    return empty_string_literal;
  }
  operator const char*() const {
    return "";
  }
};

template <typename T>
struct CanonicalizeStrTypes {
  using type = const T&;
};

template <size_t N>
struct CanonicalizeStrTypes<char[N]> {
  using type = const char*;
};

inline std::ostream& _str(std::ostream& ss) {
  return ss;
}

template <typename T>
inline std::ostream& _str(std::ostream& ss, const T& t) {
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  ss << t;
  return ss;
}

template <>
inline std::ostream& _str<CompileTimeEmptyString>(
    std::ostream& ss,
    const CompileTimeEmptyString&) {
  return ss;
}

template <typename T, typename... Args>
inline std::ostream& _str(std::ostream& ss, const T& t, const Args&... args) {
  return _str(_str(ss, t), args...);
}

template <typename... Args>
struct _str_wrapper final {
  static std::string call(const Args&... args) {
    std::ostringstream ss;
    _str(ss, args...);
    return ss.str();
  }
};

// Specializations for already-a-string types.
template <>
struct _str_wrapper<std::string> final {
  // return by reference to avoid the binary size of a string copy
  static const std::string& call(const std::string& str) {
    return str;
  }
};

template <>
struct _str_wrapper<const char*> final {
  static const char* call(const char* str) {
    return str;
  }
};

// For c10::str() with an empty argument list (which is common in our assert
// macros), we don't want to pay the binary size for constructing and
// destructing a stringstream or even constructing a string.
template <>
struct _str_wrapper<> final {
  static CompileTimeEmptyString call() {
    return CompileTimeEmptyString();
  }
};

} // namespace detail

// Convert a list of string-like arguments into a single string.
template <typename... Args>
inline decltype(auto) str(const Args&... args) {
  return detail::_str_wrapper<
      typename detail::CanonicalizeStrTypes<Args>::type...>::call(args...);
}

template <class Container>
inline std::string Join(const std::string& delimiter, const Container& v) {
  std::stringstream s;
  int cnt = static_cast<int64_t>(v.size()) - 1;
  for (auto i = v.begin(); i != v.end(); ++i, --cnt) {
    s << (*i) << (cnt ? delimiter : "");
  }
  return s.str();
}

// Replace all occurrences of "from" substring to "to" string.
// Returns number of replacements
size_t C10_API
ReplaceAll(std::string& s, c10::string_view from, c10::string_view to);

/// Represents a location in source code (for debugging).
struct C10_API SourceLocation {
  const char* function;
  const char* file;
  uint32_t line;
};

std::ostream& operator<<(std::ostream& out, const SourceLocation& loc);

// unix isprint but insensitive to locale
inline static bool isPrint(char s) {
  return s > 0x1f && s < 0x7f;
}

inline void printQuotedString(std::ostream& stmt, const string_view str) {
  stmt << "\"";
  for (auto s : str) {
    switch (s) {
      case '\\':
        stmt << "\\\\";
        break;
      case '\'':
        stmt << "\\'";
        break;
      case '\"':
        stmt << "\\\"";
        break;
      case '\a':
        stmt << "\\a";
        break;
      case '\b':
        stmt << "\\b";
        break;
      case '\f':
        stmt << "\\f";
        break;
      case '\n':
        stmt << "\\n";
        break;
      case '\r':
        stmt << "\\r";
        break;
      case '\t':
        stmt << "\\t";
        break;
      case '\v':
        stmt << "\\v";
        break;
      default:
        if (isPrint(s)) {
          stmt << s;
        } else {
          // C++ io has stateful formatting settings. Messing with
          // them is probably worse than doing this manually.
          char buf[4] = "000";
          buf[2] += s % 8;
          s /= 8;
          buf[1] += s % 8;
          s /= 8;
          buf[0] += s;
          stmt << "\\" << buf;
        }
        break;
    }
  }
  stmt << "\"";
}

} // namespace c10

C10_CLANG_DIAGNOSTIC_POP()

#endif // C10_UTIL_STRINGUTIL_H_
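A brief sketch of the two most commonly used helpers here, `c10::str` and `c10::Join`; the values are invented for illustration:

```cpp
#include <c10/util/StringUtil.h>
#include <string>
#include <vector>

std::string describe() {
  // str() streams heterogeneous arguments through one ostringstream.
  std::string msg = c10::str("expected ", 4, " dims, got ", 3);
  // Join() stitches an iterable container together with a delimiter.
  std::vector<int> shape{2, 3, 5};
  return msg + " with shape [" + c10::Join(", ", shape) + "]";
  // -> "expected 4 dims, got 3 with shape [2, 3, 5]"
}
```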
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h
ADDED
@@ -0,0 +1,153 @@
#pragma once

#include <c10/macros/Macros.h>

/**
 * Android versions with libgnustl incorrectly handle thread_local C++
 * qualifier with composite types. NDK up to r17 version is affected.
 *
 * (A fix landed on Jun 4 2018:
 * https://android-review.googlesource.com/c/toolchain/gcc/+/683601)
 *
 * In such cases, use c10::ThreadLocal<T> wrapper
 * which is `pthread_*` based with smart pointer semantics.
 *
 * In addition, convenient macro C10_DEFINE_TLS_static is available.
 * To define static TLS variable of type std::string, do the following
 * ```
 * C10_DEFINE_TLS_static(std::string, str_tls_);
 * ///////
 * {
 *   *str_tls_ = "abc";
 *   assert(str_tls_->length(), 3);
 * }
 * ```
 *
 * (see c10/test/util/ThreadLocal_test.cpp for more examples)
 */
#if !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)

#if defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
#define C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE
#endif // defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604

#endif // !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)

#if defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
#include <c10/util/Exception.h>
#include <errno.h>
#include <pthread.h>
#include <memory>
namespace c10 {

/**
 * @brief Temporary thread_local C++ qualifier replacement for Android
 * based on `pthread_*`.
 * To be used with composite types that provide default ctor.
 */
template <typename Type>
class ThreadLocal {
 public:
  ThreadLocal() {
    pthread_key_create(
        &key_, [](void* buf) { delete static_cast<Type*>(buf); });
  }

  ~ThreadLocal() {
    if (void* current = pthread_getspecific(key_)) {
      delete static_cast<Type*>(current);
    }

    pthread_key_delete(key_);
  }

  ThreadLocal(const ThreadLocal&) = delete;
  ThreadLocal& operator=(const ThreadLocal&) = delete;

  Type& get() {
    if (void* current = pthread_getspecific(key_)) {
      return *static_cast<Type*>(current);
    }

    std::unique_ptr<Type> ptr = std::make_unique<Type>();
    if (0 == pthread_setspecific(key_, ptr.get())) {
      return *ptr.release();
    }

    int err = errno;
    TORCH_INTERNAL_ASSERT(false, "pthread_setspecific() failed, errno = ", err);
  }

  Type& operator*() {
    return get();
  }

  Type* operator->() {
    return &get();
  }

 private:
  pthread_key_t key_;
};

} // namespace c10

#define C10_DEFINE_TLS_static(Type, Name) static ::c10::ThreadLocal<Type> Name

#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
  static ::c10::ThreadLocal<Type> Name

#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
  ::c10::ThreadLocal<Type> Class::Name

#else // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)

namespace c10 {

/**
 * @brief Default thread_local implementation for non-Android cases.
 * To be used with composite types that provide default ctor.
 */
template <typename Type>
class ThreadLocal {
 public:
  using Accessor = Type* (*)();
  explicit ThreadLocal(Accessor accessor) : accessor_(accessor) {}

  ThreadLocal(const ThreadLocal&) = delete;
  ThreadLocal& operator=(const ThreadLocal&) = delete;

  Type& get() {
    return *accessor_();
  }

  Type& operator*() {
    return get();
  }

  Type* operator->() {
    return &get();
  }

 private:
  Accessor accessor_;
};

} // namespace c10

#define C10_DEFINE_TLS_static(Type, Name)     \
  static ::c10::ThreadLocal<Type> Name([]() { \
    static thread_local Type var;             \
    return &var;                              \
  })

#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
  static ::c10::ThreadLocal<Type> Name

#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
  ::c10::ThreadLocal<Type> Class::Name([]() {          \
    static thread_local Type var;                      \
    return &var;                                       \
  })

#endif // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
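The header comment above already shows `C10_DEFINE_TLS_static`; here is a sketch of the class-static pair, with an invented `Counter` class (both branches of the `#if` expand these macros compatibly):

```cpp
#include <c10/util/ThreadLocal.h>

struct Counter {
  // Expands to a static data member declaration inside the class.
  C10_DECLARE_TLS_class_static(Counter, int, tls_count_);
  static void bump() {
    ++*tls_count_; // smart-pointer-style access; one int per thread
  }
};
// The matching out-of-class definition, at namespace scope.
C10_DEFINE_TLS_class_static(Counter, int, tls_count_);
```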
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Type.h
ADDED
@@ -0,0 +1,30 @@
#ifndef C10_UTIL_TYPE_H_
#define C10_UTIL_TYPE_H_

#include <cstddef>
#include <string>
#ifdef __GXX_RTTI
#include <typeinfo>
#endif // __GXX_RTTI

#include <c10/macros/Macros.h>

namespace c10 {

/// Utility to demangle a C++ symbol name.
C10_API std::string demangle(const char* name);

/// Returns the printable name of the type.
template <typename T>
inline const char* demangle_type() {
#ifdef __GXX_RTTI
  static const auto& name = *(new std::string(demangle(typeid(T).name())));
  return name.c_str();
#else // __GXX_RTTI
  return "(RTTI disabled, cannot show name)";
#endif // __GXX_RTTI
}

} // namespace c10

#endif // C10_UTIL_TYPE_H_
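A sketch of `demangle_type` in a diagnostic context; the exact output is compiler-dependent and the example type is arbitrary:

```cpp
#include <c10/util/Type.h>
#include <iostream>
#include <vector>

void print_type_name() {
  // With RTTI on a GCC/Clang build this prints something like
  // "std::vector<int, std::allocator<int> >"; the result is cached per
  // instantiation in an intentionally leaked static std::string.
  std::cout << c10::demangle_type<std::vector<int>>() << '\n';
}
```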
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h
ADDED
@@ -0,0 +1,168 @@
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e4m3fnuz.h>
#include <c10/util/Float8_e5m2.h>
#include <c10/util/Float8_e5m2fnuz.h>
#include <c10/util/Half.h>

#include <type_traits>

C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif

namespace c10 {

template <typename dest_t, typename src_t>
struct needs_real {
  constexpr static bool value =
      (is_complex<src_t>::value && !is_complex<dest_t>::value);
};

template <bool, typename src_t>
struct maybe_real {
  C10_HOST_DEVICE static inline src_t apply(src_t src) {
    return src;
  }
};

template <typename src_t>
struct maybe_real<true, src_t> {
  C10_HOST_DEVICE static inline decltype(auto) apply(src_t src) {
    return src.real();
  }
};

// Note: deliberately ignores undefined behavior, consistent with NumPy.
// PyTorch's type conversions can cause a variety of undefined behavior,
// including float to integral overflow and signed to unsigned integer overflow.
// Some of this undefined behavior is addressed below.
template <typename dest_t, typename src_t>
struct static_cast_with_inter_type {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline dest_t apply(
      src_t src) {
    constexpr bool real = needs_real<dest_t, src_t>::value;
    auto r = maybe_real<real, src_t>::apply(src);
    return static_cast<dest_t>(r);
  }
};

// Partial template instantiation for casting to uint8.
// Note: Converting from negative float values to unsigned integer types is
// undefined behavior in C++, and current CPU and GPU compilers exhibit
// divergent behavior. Casting from negative float values to signed
// integer types and then to unsigned integer types is not undefined,
// however, so this cast improves the consistency of type conversions
// to uint8 across compilers.
// Further note: Type conversions across compilers still have other undefined
// and divergent behavior.
template <typename src_t>
struct static_cast_with_inter_type<uint8_t, src_t> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline uint8_t apply(
      src_t src) {
    constexpr bool real = needs_real<uint8_t, src_t>::value;
    return static_cast<uint8_t>(
        static_cast<int64_t>(maybe_real<real, src_t>::apply(src)));
  }
};

template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::BFloat16> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::BFloat16 src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::Float8_e5m2> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e5m2 src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::Float8_e5m2fnuz> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e5m2fnuz src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::Float8_e4m3fn> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e4m3fn src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::Float8_e4m3fnuz> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Float8_e4m3fnuz src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::Half> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::Half src) {
    return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
  }
};

template <>
struct static_cast_with_inter_type<
    c10::complex<c10::Half>,
    c10::complex<double>> {
  C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
      c10::Half>
  apply(c10::complex<double> src) {
    return static_cast<c10::complex<c10::Half>>(
        static_cast<c10::complex<float>>(src));
  }
};

template <typename To, typename From>
C10_HOST_DEVICE To convert(From f) {
  return static_cast_with_inter_type<To, From>::apply(f);
}

// Define separately to avoid being inlined and prevent code-size bloat
C10_API void report_overflow(const char* name);

template <typename To, typename From>
To checked_convert(From f, const char* name) {
  // Converting to bool can't overflow so we exclude this case from checking.
  if (!std::is_same<To, bool>::value && overflows<To, From>(f)) {
    report_overflow(name);
  }
  return convert<To, From>(f);
}

} // namespace c10

C10_CLANG_DIAGNOSTIC_POP()

// Trigger tests for D25440771. TODO: Remove this line any time you want.
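A sketch of the two entry points: `convert` follows the NumPy-style rules described above, while `checked_convert` reports overflow instead. `overflows<>` lives in TypeSafeSignMath.h (also part of this upload), so that include is added here:

```cpp
#include <c10/util/TypeCast.h>
#include <c10/util/TypeSafeSignMath.h> // overflows<>, used by checked_convert
#include <cstdint>

void demo_casts() {
  // Negative float -> uint8 is routed through int64_t first (see the
  // specialization above), keeping the result consistent across compilers.
  uint8_t u = c10::convert<uint8_t>(-1.0f);
  // 100 fits in int8_t, so no overflow is reported:
  int8_t ok = c10::checked_convert<int8_t>(100, "int8");
  // c10::checked_convert<int8_t>(300, "int8") would call report_overflow.
  (void)u;
  (void)ok;
}
```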
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h
ADDED
@@ -0,0 +1,196 @@
#pragma once

#include <c10/util/C++17.h>
#include <c10/util/ConstexprCrc.h>
#include <c10/util/IdWrapper.h>
#include <c10/util/string_view.h>
#include <cinttypes>
#include <functional>

namespace c10 {
namespace util {

// TODO Make it work for more compilers

// Intel compiler works
#if defined(__INTEL_COMPILER)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR

// Clang works
#elif defined(__clang__)

// except for NVCC
#if defined(__CUDACC__)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif

// Windows works
#elif defined(_MSC_VER)

// except for NVCC
#if defined(__CUDACC__)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif

// GCC works
#elif defined(__GNUC__)

// except when gcc < 9
#if (__GNUC__ < 9) || defined(__CUDACC__)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif

// some other compiler we don't know about
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif

struct type_index final : IdWrapper<type_index, uint64_t> {
  constexpr explicit type_index(uint64_t checksum) : IdWrapper(checksum) {}

  // Allow usage in std::map / std::set
  // TODO Disallow this and rather use std::unordered_map/set everywhere
  friend constexpr bool operator<(type_index lhs, type_index rhs) noexcept {
    return lhs.underlyingId() < rhs.underlyingId();
  }

  friend std::ostream& operator<<(std::ostream& stream, type_index typeId) {
    return stream << typeId.underlyingId();
  }
};

namespace detail {

#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
    __GNUC__ < 5
// Getting __PRETTY_FUNCTION__ at compile time only works with GCC >= 5
#error "You're running a too old version of GCC. We need GCC 5 or later."
#endif

#if defined(__clang__) && __clang_major__ < 4
// Getting __PRETTY_FUNCTION__ at compile time only works with Clang >= 4
#error "You're running a too old version of Clang. We need Clang 4 or later."
#endif

inline constexpr string_view extract(
    string_view prefix,
    string_view suffix,
    string_view str) {
#if !defined(__CUDA_ARCH__) // CUDA doesn't like std::logic_error in device code
  return (!str.starts_with(prefix) || !str.ends_with(suffix))
      ? (throw std::logic_error("Invalid pattern"), string_view())
      : str.substr(prefix.size(), str.size() - prefix.size() - suffix.size());
#else
  return str.substr(prefix.size(), str.size() - prefix.size() - suffix.size());
#endif
}

template <typename T>
inline C10_TYPENAME_CONSTEXPR c10::string_view fully_qualified_type_name_impl() {
#if defined(_MSC_VER) && !defined(__clang__)
#if defined(__NVCC__)
  return extract(
      "c10::basic_string_view<char> c10::util::detail::fully_qualified_type_name_impl<",
      ">()",
      __FUNCSIG__);
#else
  return extract(
      "class c10::basic_string_view<char> __cdecl c10::util::detail::fully_qualified_type_name_impl<",
      ">(void)",
      __FUNCSIG__);
#endif
#elif defined(__clang__)
  return extract(
      "c10::string_view c10::util::detail::fully_qualified_type_name_impl() [T = ",
      "]",
      __PRETTY_FUNCTION__);
#elif defined(__GNUC__)
  return extract(
#if C10_TYPENAME_SUPPORTS_CONSTEXPR
      "constexpr c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ",
#else
      "c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ",
#endif
      "; c10::string_view = c10::basic_string_view<char>]",
      __PRETTY_FUNCTION__);
#endif
}

#if !defined(__CUDA_ARCH__)
template <typename T>
inline constexpr uint64_t type_index_impl() {
// Idea: __PRETTY_FUNCTION__ (or __FUNCSIG__ on msvc) contains a qualified name
// of this function, including its template parameter, i.e. including the
// type we want an id for. We use this name and run crc64 on it to get a type
// id.
#if defined(_MSC_VER) && !defined(__clang__)
  return crc64(__FUNCSIG__, sizeof(__FUNCSIG__)).checksum();
#elif defined(__clang__)
  return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum();
#elif defined(__GNUC__)
  return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum();
#endif
}
#endif

} // namespace detail

template <typename T>
inline constexpr type_index get_type_index() {
#if !defined(__CUDA_ARCH__)
  // To enforce that this is really computed at compile time, we pass the
  // type index through std::integral_constant.
  return type_index{std::integral_constant<
      uint64_t,
      detail::type_index_impl<std::decay_t<T>>()>::value};
#else
  // There's nothing in theory preventing us from running this on device code
  // except for nvcc throwing a compiler error if we enable it.
  return (abort(), type_index(0));
#endif
}

#if !defined(TORCH_PEDANTIC)
// Use precomputed hashsum for std::string
// Needed to workaround ambiguity in class name resolution
// into __PRETTY_FUNCTION__ when abovementioned class is defined in inlined
// namespace. In multi-ABI C++ library, `std::string` is an alias to
// `std::__cxx11::basic_string<char>` which depending on compiler flags can be
// resolved to `basic_string<char>` either in `std` namespace or in
// `std::__cxx11` one (`__cxx11` is an inline namespace)
template <>
inline constexpr type_index get_type_index<std::string>() {
  // hashsum for std::basic_string<char>
  return type_index{4193213214807308375ULL};
}
#endif

template <typename T>
inline C10_TYPENAME_CONSTEXPR string_view
get_fully_qualified_type_name() noexcept {
#if C10_TYPENAME_SUPPORTS_CONSTEXPR
  constexpr
#else
  static
#endif
      string_view name = detail::fully_qualified_type_name_impl<T>();
  return name;
}
} // namespace util
} // namespace c10

C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::type_index);
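A sketch of using the id at compile time (assuming a compiler on the `C10_TYPENAME_SUPPORTS_CONSTEXPR` path, and assuming `IdWrapper` supplies the constexpr equality used below):

```cpp
#include <c10/util/TypeIndex.h>

// The crc64-of-__PRETTY_FUNCTION__ ids are stable within one build:
static_assert(
    c10::util::get_type_index<int>() == c10::util::get_type_index<int>(),
    "same type, same id");
static_assert(
    c10::util::get_type_index<int>() != c10::util::get_type_index<float>(),
    "distinct types get distinct ids (barring crc64 collisions)");
```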
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeList.h
ADDED
@@ -0,0 +1,516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/util/C++17.h>
|
4 |
+
#include <c10/util/TypeTraits.h>
|
5 |
+
#include <algorithm>
|
6 |
+
|
7 |
+
namespace c10 {
|
8 |
+
namespace guts {
|
9 |
+
|
10 |
+
template <class... T>
|
11 |
+
struct false_t : std::false_type {};
|
12 |
+
template <template <class> class... T>
|
13 |
+
struct false_higher_t : std::false_type {};
|
14 |
+
|
15 |
+
namespace typelist {
|
16 |
+
|
17 |
+
/**
|
18 |
+
* Type holding a list of types for compile time type computations
|
19 |
+
*/
|
20 |
+
template <class... Items>
|
21 |
+
struct typelist final {
|
22 |
+
public:
|
23 |
+
typelist() = delete; // not for instantiation
|
24 |
+
};
|
25 |
+
|
26 |
+
/**
|
27 |
+
* Returns the number of types in a typelist
|
28 |
+
* Example:
|
29 |
+
* 3 == size<typelist<int, int, double>>::value
|
30 |
+
*/
|
31 |
+
template <class TypeList>
|
32 |
+
struct size final {
|
33 |
+
static_assert(
|
34 |
+
false_t<TypeList>::value,
|
35 |
+
"In typelist::size<T>, T must be typelist<...>.");
|
36 |
+
};
|
37 |
+
template <class... Types>
|
38 |
+
struct size<typelist<Types...>> final {
|
39 |
+
static constexpr size_t value = sizeof...(Types);
|
40 |
+
};
|
41 |
+
|
42 |
+
/**
|
43 |
+
* Transforms a list of types into a tuple holding these types.
|
44 |
+
* Example:
|
45 |
+
* std::tuple<int, string> == to_tuple_t<typelist<int, string>>
|
46 |
+
*/
|
47 |
+
template <class TypeList>
|
48 |
+
struct to_tuple final {
|
49 |
+
static_assert(
|
50 |
+
false_t<TypeList>::value,
|
51 |
+
"In typelist::to_tuple<T>, T must be typelist<...>.");
|
52 |
+
};
|
53 |
+
template <class... Types>
|
54 |
+
struct to_tuple<typelist<Types...>> final {
|
55 |
+
using type = std::tuple<Types...>;
|
56 |
+
};
|
57 |
+
template <class TypeList>
|
58 |
+
using to_tuple_t = typename to_tuple<TypeList>::type;
|
59 |
+
|
60 |
+
/**
|
61 |
+
* Creates a typelist containing the types of a given tuple.
|
62 |
+
* Example:
|
63 |
+
* typelist<int, string> == from_tuple_t<std::tuple<int, string>>
|
64 |
+
*/
|
65 |
+
template <class Tuple>
|
66 |
+
struct from_tuple final {
|
67 |
+
static_assert(
|
68 |
+
false_t<Tuple>::value,
|
69 |
+
"In typelist::from_tuple<T>, T must be std::tuple<...>.");
|
70 |
+
};
|
71 |
+
template <class... Types>
|
72 |
+
struct from_tuple<std::tuple<Types...>> final {
|
73 |
+
using type = typelist<Types...>;
|
74 |
+
};
|
75 |
+
template <class Tuple>
|
76 |
+
using from_tuple_t = typename from_tuple<Tuple>::type;
|
77 |
+
|
78 |
+
/**
|
79 |
+
* Concatenates multiple type lists.
|
80 |
+
* Example:
|
81 |
+
* typelist<int, string, int> == concat_t<typelist<int, string>,
|
82 |
+
* typelist<int>>
|
83 |
+
*/
|
84 |
+
template <class... TypeLists>
|
85 |
+
struct concat final {
|
86 |
+
static_assert(
|
87 |
+
false_t<TypeLists...>::value,
|
88 |
+
"In typelist::concat<T1, ...>, the T arguments each must be typelist<...>.");
|
89 |
+
};
|
90 |
+
template <class... Head1Types, class... Head2Types, class... TailLists>
|
91 |
+
struct concat<typelist<Head1Types...>, typelist<Head2Types...>, TailLists...>
|
92 |
+
final {
|
93 |
+
using type =
|
94 |
+
typename concat<typelist<Head1Types..., Head2Types...>, TailLists...>::
|
95 |
+
type;
|
96 |
+
};
|
97 |
+
template <class... HeadTypes>
|
98 |
+
struct concat<typelist<HeadTypes...>> final {
|
99 |
+
using type = typelist<HeadTypes...>;
|
100 |
+
};
|
101 |
+
template <>
|
102 |
+
struct concat<> final {
|
103 |
+
using type = typelist<>;
|
104 |
+
};
|
105 |
+
template <class... TypeLists>
|
106 |
+
using concat_t = typename concat<TypeLists...>::type;
|
107 |
+
|
108 |
+
/**
|
109 |
+
* Filters the types in a type list by a type trait.
|
110 |
+
* Examples:
|
111 |
+
* typelist<int&, const string&&> == filter_t<std::is_reference,
|
112 |
+
* typelist<void, string, int&, bool, const string&&, int>>
|
113 |
+
*/
|
114 |
+
template <template <class> class Condition, class TypeList>
|
115 |
+
struct filter final {
|
116 |
+
static_assert(
|
117 |
+
false_t<TypeList>::value,
|
118 |
+
"In typelist::filter<Condition, TypeList>, the TypeList argument must be typelist<...>.");
|
119 |
+
};
|
120 |
+
template <template <class> class Condition, class Head, class... Tail>
|
121 |
+
struct filter<Condition, typelist<Head, Tail...>> final {
|
122 |
+
static_assert(
|
123 |
+
is_type_condition<Condition>::value,
|
124 |
+
"In typelist::filter<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
|
125 |
+
using type = std::conditional_t<
|
126 |
+
Condition<Head>::value,
|
127 |
+
concat_t<
|
128 |
+
typelist<Head>,
|
129 |
+
typename filter<Condition, typelist<Tail...>>::type>,
|
130 |
+
typename filter<Condition, typelist<Tail...>>::type>;
|
131 |
+
};
|
132 |
+
template <template <class> class Condition>
|
133 |
+
struct filter<Condition, typelist<>> final {
|
134 |
+
static_assert(
|
135 |
+
is_type_condition<Condition>::value,
|
136 |
+
"In typelist::filter<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
|
137 |
+
using type = typelist<>;
|
138 |
+
};
|
139 |
+
template <template <class> class Condition, class TypeList>
|
140 |
+
using filter_t = typename filter<Condition, TypeList>::type;
|
141 |
+
|
142 |
+
/**
|
143 |
+
* Counts how many types in the list fulfill a type trait
|
144 |
+
* Examples:
|
145 |
+
* 2 == count_if<std::is_reference, typelist<void, string, int&, bool, const
|
146 |
+
* string&&, int>>
|
147 |
+
*/
|
148 |
+
template <template <class> class Condition, class TypeList>
|
149 |
+
struct count_if final {
|
150 |
+
static_assert(
|
151 |
+
is_type_condition<Condition>::value,
|
152 |
+
"In typelist::count_if<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
|
153 |
+
static_assert(
|
154 |
+
is_instantiation_of<typelist, TypeList>::value,
|
155 |
+
"In typelist::count_if<Condition, TypeList>, the TypeList argument must be typelist<...>.");
|
156 |
+
// TODO Direct implementation might be faster
|
157 |
+
static constexpr size_t value = size<filter_t<Condition, TypeList>>::value;
|
158 |
+
};
|
159 |
+
|
160 |
+
/**
|
161 |
+
* Checks if a typelist contains a certain type.
|
162 |
+
* Examples:
|
163 |
+
* contains<typelist<int, string>, string> == true_type
|
164 |
+
* contains<typelist<int, string>, double> == false_type
|
165 |
+
*/
|
166 |
+
namespace detail {
|
167 |
+
template <class TypeList, class Type, class Enable = void>
|
168 |
+
struct contains {};
|
169 |
+
template <class Type>
|
170 |
+
struct contains<typelist<>, Type, void> : std::false_type {};
|
171 |
+
template <class Type, class Head, class... Tail>
|
172 |
+
struct contains<
|
173 |
+
typelist<Head, Tail...>,
|
174 |
+
Type,
|
175 |
+
std::enable_if_t<std::is_same<Head, Type>::value>> : std::true_type {};
|
176 |
+
template <class Type, class Head, class... Tail>
|
177 |
+
struct contains<
|
178 |
+
typelist<Head, Tail...>,
|
179 |
+
Type,
|
180 |
+
std::enable_if_t<!std::is_same<Head, Type>::value>>
|
181 |
+
: contains<typelist<Tail...>, Type> {};
|
182 |
+
} // namespace detail
|
183 |
+
template <class TypeList, class Type>
|
184 |
+
using contains = typename detail::contains<TypeList, Type>::type;
|
185 |
+
|
186 |
+
/**
|
187 |
+
* Returns true iff the type trait is true for all types in the type list
|
188 |
+
* Examples:
|
189 |
+
* true == all<std::is_reference, typelist<int&, const float&&, const
|
190 |
+
* MyClass&>>::value false == all<std::is_reference, typelist<int&, const
|
191 |
+
* float&&, MyClass>>::value
|
192 |
+
*/
|
193 |
+
template <template <class> class Condition, class TypeList>
|
194 |
+
struct all {
|
195 |
+
static_assert(
|
196 |
+
false_t<TypeList>::value,
|
197 |
+
"In typelist::all<Condition, TypeList>, the TypeList argument must be typelist<...>.");
|
198 |
+
};
|
199 |
+
template <template <class> class Condition, class... Types>
|
200 |
+
struct all<Condition, typelist<Types...>>
|
201 |
+
: guts::conjunction<Condition<Types>...> {
|
202 |
+
static_assert(
|
203 |
+
is_type_condition<Condition>::value,
|
204 |
+
"In typelist::all<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
|
205 |
+
};
|
206 |
+
|
207 |
+
/**
|
208 |
+
* Returns true iff the type trait is true for any type in the type list
|
209 |
+
* Examples:
|
210 |
+
* true == true_for_any_type<std::is_reference, typelist<int, const
|
211 |
+
* float&&, const MyClass>>::value false ==
|
212 |
+
* true_for_any_type<std::is_reference, typelist<int, const float,
|
213 |
+
* MyClass>>::value
|
214 |
+
*/
|
215 |
+
template <template <class> class Condition, class TypeList>
|
216 |
+
struct true_for_any_type final {
|
217 |
+
static_assert(
|
218 |
+
false_t<TypeList>::value,
|
219 |
+
"In typelist::true_for_any_type<Condition, TypeList>, the TypeList argument must be typelist<...>.");
|
220 |
+
};
|
221 |
+
template <template <class> class Condition, class... Types>
|
222 |
+
struct true_for_any_type<Condition, typelist<Types...>> final
|
223 |
+
: guts::disjunction<Condition<Types>...> {
|
224 |
+
static_assert(
|
225 |
+
is_type_condition<Condition>::value,
|
226 |
+
"In typelist::true_for_any_type<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
|
227 |
+
};
|
228 |
+
|
229 |
+
/**
|
230 |
+
* Maps types of a type list using a type trait
|
231 |
+
* Example:
|
232 |
+
* typelist<int&, double&, string&> == map_t<std::add_lvalue_reference_t,
|
233 |
+
* typelist<int, double, string>>
|
234 |
+
*/
|
235 |
+
template <template <class> class Mapper, class TypeList>
|
236 |
+
struct map final {
|
237 |
+
static_assert(
|
238 |
+
false_t<TypeList>::value,
|
239 |
+
"In typelist::map<Mapper, TypeList>, the TypeList argument must be typelist<...>.");
|
240 |
+
};
|
241 |
+
template <template <class> class Mapper, class... Types>
|
242 |
+
struct map<Mapper, typelist<Types...>> final {
|
243 |
+
using type = typelist<Mapper<Types>...>;
|
244 |
+
};
|
245 |
+
template <template <class> class Mapper, class TypeList>
|
246 |
+
using map_t = typename map<Mapper, TypeList>::type;
|
247 |
+
|
248 |
+
/**
|
249 |
+
* Returns the first element of a type list.
|
250 |
+
* Example:
|
251 |
+
* int == head_t<typelist<int, string>>
|
252 |
+
*/
|
253 |
+
template <class TypeList>
|
254 |
+
struct head final {
|
255 |
+
static_assert(
|
256 |
+
false_t<TypeList>::value,
|
257 |
+
"In typelist::head<T>, the T argument must be typelist<...>.");
|
258 |
+
};
|
259 |
+
template <class Head, class... Tail>
|
260 |
+
struct head<typelist<Head, Tail...>> final {
|
261 |
+
using type = Head;
|
262 |
+
};
|
263 |
+
template <class TypeList>
|
264 |
+
using head_t = typename head<TypeList>::type;
|
265 |
+
|
266 |
+
/**
|
267 |
+
* Returns the first element of a type list, or the specified default if the
|
268 |
+
* type list is empty. Example: int == head_t<bool, typelist<int, string>>
|
269 |
+
* bool == head_t<bool, typelist<>>
|
270 |
+
*/
|
271 |
+
template <class Default, class TypeList>
|
272 |
+
struct head_with_default final {
|
273 |
+
using type = Default;
|
274 |
+
};
|
275 |
+
template <class Default, class Head, class... Tail>
|
276 |
+
struct head_with_default<Default, typelist<Head, Tail...>> final {
|
277 |
+
using type = Head;
|
278 |
+
};
|
279 |
+
template <class Default, class TypeList>
|
280 |
+
using head_with_default_t = typename head_with_default<Default, TypeList>::type;
|
281 |
+
|
282 |
+
/**
|
283 |
+
* Returns the N-th element of a type list.
|
284 |
+
* Example:
|
285 |
+
* int == element_t<1, typelist<float, int, char>>
|
286 |
+
*/
|
287 |
+
|
288 |
+
/// Base template.
|
289 |
+
template <size_t Index, class TypeList>
|
290 |
+
struct element final {
|
291 |
+
static_assert(
|
292 |
+
false_t<TypeList>::value,
|
293 |
+
"In typelist::element<T>, the T argument must be typelist<...>.");
|
294 |
+
};
|
295 |
+
|
296 |
+
/// Successful case, we have reached the zero index and can "return" the head
|
297 |
+
/// type.
|
298 |
+
template <class Head, class... Tail>
|
299 |
+
struct element<0, typelist<Head, Tail...>> {
|
300 |
+
using type = Head;
|
301 |
+
};
|
302 |
+
|
303 |
+
/// Error case, we have an index but ran out of types! It will only be selected
|
304 |
+
/// if `Ts...` is actually empty!
|
305 |
+
template <size_t Index, class... Ts>
|
306 |
+
struct element<Index, typelist<Ts...>> {
|
307 |
+
static_assert(
|
308 |
+
Index < sizeof...(Ts),
|
309 |
+
"Index is out of bounds in typelist::element");
|
310 |
+
};
|
311 |
+
|
312 |
+
/// Shave off types until we hit the <0, Head, Tail...> or <Index> case.
|
313 |
+
template <size_t Index, class Head, class... Tail>
|
314 |
+
struct element<Index, typelist<Head, Tail...>>
|
315 |
+
: element<Index - 1, typelist<Tail...>> {};
|
316 |
+
|
317 |
+
/// Convenience alias.
|
318 |
+
template <size_t Index, class TypeList>
|
319 |
+
using element_t = typename element<Index, TypeList>::type;
|
320 |
+
|
321 |
+
/**
|
322 |
+
* Returns the last element of a type list.
|
323 |
+
* Example:
|
324 |
+
* int == last_t<typelist<int, string>>
|
325 |
+
*/
|
326 |
+
template <class TypeList>
|
327 |
+
struct last final {
|
328 |
+
static_assert(
|
329 |
+
false_t<TypeList>::value,
|
330 |
+
"In typelist::last<T>, the T argument must be typelist<...>.");
|
331 |
+
};
|
332 |
+
template <class Head, class... Tail>
|
333 |
+
struct last<typelist<Head, Tail...>> final {
|
334 |
+
using type = typename last<typelist<Tail...>>::type;
|
335 |
+
};
|
336 |
+
template <class Head>
|
337 |
+
struct last<typelist<Head>> final {
|
338 |
+
using type = Head;
|
339 |
+
};
|
340 |
+
template <class TypeList>
|
341 |
+
using last_t = typename last<TypeList>::type;
|
342 |
+
static_assert(std::is_same<int, last_t<typelist<double, float, int>>>::value);
|
343 |
+
|
344 |
+
/**
|
345 |
+
* Take/drop a number of arguments from a typelist.
|
346 |
+
* Example:
|
347 |
+
* typelist<int, string> == take_t<typelist<int, string, bool>, 2>
|
348 |
+
* typelist<bool> == drop_t<typelist<int, string, bool>, 2>
|
349 |
+
*/
|
350 |
+
namespace detail {
|
351 |
+
template <class TypeList, size_t offset, class IndexSequence>
|
352 |
+
struct take_elements final {};
|
353 |
+
|
354 |
+
template <class TypeList, size_t offset, size_t... Indices>
|
355 |
+
struct take_elements<TypeList, offset, std::index_sequence<Indices...>> final {
|
356 |
+
using type = typelist<typename element<offset + Indices, TypeList>::type...>;
|
357 |
+
};
|
358 |
+
} // namespace detail
|
359 |
+
|
360 |
+
template <class TypeList, size_t num>
|
361 |
+
struct take final {
|
362 |
+
static_assert(
|
363 |
+
is_instantiation_of<typelist, TypeList>::value,
|
364 |
+
"In typelist::take<T, num>, the T argument must be typelist<...>.");
|
365 |
+
static_assert(
|
366 |
+
num <= size<TypeList>::value,
|
367 |
+
"Tried to typelist::take more elements than there are in the list");
|
368 |
+
using type = typename detail::
|
369 |
+
take_elements<TypeList, 0, std::make_index_sequence<num>>::type;
|
370 |
+
};
|
371 |
+
template <class TypeList, size_t num>
|
372 |
+
using take_t = typename take<TypeList, num>::type;
|
373 |
+
|
374 |
+
template <class TypeList, size_t num>
|
375 |
+
struct drop final {
|
376 |
+
static_assert(
|
377 |
+
is_instantiation_of<typelist, TypeList>::value,
|
378 |
+
"In typelist::drop<T, num>, the T argument must be typelist<...>.");
|
379 |
+
static_assert(
|
380 |
+
num <= size<TypeList>::value,
|
381 |
+
"Tried to typelist::drop more elements than there are in the list");
|
382 |
+
using type = typename detail::take_elements<
|
383 |
+
TypeList,
|
384 |
+
num,
|
385 |
+
std::make_index_sequence<size<TypeList>::value - num>>::type;
|
386 |
+
};
|
387 |
+
template <class TypeList, size_t num>
|
388 |
+
using drop_t = typename drop<TypeList, num>::type;
|
389 |
+
|
390 |
+
/**
|
391 |
+
* Like drop, but returns an empty list rather than an assertion error if `num`
|
392 |
+
* is larger than the size of the TypeList.
|
393 |
+
* Example:
|
394 |
+
* typelist<> == drop_if_nonempty_t<typelist<string, bool>, 2>
|
395 |
+
* typelist<> == drop_if_nonempty_t<typelist<int, string, bool>, 3>
|
396 |
+
*/
|
397 |
+
template <class TypeList, size_t num>
|
+struct drop_if_nonempty final {
+  static_assert(
+      is_instantiation_of<typelist, TypeList>::value,
+      "In typelist::drop<T, num>, the T argument must be typelist<...>.");
+  using type = typename detail::take_elements<
+      TypeList,
+      std::min(num, size<TypeList>::value),
+      std::make_index_sequence<
+          size<TypeList>::value - std::min(num, size<TypeList>::value)>>::type;
+};
+template <class TypeList, size_t num>
+using drop_if_nonempty_t = typename drop_if_nonempty<TypeList, num>::type;
+
+/**
+ * Reverses a typelist.
+ * Example:
+ *   typelist<int, string> == reverse_t<typelist<string, int>>
+ */
+template <class TypeList>
+struct reverse final {
+  static_assert(
+      false_t<TypeList>::value,
+      "In typelist::reverse<T>, the T argument must be typelist<...>.");
+};
+template <class Head, class... Tail>
+struct reverse<typelist<Head, Tail...>> final {
+  using type =
+      concat_t<typename reverse<typelist<Tail...>>::type, typelist<Head>>;
+};
+template <>
+struct reverse<typelist<>> final {
+  using type = typelist<>;
+};
+template <class TypeList>
+using reverse_t = typename reverse<TypeList>::type;
+
+/**
+ * Find the index of the first type in a typelist fulfilling a type trait
+ * condition. Example:
+ *
+ *   2 == find_if<typelist<char, int, char&, int&>, std::is_reference>::value
+ */
+template <class TypeList, template <class> class Condition, class Enable = void>
+struct find_if final {
+  static_assert(
+      false_t<TypeList>::value,
+      "In typelist::find_if<TypeList, Condition>, the TypeList argument must be typelist<...>.");
+};
+template <template <class> class Condition>
+struct find_if<typelist<>, Condition, void> final {
+  static_assert(
+      false_higher_t<Condition>::value,
+      "In typelist::find_if<TypeList, Condition>, didn't find any type fulfilling the Condition.");
+};
+template <class Head, class... Tail, template <class> class Condition>
+struct find_if<
+    typelist<Head, Tail...>,
+    Condition,
+    std::enable_if_t<Condition<Head>::value>>
+    final {
+  static constexpr size_t value = 0;
+};
+template <class Head, class... Tail, template <class> class Condition>
+struct find_if<
+    typelist<Head, Tail...>,
+    Condition,
+    std::enable_if_t<!Condition<Head>::value>>
+    final {
+  static constexpr size_t value =
+      1 + find_if<typelist<Tail...>, Condition>::value;
+};
+
+/**
+ * Maps a list of types into a list of values.
+ * Examples:
+ *   // Example 1
+ *   auto sizes =
+ *     map_types_to_values<typelist<int64_t, bool, uint32_t>>(
+ *       [] (auto t) { return sizeof(decltype(t)::type); }
+ *     );
+ *   // sizes == std::tuple<size_t, size_t, size_t>{8, 1, 4}
+ *
+ *   // Example 2
+ *   auto shared_ptrs =
+ *     map_types_to_values<typelist<int, double>>(
+ *       [] (auto t) { return make_shared<typename decltype(t)::type>(); }
+ *     );
+ *   // shared_ptrs == std::tuple<shared_ptr<int>, shared_ptr<double>>()
+ */
+namespace detail {
+template <class T>
+struct type_ final {
+  using type = T;
+};
+template <class TypeList>
+struct map_types_to_values final {
+  static_assert(
+      false_t<TypeList>::value,
+      "In typelist::map_types_to_values<T>, the T argument must be typelist<...>.");
+};
+template <class... Types>
+struct map_types_to_values<typelist<Types...>> final {
+  template <class Func>
+  static std::tuple<c10::invoke_result_t<Func, type_<Types>>...> call(
+      Func&& func) {
+    return std::tuple<c10::invoke_result_t<Func, type_<Types>>...>{
+        std::forward<Func>(func)(type_<Types>())...};
+  }
+};
+} // namespace detail
+
+template <class TypeList, class Func>
+decltype(auto) map_types_to_values(Func&& func) {
+  return detail::map_types_to_values<TypeList>::call(std::forward<Func>(func));
+}
+
+} // namespace typelist
+} // namespace guts
+} // namespace c10
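A minimal usage sketch for the typelist utilities above (reverse_t, find_if, map_types_to_values); the include path and C++17 mode are assumptions, not part of the diff:

    #include <c10/util/TypeList.h>
    #include <cstddef>
    #include <cstdint>
    #include <tuple>
    #include <type_traits>

    using namespace c10::guts::typelist;

    // reverse_t flips the order of the list.
    static_assert(
        std::is_same<reverse_t<typelist<int, char>>, typelist<char, int>>::value, "");

    // find_if yields the index of the first type matching the condition.
    static_assert(
        find_if<typelist<char, int, char&, int&>, std::is_reference>::value == 2, "");

    // map_types_to_values calls the lambda once per type, each wrapped in type_<T>.
    std::tuple<size_t, size_t> element_sizes() {
      return map_types_to_values<typelist<int64_t, bool>>(
          [](auto t) { return sizeof(typename decltype(t)::type); });
    }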
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeSafeSignMath.h
ADDED
@@ -0,0 +1,144 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <limits>
+#include <type_traits>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wstring-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wstring-conversion")
+#endif
+#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
+#endif
+
+namespace c10 {
+
+/// Returns false since we cannot have x < 0 if x is unsigned.
+template <typename T>
+static inline constexpr bool is_negative(
+    const T& /*x*/,
+    std::true_type /*is_unsigned*/) {
+  return false;
+}
+
+/// Returns true if a signed variable x < 0
+template <typename T>
+static inline constexpr bool is_negative(
+    const T& x,
+    std::false_type /*is_unsigned*/) {
+  return x < T(0);
+}
+
+/// Returns true if x < 0
+/// NOTE: Will fail on an unsigned custom type
+///       For the most part it's possible to fix this if
+///       the custom type has a constexpr constructor.
+///       However, notably, c10::Half does not :-(
+template <typename T>
+inline constexpr bool is_negative(const T& x) {
+  return is_negative(x, std::is_unsigned<T>());
+}
+
+/// Returns the sign of an unsigned variable x as 0, 1
+template <typename T>
+static inline constexpr int signum(const T& x, std::true_type /*is_unsigned*/) {
+  return T(0) < x;
+}
+
+/// Returns the sign of a signed variable x as -1, 0, 1
+template <typename T>
+static inline constexpr int signum(
+    const T& x,
+    std::false_type /*is_unsigned*/) {
+  return (T(0) < x) - (x < T(0));
+}
+
+/// Returns the sign of x as -1, 0, 1
+/// NOTE: Will fail on an unsigned custom type
+///       For the most part it's possible to fix this if
+///       the custom type has a constexpr constructor.
+///       However, notably, c10::Half does not :-(
+template <typename T>
+inline constexpr int signum(const T& x) {
+  return signum(x, std::is_unsigned<T>());
+}
+
+/// Returns true if the signs of a and b differ, i.e. exactly one of them is
+/// negative
+template <typename T, typename U>
+inline constexpr bool signs_differ(const T& a, const U& b) {
+  return is_negative(a) != is_negative(b);
+}
+
+// Suppress sign compare warning when compiling with GCC, as the latter does
+// not account for the short-circuit rule before raising the warning; see
+// https://godbolt.org/z/Tr3Msnz99
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-compare"
+#endif
+
+/// Returns true if x is greater than the greatest value of the type Limit
+template <typename Limit, typename T>
+inline constexpr bool greater_than_max(const T& x) {
+  constexpr bool can_overflow =
+      std::numeric_limits<T>::digits > std::numeric_limits<Limit>::digits;
+  return can_overflow && x > std::numeric_limits<Limit>::max();
+}
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+
+/// Returns true if x < lowest(Limit); standard comparison, both types signed
+template <typename Limit, typename T>
+static inline constexpr bool less_than_lowest(
+    const T& x,
+    std::false_type /*limit_is_unsigned*/,
+    std::false_type /*x_is_unsigned*/) {
+  return x < std::numeric_limits<Limit>::lowest();
+}
+
+/// Returns false since the limit is signed and therefore includes negative
+/// values, but x cannot be negative because it is unsigned
+template <typename Limit, typename T>
+static inline constexpr bool less_than_lowest(
+    const T& /*x*/,
+    std::false_type /*limit_is_unsigned*/,
+    std::true_type /*x_is_unsigned*/) {
+  return false;
+}
+
+/// Returns true if x < 0, where 0 is constructed from T.
+/// Limit is unsigned, so its lowest value is zero
+template <typename Limit, typename T>
+static inline constexpr bool less_than_lowest(
+    const T& x,
+    std::true_type /*limit_is_unsigned*/,
+    std::false_type /*x_is_unsigned*/) {
+  return x < T(0);
+}
+
+/// Returns false since both types are unsigned
+template <typename Limit, typename T>
+static inline constexpr bool less_than_lowest(
+    const T& /*x*/,
+    std::true_type /*limit_is_unsigned*/,
+    std::true_type /*x_is_unsigned*/) {
+  return false;
+}
+
+/// Returns true if x is less than the lowest value of the type Limit
+/// NOTE: Will fail on an unsigned custom type
+///       For the most part it's possible to fix this if
+///       the custom type has a constexpr constructor.
+///       However, notably, c10::Half does not :-(
+template <typename Limit, typename T>
+inline constexpr bool less_than_lowest(const T& x) {
+  return less_than_lowest<Limit>(
+      x, std::is_unsigned<Limit>(), std::is_unsigned<T>());
+}
+
+} // namespace c10
+
+C10_CLANG_DIAGNOSTIC_POP()
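A minimal sketch of how these predicates compose into a range-checked narrowing cast; checked_to_int8 is a hypothetical helper, not part of the header:

    #include <c10/util/TypeSafeSignMath.h>
    #include <cstdint>
    #include <stdexcept>

    // Hypothetical helper: reject values outside int8_t's range before casting.
    int8_t checked_to_int8(int64_t x) {
      if (c10::greater_than_max<int8_t>(x) || c10::less_than_lowest<int8_t>(x)) {
        throw std::overflow_error("value does not fit in int8_t");
      }
      return static_cast<int8_t>(x);
    }

    // The predicates are constexpr, so they also work at compile time.
    static_assert(c10::signum(-3) == -1, "");
    static_assert(!c10::is_negative(0u), "");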
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/TypeTraits.h
ADDED
@@ -0,0 +1,152 @@
+#pragma once
+
+#include <c10/util/C++17.h>
+
+namespace c10 {
+namespace guts {
+
+/**
+ * is_equality_comparable<T> is true_type iff the equality operator is defined
+ * for T.
+ */
+template <class T, class Enable = void>
+struct is_equality_comparable : std::false_type {};
+template <class T>
+struct is_equality_comparable<
+    T,
+    void_t<decltype(std::declval<T&>() == std::declval<T&>())>>
+    : std::true_type {};
+template <class T>
+using is_equality_comparable_t = typename is_equality_comparable<T>::type;
+
+/**
+ * is_hashable<T> is true_type iff std::hash is defined for T
+ */
+template <class T, class Enable = void>
+struct is_hashable : std::false_type {};
+template <class T>
+struct is_hashable<T, void_t<decltype(std::hash<T>()(std::declval<T&>()))>>
+    : std::true_type {};
+template <class T>
+using is_hashable_t = typename is_hashable<T>::type;
+
+/**
+ * is_function_type<T> is true_type iff T is a plain function type (i.e.
+ * "Result(Args...)")
+ */
+template <class T>
+struct is_function_type : std::false_type {};
+template <class Result, class... Args>
+struct is_function_type<Result(Args...)> : std::true_type {};
+template <class T>
+using is_function_type_t = typename is_function_type<T>::type;
+
+/**
+ * is_instantiation_of<T, I> is true_type iff I is a template instantiation of T
+ * (e.g. vector<int> is an instantiation of vector). Example:
+ *   is_instantiation_of_t<vector, vector<int>> // true
+ *   is_instantiation_of_t<pair, pair<int, string>> // true
+ *   is_instantiation_of_t<vector, pair<int, string>> // false
+ */
+template <template <class...> class Template, class T>
+struct is_instantiation_of : std::false_type {};
+template <template <class...> class Template, class... Args>
+struct is_instantiation_of<Template, Template<Args...>> : std::true_type {};
+template <template <class...> class Template, class T>
+using is_instantiation_of_t = typename is_instantiation_of<Template, T>::type;
+
+namespace detail {
+/**
+ * strip_class: helper to remove the class type from pointers to `operator()`.
+ */
+template <typename T>
+struct strip_class {};
+template <typename Class, typename Result, typename... Args>
+struct strip_class<Result (Class::*)(Args...)> {
+  using type = Result(Args...);
+};
+template <typename Class, typename Result, typename... Args>
+struct strip_class<Result (Class::*)(Args...) const> {
+  using type = Result(Args...);
+};
+template <typename T>
+using strip_class_t = typename strip_class<T>::type;
+} // namespace detail
+
+/**
+ * Evaluates to true_type iff the given class is a Functor
+ * (i.e. has a call operator with some set of arguments)
+ */
+template <class Functor, class Enable = void>
+struct is_functor : std::false_type {};
+template <class Functor>
+struct is_functor<
+    Functor,
+    std::enable_if_t<is_function_type<
+        detail::strip_class_t<decltype(&Functor::operator())>>::value>>
+    : std::true_type {};
+
+/**
+ * is_stateless_lambda<T> is true iff the lambda type T is stateless
+ * (i.e. does not have a closure).
+ * Example:
+ *   auto stateless_lambda = [] (int a) {return a;};
+ *   is_stateless_lambda<decltype(stateless_lambda)> // true
+ *   auto stateful_lambda = [&] (int a) {return a;};
+ *   is_stateless_lambda<decltype(stateful_lambda)> // false
+ */
+namespace detail {
+template <class LambdaType, class FuncType>
+struct is_stateless_lambda__ final {
+  static_assert(
+      !std::is_same<LambdaType, LambdaType>::value,
+      "Base case shouldn't be hit");
+};
+// implementation idea: According to the C++ standard, stateless lambdas are
+// convertible to function pointers
+template <class LambdaType, class C, class Result, class... Args>
+struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...) const>
+    : std::is_convertible<LambdaType, Result (*)(Args...)> {};
+template <class LambdaType, class C, class Result, class... Args>
+struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...)>
+    : std::is_convertible<LambdaType, Result (*)(Args...)> {};
+
+// case where LambdaType is not even a functor
+template <class LambdaType, class Enable = void>
+struct is_stateless_lambda_ final : std::false_type {};
+// case where LambdaType is a functor
+template <class LambdaType>
+struct is_stateless_lambda_<
+    LambdaType,
+    std::enable_if_t<is_functor<LambdaType>::value>>
+    : is_stateless_lambda__<LambdaType, decltype(&LambdaType::operator())> {};
+} // namespace detail
+template <class T>
+using is_stateless_lambda = detail::is_stateless_lambda_<std::decay_t<T>>;
+
+/**
+ * is_type_condition<C> is true_type iff C<...> is a type trait representing a
+ * condition (i.e. has a constexpr static bool ::value member). Example:
+ *   is_type_condition<std::is_reference> // true
+ */
+template <template <class> class C, class Enable = void>
+struct is_type_condition : std::false_type {};
+template <template <class> class C>
+struct is_type_condition<
+    C,
+    std::enable_if_t<
+        std::is_same<bool, std::remove_cv_t<decltype(C<int>::value)>>::value>>
+    : std::true_type {};
+
+/**
+ * is_fundamental<T> is true_type iff T is a fundamental type (that is, an
+ * arithmetic type, void, or nullptr_t).
+ * Example: is_fundamental<int> // true
+ * We define it here to resolve a MSVC bug. See
+ * https://github.com/pytorch/pytorch/issues/30932 for details.
+ */
+template <class T>
+struct is_fundamental : std::is_fundamental<T> {};
+} // namespace guts
+} // namespace c10
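A minimal sketch exercising the traits above; everything is compile-time, so static_asserts suffice:

    #include <c10/util/TypeTraits.h>
    #include <string>
    #include <utility>

    using namespace c10::guts;

    static_assert(is_equality_comparable<int>::value, "");
    static_assert(is_hashable<std::string>::value, "");
    static_assert(is_function_type<int(double)>::value, "");
    static_assert(is_instantiation_of<std::pair, std::pair<int, char>>::value, "");

    inline void lambda_checks() {
      auto stateless = [](int a) { return a; };
      static_assert(is_functor<decltype(stateless)>::value, "");
      static_assert(is_stateless_lambda<decltype(stateless)>::value, "");
    }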
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Unicode.h
ADDED
@@ -0,0 +1,14 @@
+#pragma once
+
+#if defined(_WIN32)
+#include <c10/util/Exception.h>
+#include <c10/util/win32-headers.h>
+#include <string>
+#endif
+
+namespace c10 {
+#if defined(_WIN32)
+C10_API std::wstring u8u16(const std::string& str);
+C10_API std::string u16u8(const std::wstring& wstr);
+#endif
+} // namespace c10
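A minimal sketch of the Windows-only conversion helpers; outside _WIN32 the functions are not declared, so usage must be guarded the same way as the header:

    #include <c10/util/Unicode.h>
    #include <string>

    #if defined(_WIN32)
    // UTF-8 -> UTF-16, e.g. before passing a path to a Win32 W-API.
    std::wstring to_wide(const std::string& utf8) {
      return c10::u8u16(utf8);
    }
    #endif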
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/UniqueVoidPtr.h
ADDED
@@ -0,0 +1,124 @@
+#pragma once
+#include <memory>
+
+#include <c10/macros/Macros.h>
+
+namespace c10 {
+
+using DeleterFnPtr = void (*)(void*);
+
+namespace detail {
+
+// Does not delete anything
+C10_API void deleteNothing(void*);
+
+// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
+// with three major differences:
+//
+//    1) It is specialized to void
+//
+//    2) It is specialized for a function pointer deleter
+//       void(void* ctx); i.e., the deleter doesn't take a
+//       reference to the data, just to a context pointer
+//       (erased as void*). In fact, internally, this pointer
+//       is implemented as having an owning reference to
+//       context, and a non-owning reference to data; this is why
+//       you release_context(), not release() (the conventional
+//       API for release() wouldn't give you enough information
+//       to properly dispose of the object later.)
+//
+//    3) The deleter is guaranteed to be called when the unique
+//       pointer is destructed and the context is non-null; this is different
+//       from std::unique_ptr where the deleter is not called if the
+//       data pointer is null.
+//
+// Some of the methods have slightly different types than std::unique_ptr
+// to reflect this.
+//
+class UniqueVoidPtr {
+ private:
+  // Lifetime tied to ctx_
+  void* data_;
+  std::unique_ptr<void, DeleterFnPtr> ctx_;
+
+ public:
+  UniqueVoidPtr() : data_(nullptr), ctx_(nullptr, &deleteNothing) {}
+  explicit UniqueVoidPtr(void* data)
+      : data_(data), ctx_(nullptr, &deleteNothing) {}
+  UniqueVoidPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter)
+      : data_(data), ctx_(ctx, ctx_deleter ? ctx_deleter : &deleteNothing) {}
+  void* operator->() const {
+    return data_;
+  }
+  void clear() {
+    ctx_ = nullptr;
+    data_ = nullptr;
+  }
+  void* get() const {
+    return data_;
+  }
+  void* get_context() const {
+    return ctx_.get();
+  }
+  void* release_context() {
+    return ctx_.release();
+  }
+  std::unique_ptr<void, DeleterFnPtr>&& move_context() {
+    return std::move(ctx_);
+  }
+  C10_NODISCARD bool compare_exchange_deleter(
+      DeleterFnPtr expected_deleter,
+      DeleterFnPtr new_deleter) {
+    if (get_deleter() != expected_deleter)
+      return false;
+    ctx_ = std::unique_ptr<void, DeleterFnPtr>(ctx_.release(), new_deleter);
+    return true;
+  }
+
+  template <typename T>
+  T* cast_context(DeleterFnPtr expected_deleter) const {
+    if (get_deleter() != expected_deleter)
+      return nullptr;
+    return static_cast<T*>(get_context());
+  }
+  operator bool() const {
+    return data_ || ctx_;
+  }
+  DeleterFnPtr get_deleter() const {
+    return ctx_.get_deleter();
+  }
+};
+
+// Note [How UniqueVoidPtr is implemented]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// UniqueVoidPtr solves a common problem for allocators of tensor data, which
+// is that the data pointer (e.g., float*) which you are interested in, is not
+// the same as the context pointer (e.g., DLManagedTensor) which you need
+// to actually deallocate the data. Under a conventional deleter design, you
+// have to store extra context in the deleter itself so that you can actually
+// delete the right thing. Implementing this with standard C++ is somewhat
+// error-prone: if you use a std::unique_ptr to manage tensors, the deleter will
+// not be called if the data pointer is nullptr, which can cause a leak if the
+// context pointer is non-null (and the deleter is responsible for freeing both
+// the data pointer and the context pointer).
+//
+// So, in our reimplementation of unique_ptr, we just store the context
+// directly in the unique pointer, and attach the deleter to the context
+// pointer itself. In simple cases, the context pointer is just the data
+// pointer itself.
+
+inline bool operator==(const UniqueVoidPtr& sp, std::nullptr_t) noexcept {
+  return !sp;
+}
+inline bool operator==(std::nullptr_t, const UniqueVoidPtr& sp) noexcept {
+  return !sp;
+}
+inline bool operator!=(const UniqueVoidPtr& sp, std::nullptr_t) noexcept {
+  return sp;
+}
+inline bool operator!=(std::nullptr_t, const UniqueVoidPtr& sp) noexcept {
+  return sp;
+}
+
+} // namespace detail
+} // namespace c10
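A minimal sketch of the data/context split described in the note above; BufferCtx is a hypothetical stand-in for a real context such as DLManagedTensor:

    #include <c10/util/UniqueVoidPtr.h>

    // Hypothetical context object that owns the storage the data pointer aims at.
    struct BufferCtx {
      float data[16];
    };

    static void delete_buffer_ctx(void* ctx) {
      delete static_cast<BufferCtx*>(ctx);
    }

    c10::detail::UniqueVoidPtr make_buffer() {
      auto* ctx = new BufferCtx();
      // Data pointer and context pointer differ; the deleter receives the context.
      return c10::detail::UniqueVoidPtr(ctx->data, ctx, &delete_buffer_ctx);
    }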
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/Unroll.h
ADDED
@@ -0,0 +1,29 @@
+#pragma once
+#include <c10/macros/Macros.h>
+
+// Utility to guarantee complete unrolling of a loop where the bounds are known
+// at compile time. Various pragmas achieve similar effects, but are not as
+// portable across compilers.
+
+// Example: c10::ForcedUnroll<4>{}(f); is equivalent to f(0); f(1); f(2); f(3);
+
+namespace c10 {
+
+template <int n>
+struct ForcedUnroll {
+  template <typename Func>
+  C10_ALWAYS_INLINE void operator()(const Func& f) const {
+    ForcedUnroll<n - 1>{}(f);
+    f(n - 1);
+  }
+};
+
+template <>
+struct ForcedUnroll<1> {
+  template <typename Func>
+  C10_ALWAYS_INLINE void operator()(const Func& f) const {
+    f(0);
+  }
+};
+
+} // namespace c10
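A minimal sketch of the example in the comment above, with a concrete functor:

    #include <c10/util/Unroll.h>

    float dot4(const float* a, const float* b) {
      float acc = 0.0f;
      // Fully unrolled at compile time: acc += a[0]*b[0]; ... acc += a[3]*b[3];
      c10::ForcedUnroll<4>{}([&](int i) { acc += a[i] * b[i]; });
      return acc;
    }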
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/accumulate.h
ADDED
@@ -0,0 +1,134 @@
+// Copyright 2004-present Facebook. All Rights Reserved.
+
+#pragma once
+
+#include <c10/util/ArrayRef.h>
+
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+
+namespace c10 {
+
+/// Sum of a list of integers; accumulates into the int64_t datatype
+template <
+    typename C,
+    typename std::enable_if<
+        std::is_integral<typename C::value_type>::value,
+        int>::type = 0>
+inline int64_t sum_integers(const C& container) {
+  // std::accumulate infers return type from `init` type, so if the `init` type
+  // is not large enough to hold the result, computation can overflow. We use
+  // `int64_t` here to avoid this.
+  return std::accumulate(
+      container.begin(), container.end(), static_cast<int64_t>(0));
+}
+
+/// Sum of integer elements referred to by iterators; accumulates into the
+/// int64_t datatype
+template <
+    typename Iter,
+    typename std::enable_if<
+        std::is_integral<
+            typename std::iterator_traits<Iter>::value_type>::value,
+        int>::type = 0>
+inline int64_t sum_integers(Iter begin, Iter end) {
+  // std::accumulate infers return type from `init` type, so if the `init` type
+  // is not large enough to hold the result, computation can overflow. We use
+  // `int64_t` here to avoid this.
+  return std::accumulate(begin, end, static_cast<int64_t>(0));
+}
+
+/// Product of a list of integers; accumulates into the int64_t datatype
+template <
+    typename C,
+    typename std::enable_if<
+        std::is_integral<typename C::value_type>::value,
+        int>::type = 0>
+inline int64_t multiply_integers(const C& container) {
+  // std::accumulate infers return type from `init` type, so if the `init` type
+  // is not large enough to hold the result, computation can overflow. We use
+  // `int64_t` here to avoid this.
+  return std::accumulate(
+      container.begin(),
+      container.end(),
+      static_cast<int64_t>(1),
+      std::multiplies<>());
+}
+
+/// Product of integer elements referred to by iterators; accumulates into the
+/// int64_t datatype
+template <
+    typename Iter,
+    typename std::enable_if<
+        std::is_integral<
+            typename std::iterator_traits<Iter>::value_type>::value,
+        int>::type = 0>
+inline int64_t multiply_integers(Iter begin, Iter end) {
+  // std::accumulate infers return type from `init` type, so if the `init` type
+  // is not large enough to hold the result, computation can overflow. We use
+  // `int64_t` here to avoid this.
+  return std::accumulate(
+      begin, end, static_cast<int64_t>(1), std::multiplies<>());
+}
+
+/// Returns the product of all dimensions starting from k
+/// Returns 1 if k >= dims.size()
+template <
+    typename C,
+    typename std::enable_if<
+        std::is_integral<typename C::value_type>::value,
+        int>::type = 0>
+inline int64_t numelements_from_dim(const int k, const C& dims) {
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(k >= 0);
+
+  if (k > static_cast<int>(dims.size())) {
+    return 1;
+  } else {
+    auto cbegin = dims.cbegin();
+    std::advance(cbegin, k);
+    return multiply_integers(cbegin, dims.cend());
+  }
+}
+
+/// Product of all dims up to k (not including dims[k])
+/// Throws an error if k > dims.size()
+template <
+    typename C,
+    typename std::enable_if<
+        std::is_integral<typename C::value_type>::value,
+        int>::type = 0>
+inline int64_t numelements_to_dim(const int k, const C& dims) {
+  TORCH_INTERNAL_ASSERT(0 <= k);
+  TORCH_INTERNAL_ASSERT((unsigned)k <= dims.size());
+
+  auto cend = dims.cbegin();
+  std::advance(cend, k);
+  return multiply_integers(dims.cbegin(), cend);
+}
+
+/// Product of all dims between k and l (including dims[k] and excluding
+/// dims[l]); k and l may be supplied in either order
+template <
+    typename C,
+    typename std::enable_if<
+        std::is_integral<typename C::value_type>::value,
+        int>::type = 0>
+inline int64_t numelements_between_dim(int k, int l, const C& dims) {
+  TORCH_INTERNAL_ASSERT(0 <= k);
+  TORCH_INTERNAL_ASSERT(0 <= l);
+
+  if (k > l) {
+    std::swap(k, l);
+  }
+
+  TORCH_INTERNAL_ASSERT((unsigned)l < dims.size());
+
+  auto cbegin = dims.cbegin();
+  auto cend = dims.cbegin();
+  std::advance(cbegin, k);
+  std::advance(cend, l);
+  return multiply_integers(cbegin, cend);
+}
+
+} // namespace c10
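A minimal sketch of the accumulation helpers on a shape vector; the values in the comments follow directly from the definitions above:

    #include <c10/util/accumulate.h>
    #include <cstdint>
    #include <vector>

    void shape_demo() {
      std::vector<int32_t> dims = {2, 3, 4};
      int64_t numel = c10::multiply_integers(dims);        // 2 * 3 * 4 == 24
      int64_t inner = c10::numelements_from_dim(1, dims);  // 3 * 4 == 12
      int64_t outer = c10::numelements_to_dim(1, dims);    // 2
      (void)numel;
      (void)inner;
      (void)outer;
    }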
env-llmeval/lib/python3.10/site-packages/torch/include/c10/util/bits.h
ADDED
@@ -0,0 +1,61 @@
+#pragma once
+#include <cstdint>
+
+#include <c10/macros/Macros.h>
+
+namespace c10 {
+
+/**
+ * bits1x8 is an uninterpreted dtype of a tensor with 1 bit (packed to byte
+ * boundary), without any semantics defined.
+ */
+struct alignas(1) bits1x8 {
+  using underlying = uint8_t;
+  uint8_t val_;
+  bits1x8() = default;
+  C10_HOST_DEVICE explicit bits1x8(uint8_t val) : val_(val) {}
+};
+
+/**
+ * bits2x4 is an uninterpreted dtype of a tensor with 2 bits (packed to byte
+ * boundary), without any semantics defined.
+ */
+struct alignas(1) bits2x4 {
+  using underlying = uint8_t;
+  uint8_t val_;
+  bits2x4() = default;
+  C10_HOST_DEVICE explicit bits2x4(uint8_t val) : val_(val) {}
+};
+
+/**
+ * bits4x2 is an uninterpreted dtype of a tensor with 4 bits (packed to byte
+ * boundary), without any semantics defined.
+ */
+struct alignas(1) bits4x2 {
+  using underlying = uint8_t;
+  uint8_t val_;
+  bits4x2() = default;
+  C10_HOST_DEVICE explicit bits4x2(uint8_t val) : val_(val) {}
+};
+
+/**
+ * bits8 is an uninterpreted dtype of a tensor with 8 bits, without any
+ * semantics defined.
+ */
+struct alignas(1) bits8 {
+  uint8_t val_;
+  bits8() = default;
+  C10_HOST_DEVICE explicit bits8(uint8_t val) : val_(val) {}
+};
+
+/**
+ * bits16 is an uninterpreted dtype of a tensor with 16 bits, without any
+ * semantics defined.
+ */
+struct alignas(2) bits16 {
+  uint16_t val_;
+  bits16() = default;
+  C10_HOST_DEVICE explicit bits16(uint16_t val) : val_(val) {}
+};
+
+} // namespace c10
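A minimal sketch of packing two 4-bit fields into the uninterpreted bits4x2 dtype; the packing scheme here is an illustrative assumption, since the struct itself assigns no semantics to the bits:

    #include <c10/util/bits.h>
    #include <cstdint>

    // Pack two nibbles into one byte; high nibble first by convention here.
    c10::bits4x2 pack_nibbles(uint8_t hi, uint8_t lo) {
      return c10::bits4x2(static_cast<uint8_t>(((hi & 0x0F) << 4) | (lo & 0x0F)));
    }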