Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h +13 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h +165 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Array.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h +139 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h +32 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h +397 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h +209 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h +13 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h +48 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h +190 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h +201 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h +194 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h +139 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h +242 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h +16 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h +1 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h +92 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h +1055 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h +17 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h +95 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h +151 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h +2213 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h +88 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h +441 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h +687 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h +483 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/functional.h +54 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings_class.h +34 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h +1555 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h +2545 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h +36 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/qualified_name.h +161 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/type_factory.h +108 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h +54 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/typeid.h +1 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h +1096 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h +431 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h +468 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h +73 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h +246 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h +560 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h +628 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h +438 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h +461 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h +368 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h +298 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h +251 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h +245 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h
ADDED
@@ -0,0 +1,13 @@
#pragma once

#include <c10/macros/Export.h>

namespace c10 {
struct OperatorName;
}

namespace at {

// check if an op is a custom op (i.e. did not come from native_functions.yaml)
TORCH_API bool is_custom_op(const c10::OperatorName& opName);
}

llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h
ADDED
@@ -0,0 +1,165 @@
// This global header must not depend on native_functions.yaml or
// incremental builds will be next to useless
#pragma push_macro("TORCH_ASSERT_NO_OPERATORS")
#define TORCH_ASSERT_NO_OPERATORS

// This macro doesn't work if defined after the first time inttypes.h
// is included, so won't work anywhere if not defined here.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <cinttypes>

// This list of headers was generated using a script that finds
// high-impact headers and then manually tweaked to remove OS specific
// or duplicate headers (e.g. <cassert> and <assert.h>) and to remove
// "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10).

// To generate the initial list:
// 1. Build pytorch from scratch with all build caching disabled
// 2. Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing)
//    $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json
// 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/
//    $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json
//    Where the threshold can be tweaked until c10 and some of ATen
//    core are included but TORCH_ASSERT_NO_OPERATORS still passes.

#include <cerrno>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <array>
#include <atomic>
#include <chrono>
#include <complex>
#include <deque>
#include <exception>
#include <functional>
#include <initializer_list>
#include <iomanip>
#include <iosfwd>
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <new>
#include <numeric>
#include <ostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include <c10/core/Allocator.h>
#include <c10/core/AutogradState.h>
#include <c10/core/Backend.h>
#include <c10/core/DefaultDtype.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/GeneratorImpl.h>
#include <c10/core/InferenceMode.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/OptionalRef.h>
#include <c10/core/QScheme.h>
#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>
#include <c10/core/ScalarTypeToTypeMeta.h>
#include <c10/core/Storage.h>
#include <c10/core/StorageImpl.h>
#include <c10/core/SymBool.h>
#include <c10/core/SymFloat.h>
#include <c10/core/SymInt.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/TensorOptions.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/core/impl/PyInterpreter.h>
#include <c10/core/impl/SizesAndStrides.h>

#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>

#include <c10/util/AlignOf.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/BFloat16.h>
#include <c10/util/C++17.h>
#include <c10/util/ConstexprCrc.h>
#include <c10/util/Deprecated.h>
#include <c10/util/DimVector.h>
#include <c10/util/Exception.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Flags.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e5m2.h>
#include <c10/util/Float8_e4m3fnuz.h>
#include <c10/util/Float8_e5m2fnuz.h>
#include <c10/util/FunctionRef.h>
#include <c10/util/Half.h>
#include <c10/util/IdWrapper.h>
#include <c10/util/Logging.h>
#include <c10/util/MaybeOwned.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/Optional.h>
#include <c10/util/Registry.h>
#include <c10/util/SmallVector.h>
#include <c10/util/StringUtil.h>
#include <c10/util/ThreadLocalDebugInfo.h>
#include <c10/util/Type.h>
#include <c10/util/TypeCast.h>
#include <c10/util/TypeIndex.h>
#include <c10/util/TypeList.h>
#include <c10/util/TypeSafeSignMath.h>
#include <c10/util/TypeTraits.h>
#include <c10/util/UniqueVoidPtr.h>
#include <c10/util/accumulate.h>
#include <c10/util/bit_cast.h>
#include <c10/util/bits.h>
#include <c10/util/complex.h>
#include <c10/util/floating_point_utils.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h>
#include <c10/util/llvmMathExtras.h>
#include <c10/util/python_stub.h>
#include <c10/util/qint32.h>
#include <c10/util/qint8.h>
#include <c10/util/quint2x4.h>
#include <c10/util/quint4x2.h>
#include <c10/util/quint8.h>
#include <c10/util/safe_numerics.h>
#include <c10/util/string_utils.h>
#include <c10/util/string_view.h>
#include <c10/util/typeid.h>

#include <ATen/StorageUtils.h>
#include <ATen/core/ATen_fwd.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <ATen/core/DimVector.h>
#include <ATen/core/Dimname.h>
#include <ATen/core/Generator.h>
#include <ATen/core/NamedTensor.h>
#include <ATen/core/QuantizerBase.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/core/TensorBase.h>
#include <ATen/core/symbol.h>

#pragma pop_macro("TORCH_ASSERT_NO_OPERATORS")

llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Array.h
ADDED
@@ -0,0 +1,39 @@
#pragma once

// A fixed-size array type usable from both host and
// device code.

#include <c10/macros/Macros.h>
#include <c10/util/irange.h>

namespace at { namespace detail {

template <typename T, int size_>
struct Array {
  T data[size_];

  C10_HOST_DEVICE T operator[](int i) const {
    return data[i];
  }
  C10_HOST_DEVICE T& operator[](int i) {
    return data[i];
  }
#if defined(USE_ROCM)
  C10_HOST_DEVICE Array() = default;
  C10_HOST_DEVICE Array(const Array&) = default;
  C10_HOST_DEVICE Array& operator=(const Array&) = default;
#else
  Array() = default;
  Array(const Array&) = default;
  Array& operator=(const Array&) = default;
#endif
  static constexpr int size(){return size_;}
  // Fill the array with x.
  C10_HOST_DEVICE Array(T x) {
    for (int i = 0; i < size_; i++) {
      data[i] = x;
    }
  }
};

}}

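The snippet below is not part of the diff above. It is a minimal host-side sketch of how the fixed-size at::detail::Array declared in this header can be used, assuming the header is on the include path; the function name sum_first_three is a hypothetical example, not something defined by ATen.

#include <ATen/core/Array.h>

int sum_first_three() {
  at::detail::Array<int, 3> a(0);  // fill constructor: every element becomes 0
  a[0] = 1;                        // non-const operator[] returns T&
  a[1] = 2;
  a[2] = 3;
  static_assert(at::detail::Array<int, 3>::size() == 3, "size() is a compile-time constant");
  return a[0] + a[1] + a[2];       // 6
}
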
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h
ADDED
@@ -0,0 +1,25 @@
#include <c10/core/TensorOptions.h>

namespace c10 { namespace impl {

inline c10::optional<MemoryFormat>
check_tensor_options_and_extract_memory_format(
    const TensorOptions& options,
    c10::optional<MemoryFormat> memory_format) {
  TORCH_CHECK(
      options.requires_grad_opt() == c10::nullopt ||
          options.requires_grad_opt().value() == false,
      "Operators taking TensorOptions cannot take a TensorOptions with "
      "options.requires_grad set as true. This isn't implemented yet.");
  TORCH_CHECK(
      !(options.has_memory_format() && memory_format.has_value()),
      "Cannot set memory_format both in TensorOptions and explicit argument; please delete "
      "the redundant setter.");
  if (memory_format.has_value()) {
    return memory_format;
  } else {
    return options.memory_format_opt();
  }
}

}} // namespace impl namespace c10

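Not part of the diff: a hedged sketch of how a caller might use the helper declared above. resolve_memory_format is a hypothetical wrapper name; the behavior described in the comments follows directly from the function body shown in CheckMemoryFormat.h.

#include <ATen/core/CheckMemoryFormat.h>

c10::optional<c10::MemoryFormat> resolve_memory_format(
    const c10::TensorOptions& options,
    c10::optional<c10::MemoryFormat> explicit_format) {
  // Throws if options.requires_grad is set, or if a memory format is supplied
  // both inside `options` and as the explicit argument; otherwise it returns
  // whichever of the two sources actually carries a value.
  return c10::impl::check_tensor_options_and_extract_memory_format(
      options, explicit_format);
}
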
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h
ADDED
@@ -0,0 +1,139 @@
#pragma once

#include <c10/core/Backend.h>
#include <c10/core/ScalarType.h>
#include <c10/core/Layout.h>
#include <c10/core/TensorOptions.h>
#include <c10/core/Storage.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <ATen/core/Generator.h>


namespace at {

class Tensor;

// This class specifies a Backend and a ScalarType. Currently, it primarily
// serves as a replacement return value for Tensor::type(). Previously,
// Tensor::type() returned Type&, but we are changing Type to not be
// dtype-specific.
class TORCH_API DeprecatedTypeProperties {
 public:
  DeprecatedTypeProperties(Backend backend, ScalarType scalar_type)
    : backend_(backend), scalar_type_(scalar_type) {}

  Backend backend() const {
    return backend_;
  }

  Layout layout() const {
    return layout_from_backend(backend_);
  }

  bool is_sparse() const {
    return layout_from_backend(backend()) == kSparse;
  }

  bool is_sparse_csr() const {
    return layout_from_backend(backend()) == kSparseCsr;
  }

  c10::DeviceType device_type() const {
    return backendToDeviceType(backend_);
  }

  bool is_cuda() const {
    return backendToDeviceType(backend_) == kCUDA;
  }

  ScalarType scalarType() const {
    return scalar_type_;
  }

  caffe2::TypeMeta typeMeta() const {
    return scalarTypeToTypeMeta(scalar_type_);
  }

  bool operator==(const DeprecatedTypeProperties& other) const {
    return backend_ == other.backend() && scalar_type_ == other.scalarType();
  }

  bool operator!=(const DeprecatedTypeProperties& other) const {
    return !(*this == other);
  }

  std::string toString() const {
    std::string base_str;
    if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) {
      base_str = "UndefinedType";
    } else {
      base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type";
    }
    return base_str;
  }

  DeprecatedTypeProperties & toBackend(Backend b) const {
    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
        b, scalar_type_);
  }

  DeprecatedTypeProperties & toScalarType(ScalarType s) const {
    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
        backend_, s);
  }

  DeprecatedTypeProperties & cpu() const {
    return toBackend(Backend::CPU);
  }

  DeprecatedTypeProperties & cuda() const {
    return toBackend(Backend::CUDA);
  }

  DeprecatedTypeProperties & hip() const {
    return toBackend(Backend::HIP);
  }

  DeprecatedTypeProperties & privateUser1() const {
    return toBackend(Backend::PrivateUse1);
  }

  /// Constructs the `TensorOptions` from a type and a `device_index`.
  TensorOptions options(int16_t device_index = -1) const {
    return TensorOptions().dtype(typeMeta())
                          .device(device_type(), static_cast<c10::DeviceIndex>(device_index))
                          .layout(layout());
  }

  /// Constructs the `TensorOptions` from a type and a Device. Asserts that
  /// the device type matches the device type of the type.
  TensorOptions options(c10::optional<Device> device_opt) const {
    if (!device_opt.has_value()) {
      return options(-1);
    } else {
      Device device = device_opt.value();
      AT_ASSERT(device.type() == device_type());
      return options(device.index());
    }
  }

  operator TensorOptions() const {
    return options();
  }

  int64_t id() const {
    return static_cast<int64_t>(backend()) *
           static_cast<int64_t>(ScalarType::NumOptions) +
           static_cast<int64_t>(scalarType());
  }

  Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const;
  Storage unsafeStorageFromTH(void * th_pointer, bool retain) const;
  Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional<Device> to_device={}) const;

 private:
  Backend backend_;
  ScalarType scalar_type_;
};

}  // namespace at

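Not part of the diff: a small illustrative sketch of reading a DeprecatedTypeProperties instance through the global registry declared in the next file, assuming both headers above are available. The function name inspect_cpu_float is hypothetical; the expected values in the comments follow from the member functions shown above.

#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <string>

void inspect_cpu_float() {
  // Look up the unique properties object for (CPU backend, Float dtype).
  at::DeprecatedTypeProperties& props =
      at::globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
          c10::Backend::CPU, c10::ScalarType::Float);
  bool cuda = props.is_cuda();               // false for the CPU backend
  std::string name = props.toString();       // backend + dtype name, e.g. "CPUFloatType"
  c10::TensorOptions opts = props.options(); // dtype + device type + layout bundled together
  (void)cuda; (void)name; (void)opts;
}
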
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h
ADDED
@@ -0,0 +1,32 @@
#pragma once

// In order to preserve bc, we make DeprecatedTypeProperties instances unique
// just like they are for Type.

#include <c10/core/Backend.h>
#include <c10/core/ScalarType.h>
#include <memory>

namespace at {

class DeprecatedTypeProperties;

struct TORCH_API DeprecatedTypePropertiesDeleter {
  void operator()(DeprecatedTypeProperties * ptr);
};

class TORCH_API DeprecatedTypePropertiesRegistry {
 public:
  DeprecatedTypePropertiesRegistry();

  DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const;

 private:
  std::unique_ptr<DeprecatedTypeProperties> registry
    [static_cast<int>(Backend::NumOptions)]
    [static_cast<int>(ScalarType::NumOptions)];
};

TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();

} // namespace at

llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h
ADDED
@@ -0,0 +1,397 @@
#pragma once

#include <c10/macros/Macros.h>
#include <c10/macros/Export.h>
#include <c10/util/TypeTraits.h>
#include <c10/util/TypeList.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/order_preserving_flat_hash_map.h>
#include <c10/util/Optional.h>
#include <ATen/core/TensorBody.h>
#include <ATen/core/jit_type_base.h>

namespace c10 {
struct IValue;
template<class Key, class Value> class Dict;
struct Type;

namespace impl {

using valid_dict_key_types = guts::typelist::typelist<
  int64_t,
  std::string,
  double,
  c10::complex<double>,
  bool,
  at::Tensor
>;
}

namespace detail {

struct DictKeyHash {
  size_t operator()(const IValue& ivalue) const;
};

struct DictKeyEqualTo {
  bool operator()(const IValue& lhs, const IValue& rhs) const;
};

struct DictImpl final : public c10::intrusive_ptr_target {
  using dict_map_type = ska_ordered::order_preserving_flat_hash_map<IValue, IValue, DictKeyHash, DictKeyEqualTo>;
  struct DictElementTypes final {
    TypePtr keyType;
    TypePtr valueType;
  };

  explicit DictImpl(dict_map_type dict_, DictElementTypes elementTypes_)
  : dict(std::move(dict_))
  , elementTypes(std::move(elementTypes_)) {}
  dict_map_type dict;

  DictElementTypes elementTypes;

  intrusive_ptr<DictImpl> copy() const;
  friend TORCH_API bool operator==(const DictImpl& lhs, const DictImpl& rhs);
};

}

namespace impl {
template<class Key, class Value, class Iterator> class DictIterator;

/**
 * A reference to an entry in the Dict.
 * Use the `key()` and `value()` methods to read the element.
 */
template<class Key, class Value, class Iterator>
class DictEntryRef final {
public:
  explicit DictEntryRef(Iterator iterator)
  : iterator_(std::move(iterator)) {}

  decltype(auto) key() const {
    return iterator_->first.template to<Key>();
  }

  decltype(auto) value() const {
    return iterator_->second.template to<Value>();
  }

  template<class Value_>
  void setValue(Value_&& value) const {
    static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of setValue()");
    iterator_->second = Value(std::forward<Value_>(value));
  }

private:
  // allow copying and moving, but only our friends (i.e. the Dict class) can do
  // it. Copying/moving this reference wrapper would be too ambiguous to allow it
  // in the public API.
  DictEntryRef(const DictEntryRef&) = default;
  DictEntryRef& operator=(const DictEntryRef&) = default;
  DictEntryRef(DictEntryRef&&) noexcept = default;
  DictEntryRef& operator=(DictEntryRef&& rhs) & noexcept = default;

  Iterator iterator_;
  friend class DictIterator<Key, Value, Iterator>;
  friend class Dict<Key, Value>;
};

// this wraps map_type::iterator to make sure user code can't rely
// on it being the type of the underlying map.
template<class Key, class Value, class Iterator>
class DictIterator final {
public:
  // C++17 friendly std::iterator implementation
  using iterator_category = std::forward_iterator_tag;
  using value_type = DictEntryRef<Key, Value, Iterator>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type*;
  using reference = value_type&;

  explicit DictIterator() = default;
  ~DictIterator() = default;

  DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {}
  DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {}
  DictIterator& operator=(const DictIterator& rhs) {
    entryRef_ = rhs.entryRef_;
    return *this;
  }
  DictIterator& operator=(DictIterator&& rhs) noexcept {
    entryRef_ = std::move(rhs.entryRef_);
    return *this;
  }

  DictIterator& operator++() {
    ++entryRef_.iterator_;
    return *this;
  }

  DictIterator operator++(int) {
    DictIterator copy(*this);
    ++*this;
    return copy;
  }

  const DictEntryRef<Key, Value, Iterator>& operator*() const {
    return entryRef_;
  }

  const DictEntryRef<Key, Value, Iterator>* operator->() const {
    return &entryRef_;
  }

  friend difference_type operator-(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.entryRef_.iterator_ - rhs.entryRef_.iterator_;
  }

private:
  explicit DictIterator(Iterator iterator): entryRef_(std::move(iterator)) {}

  const Iterator& get_iterator_() const {
    return entryRef_.iterator_;
  }

  friend bool operator==(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.get_iterator_() == rhs.get_iterator_();
  }

  friend bool operator!=(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.get_iterator_() != rhs.get_iterator_();
  }

  friend bool operator<(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.get_iterator_() < rhs.get_iterator_();
  }

  friend bool operator<=(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.get_iterator_() <= rhs.get_iterator_();
  }

  friend bool operator>(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.get_iterator_() > rhs.get_iterator_();
  }

  friend bool operator>=(const DictIterator& lhs, const DictIterator& rhs) {
    return lhs.get_iterator_() >= rhs.get_iterator_();
  }

  DictEntryRef<Key, Value, Iterator> entryRef_;

  friend class DictIterator<Key, Value, typename c10::detail::DictImpl::dict_map_type::iterator>;
  friend class Dict<Key, Value>;
};

template<class Key, class Value> Dict<Key, Value> toTypedDict(Dict<IValue, IValue> dict);
template<class Key, class Value> Dict<IValue, IValue> toGenericDict(Dict<Key, Value> dict);
}

/**
 * An object of this class stores a map from Key to Value.
 *
 * This is a pointer type. After a copy, both Dicts
 * will share the same storage:
 *
 * > Dict<int, string> a;
 * > Dict<int, string> b = a;
 * > b.insert(3, "three");
 * > ASSERT("three" == a.at(3));
 *
 * We use this class in the PyTorch kernel API because that
 * allows us to do optimizations and switch out the underlying
 * map implementation without breaking backwards compatibility
 * for the kernel API.
 */
template<class Key, class Value>
class Dict final {
private:
  static_assert((std::is_same<IValue, Key>::value && std::is_same<IValue, Value>::value) || guts::typelist::contains<impl::valid_dict_key_types, Key>::value, "Invalid Key type for Dict. We only support int64_t, double, bool, and string.");

  // impl_ stores the underlying map as a ska_ordered::order_preserving_flat_hash_map.
  // We intentionally don't offer conversion from/to
  // order_preserving_flat_hash_map, return references to it or something like that,
  // because such operations would get expensive if we switch out
  // the actual map implementation.
  // This is an intrusive_ptr because Dict is a pointer type.
  // Invariant: This will never be a nullptr, there will always be a valid
  // DictImpl.
  c10::intrusive_ptr<detail::DictImpl> impl_;

  explicit Dict(c10::intrusive_ptr<detail::DictImpl>&& impl);
  friend struct IValue;
  template<class K, class V> friend Dict<K, V> impl::toTypedDict(Dict<IValue, IValue>);
  template<class K, class V> friend Dict<IValue, IValue> impl::toGenericDict(Dict<K, V>);

public:
  using key_type = Key;
  using mapped_type = Value;
  using size_type = typename detail::DictImpl::dict_map_type::size_type;
  using iterator = impl::DictIterator<Key, Value, typename detail::DictImpl::dict_map_type::iterator>;

  /**
   * Creates an empty dict.
   */
  explicit Dict();

  /**
   * Create a generic dict with runtime type information.
   * This only works for c10::impl::GenericDict and is not part of the public API
   * but only supposed to be used internally by PyTorch.
   */
  explicit Dict(TypePtr keyType, TypePtr valueType);

  ~Dict() = default;

  Dict(const Dict&) = default;
  Dict& operator=(const Dict&) = default;

  /**
   * Create a new Dict pointing to a deep copy of the same data.
   * The Dict returned is a new dict with separate storage.
   * Changes in it are not reflected in the original dict or vice versa.
   */
  Dict copy() const;

  /**
   * Returns an iterator to the first element of the container.
   * If the container is empty, the returned iterator will be equal to end().
   */
  iterator begin() const;

  /**
   * Returns an iterator to the element following the last element of the container.
   * This element acts as a placeholder; attempting to access it results in undefined behavior.
   */
  iterator end() const;

  /**
   * Checks if the container has no elements.
   */
  bool empty() const;

  /**
   * Returns the number of elements in the container.
   */
  size_type size() const;

  /**
   * Erases all elements from the container. After this call, size() returns zero.
   * Invalidates any references, pointers, or iterators referring to contained elements. May also invalidate past-the-end iterators.
   */
  void clear() const;

  /**
   * Inserts element(s) into the container, if the container doesn't already contain an element with an equivalent key.
   * May invalidate any references, pointers, or iterators referring to contained elements.
   *
   * @return A pair consisting of an iterator to the inserted element (or to the element that prevented the insertion) and a bool denoting whether the insertion took place.
   */
  template<class Key_, class Value_>
  std::pair<iterator, bool> insert(Key_&& key, Value_&& value) const;

  /**
   * If an element with the given key already exists, it is overwritten with the given value.
   * Otherwise, a new element with the given key and value are inserted.
   * May invalidate any references, pointers, or iterators referring to contained elements.
   *
   * @return The bool component is true if the insertion took place and false if the assignment took place. The iterator component is pointing at the element that was inserted or updated.
   */
  template<class Key_, class Value_>
  std::pair<iterator, bool> insert_or_assign(Key_&& key, Value_&& value) const;

  /**
   * Removes the element pointed to by iter.
   * May invalidate any references, pointers, or iterators referring to contained elements.
   * The iterator iter must be valid and dereferenceable. Thus the end() iterator (which is valid, but is not dereferenceable) cannot be used as a value for iter.
   */
  void erase(iterator iter) const;

  /**
   * Removes the element with the given key, if it exists.
   * May invalidate any references, pointers, or iterators referring to contained elements.
   *
   * @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't.
   */
  C10_NODISCARD size_t erase(const Key& key) const;

  /**
   * Returns the mapped value of the element with key equivalent to key.
   * If no such element exists, an exception of type std::out_of_range is thrown.
   */
  Value at(const Key& key) const;

  /**
   * Finds an element with key equivalent to key.
   *
   * @return Iterator to an element with key equivalent to key.
   * If no such element is found, past-the-end (see end()) iterator is returned.
   */
  iterator find(const Key& key) const;

  /**
   * Checks if there is an element with key equivalent to key in the container.
   *
   * @return true if there is such an element, otherwise false.
   */
  bool contains(const Key& key) const;

  /**
   * Increase the capacity so that at least count elements can be stored without
   * having to reallocate or rehash.
   */
  void reserve(size_type count) const;

  /**
   * Value equality comparison. This function implements Python-like semantics for
   * equality: two dicts with the same identity (e.g. same pointer) trivially
   * compare equal, otherwise each element is compared for equality.
   */
  template <class Key_, class Value_>
  friend bool operator==(
      const Dict<Key_, Value_>& lhs,
      const Dict<Key_, Value_>& rhs);
  template <class Key_, class Value_>
  friend bool operator!=(
      const Dict<Key_, Value_>& lhs,
      const Dict<Key_, Value_>& rhs);

  /**
   * Identity comparison. Returns true if and only if `rhs` represents the same
   * Dict object as `this`.
   */
  bool is(const Dict& rhs) const;

  // private API for now because the return type will change to TypePtr
  // instead of optional<TypePtr> once types are mandatory.
  TypePtr keyType() const;
  TypePtr valueType() const;

  // [unsafe set type]
  // These functions mutate the tagged type of this dictionary in place.
  // There is no checking that the members of the dictionary are instances
  // of the new types, nor is there a check that other IValues which
  // hold references to this dictionary have the right static type.
  // This functionality is used only in the unpickler, where at
  // creation type the real type of the dictionary is unknown, but
  // then later recovered from the static type information of the
  // unpickled object.
  void unsafeSetKeyType(TypePtr t);
  void unsafeSetValueType(TypePtr t);
};

namespace impl {
// GenericDict is how IValue stores dicts. It is, however, not part of the
// public API. Kernels should use Dicts with concrete Key, Value types instead
// (maybe except for some internal prim ops).
using GenericDict = Dict<IValue, IValue>;

}
}

namespace torch {
  template<class Key, class Value> using Dict = c10::Dict<Key, Value>;
}

#include <ATen/core/Dict_inl.h>  // IWYU pragma: keep

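Not part of the diff: a minimal sketch of using c10::Dict as declared above, illustrating the pointer (shared-storage) semantics described in the class comment. The function name dict_demo is hypothetical; the calls use only members declared in Dict.h.

#include <ATen/core/Dict.h>
#include <cstdint>
#include <string>

void dict_demo() {
  c10::Dict<std::string, int64_t> a;
  a.insert("three", 3);                            // inserts only if the key is absent
  a.insert_or_assign("three", 33);                 // overwrites the existing entry
  c10::Dict<std::string, int64_t> b = a;           // pointer semantics: b shares a's storage
  int64_t v = b.at("three");                       // 33, visible through either handle
  bool has = a.contains("three");                  // true
  c10::Dict<std::string, int64_t> deep = a.copy(); // deep copy with separate storage
  (void)v; (void)has; (void)deep;
}
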
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h
ADDED
@@ -0,0 +1,209 @@
#pragma once

#include <ATen/core/ivalue.h>
#include <c10/util/hash.h>

namespace c10 {
namespace detail {
inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const {
  if (lhs.isTensor() && rhs.isTensor()) {
    // for tensors, we compare only by identity (following how it's done in Python).
    return lhs.is(rhs);
  }
  // Otherwise, we first compare by identity for efficiency, then by value (see:
  // [container equality])
  return _fastEqualsForContainer(lhs, rhs);
}
}

template<class T> decltype(auto) getTypePtr();
std::string toString(const Type& type);

namespace impl {

template<class Key, class Value>
Dict<Key, Value> toTypedDict(GenericDict dict) {
  TORCH_INTERNAL_ASSERT(*getTypePtr<Key>() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Key types mismatch.");
  TORCH_INTERNAL_ASSERT(*getTypePtr<Value>() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Value types mismatch.");

  return Dict<Key, Value>(std::move(dict.impl_));
}

template<class Key, class Value>
GenericDict toGenericDict(Dict<Key, Value> dict) {
  return GenericDict(std::move(dict.impl_));
}
}

namespace detail {

inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
  if (ivalue.isInt()) {
    return std::hash<int64_t>()(ivalue.toInt());
  } else if (ivalue.isString()) {
    return std::hash<c10::string_view>()(ivalue.toStringView());
  } else if (ivalue.isDouble()) {
    return std::hash<double>()(ivalue.toDouble());
  } else if (ivalue.isComplexDouble()) {
    return c10::hash<c10::complex<double>>()(ivalue.toComplexDouble());
  } else if (ivalue.isBool()) {
    return std::hash<bool>()(ivalue.toBool());
  } else if (ivalue.isTensor()) {
    return std::hash<TensorImpl*>()(ivalue.toTensor().unsafeGetTensorImpl());
  } else if (ivalue.isDevice()) {
    return std::hash<Device>()(ivalue.toDevice());
  } else {
    throw std::runtime_error(
        "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
  }
}

inline intrusive_ptr<DictImpl> DictImpl::copy() const {
  return make_intrusive<DictImpl>(dict, elementTypes);
}

}

template<class Key, class Value>
Dict<Key, Value>::Dict()
  :Dict(make_intrusive<detail::DictImpl>(
      detail::DictImpl::dict_map_type(),
      detail::DictImpl::DictElementTypes{getTypePtr<Key>(), getTypePtr<Value>()})) {
  static_assert(!std::is_same<Key, IValue>::value, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
  static_assert(!std::is_same<Value, IValue>::value, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
}

template<class Key, class Value>
Dict<Key, Value>::Dict(TypePtr keyType, TypePtr valueType)
: Dict(make_intrusive<detail::DictImpl>(
    detail::DictImpl::dict_map_type(),
    detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) {
  static_assert(std::is_same<Key, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
  static_assert(std::is_same<Value, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
}

template<class Key, class Value>
Dict<Key, Value>::Dict(c10::intrusive_ptr<detail::DictImpl>&& impl): impl_(std::move(impl)) {}

template<class Key, class Value>
Dict<Key, Value> Dict<Key, Value>::copy() const {
  return Dict<Key, Value>(impl_->copy());
}

template<class Key, class Value>
typename Dict<Key, Value>::iterator Dict<Key, Value>::begin() const {
  return iterator{impl_->dict.begin()};
}

template<class Key, class Value>
typename Dict<Key, Value>::iterator Dict<Key, Value>::end() const {
  return iterator{impl_->dict.end()};
}

template<class Key, class Value>
bool Dict<Key, Value>::empty() const {
  return impl_->dict.empty();
}

template<class Key, class Value>
typename Dict<Key, Value>::size_type Dict<Key, Value>::size() const {
  return impl_->dict.size();
}

template<class Key, class Value>
void Dict<Key, Value>::clear() const {
  impl_->dict.clear();
}

template<class Key, class Value>
template<class Key_, class Value_>
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Key_&& key, Value_&& value) const {
  static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert");
  static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert");
  auto inserted = impl_->dict.emplace(
      Key(std::forward<Key_>(key)),
      Value(std::forward<Value_>(value)));
  return {iterator{inserted.first}, inserted.second};
}

template<class Key, class Value>
template<class Key_, class Value_>
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert_or_assign(Key_&& key, Value_&& value) const {
  static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert_or_assign");
  static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert_or_assign");
  auto inserted = impl_->dict.insert_or_assign(
      Key(std::forward<Key_>(key)),
      Value(std::forward<Value_>(value)));
  return {iterator{inserted.first}, inserted.second};
}

template<class Key, class Value>
void Dict<Key, Value>::erase(iterator iter) const {
  impl_->dict.erase(iter.entryRef_.iterator_);
}

template<class Key, class Value>
C10_NODISCARD size_t Dict<Key, Value>::erase(const Key& key) const {
  return impl_->dict.erase(key);
}

template<class Key, class Value>
Value Dict<Key, Value>::at(const Key& key) const {
  return impl_->dict.at(key).template to<Value>();
}

template<class Key, class Value>
typename Dict<Key, Value>::iterator Dict<Key, Value>::find(const Key& key) const {
  return iterator{impl_->dict.find(key)};
}

template<class Key, class Value>
bool Dict<Key, Value>::contains(const Key& key) const {
  return end() != find(key);
}

template<class Key, class Value>
void Dict<Key, Value>::reserve(size_type count) const {
  impl_->dict.reserve(count);
}

template<class Key, class Value>
TypePtr Dict<Key, Value>::keyType() const {
  return impl_->elementTypes.keyType;
}

template<class Key, class Value>
TypePtr Dict<Key, Value>::valueType() const {
  return impl_->elementTypes.valueType;
}
template <class Key, class Value>
void Dict<Key, Value>::unsafeSetKeyType(TypePtr t) {
  impl_->elementTypes.keyType = std::move(t);
}

template <class Key, class Value>
void Dict<Key, Value>::unsafeSetValueType(TypePtr t) {
  impl_->elementTypes.valueType = std::move(t);
}

template <class Key_, class Value_>
bool operator==(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
  // Dicts with the same identity trivially compare equal.
  if (lhs.impl_ == rhs.impl_) {
    return true;
  }

  // Otherwise compare the values
  return *lhs.impl_ == *rhs.impl_;
}

template <class Key_, class Value_>
bool operator!=(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
  return !(lhs == rhs);
}

template <class Key, class Value>
bool Dict<Key, Value>::is(const Dict& rhs) const {
  return this->impl_ == rhs.impl_;
}
}

llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h
ADDED
@@ -0,0 +1,13 @@
#pragma once
#include <c10/util/DimVector.h>

namespace at {

// Re-declaring 'DimVector' type and size inside 'at' namespace.
// This is done to avoid modifying every use into their 'c10'
// equivalent.

using c10::kDimVectorStaticSize;
using c10::DimVector;

} // namespace at

llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h
ADDED
@@ -0,0 +1,48 @@
#pragma once

#include <ATen/core/symbol.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Optional.h>
#include <ostream>

namespace at {

enum class NameType: uint8_t { BASIC, WILDCARD };

struct TORCH_API Dimname {
  static Dimname fromSymbol(Symbol name);
  static Dimname wildcard();
  static bool isValidName(const std::string& name);

  NameType type() const { return type_; }
  Symbol symbol() const { return name_; }

  bool isBasic() const { return type_ == NameType::BASIC; }
  bool isWildcard() const { return type_ == NameType::WILDCARD; }

  bool matches(Dimname other) const;
  c10::optional<Dimname> unify(Dimname other) const;

 private:
  Dimname(Symbol name)
    : name_(name), type_(NameType::BASIC) {}
  Dimname(Symbol name, NameType type)
    : name_(name), type_(type) {}

  Symbol name_;
  NameType type_;
};

using DimnameList = c10::ArrayRef<Dimname>;

TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname);

inline bool operator==(const Dimname& lhs, const Dimname& rhs) {
  return lhs.symbol() == rhs.symbol();
}

inline bool operator!=(const Dimname& lhs, const Dimname& rhs) {
  return !(lhs == rhs);
}

} // namespace at

llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h
ADDED
@@ -0,0 +1,190 @@
#pragma once

#include <mutex>
#include <deque>
#include <atomic>
#include <typeinfo>
#include <utility>
#include <cstddef>
#include <cstdint>

#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/core/Device.h>
#include <c10/core/DispatchKeySet.h>

// For the record I don't think this is a correct pimpl idiom.
// Including Impl header in interface header defeats the purpose
// because you can't change Impl private members without forcing
// everything that included the interface to rebuild.
// Impl should be forward-declared in the interface header instead.
#include <c10/core/GeneratorImpl.h>

/**
 * Note [Generator]
 * ~~~~~~~~~~~~~~~~
 * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm to
 * generate a seemingly random sequence of numbers, that may be later be used in creating
 * a random distribution. Such an engine almost always maintains a state and requires a
 * seed to start off the creation of random numbers. Often times, users have
 * found it beneficial to be able to explicitly create, retain, and destroy
 * PRNG states and also be able to have control over the seed value.
 *
 * A Generator in ATen gives users the ability to read, write and modify a PRNG engine.
 * For instance, it does so by letting users seed a PRNG engine, fork the state of the
 * engine, etc.
 *
 * By default, there is one generator per device, and a device's generator is
 * lazily created. A user can use the torch.Generator() api to create their own generator.
 */

/**
 * Note [Acquire lock when using random generators]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Generator and its derived classes are NOT thread-safe. Please note that most of the
 * places where we have inserted locking for generators are historically based, and we
 * haven't actually checked that everything is truly thread safe (and it probably isn't).
 * Please use the public mutex_ when using any methods from these classes, except for the
 * read-only methods. You can learn about the usage by looking into the unittests
 * (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard.
 *
 * TODO: Look into changing the threading semantics of Generators in ATen (e.g., making
 * them non-thread safe and instead making the generator state splittable, to accommodate
 * forks into other threads).
 */

namespace at {

class Tensor;

struct TORCH_API Generator {
  Generator() = default;

  explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl)
   : impl_(std::move(gen_impl)) {
    if (impl_.get() == nullptr) {
      throw std::runtime_error("GeneratorImpl with nullptr is not supported");
    }
  }

  bool operator==(const Generator& rhs) const {
    return this->impl_ == rhs.impl_;
  }

  bool operator!=(const Generator& rhs) const {
    return !((*this) == rhs);
  }

  bool defined() const {
    return static_cast<bool>(impl_);
  }

  c10::GeneratorImpl* unsafeGetGeneratorImpl() const {
    return impl_.get();
  }

  c10::GeneratorImpl* unsafeReleaseGeneratorImpl() {
    return impl_.release();
  }

  const c10::intrusive_ptr<c10::GeneratorImpl>& getIntrusivePtr() const {
    return impl_;
  }

  void set_current_seed(uint64_t seed) { impl_->set_current_seed(seed); }
  // Sets the offset of Generator state to the desired offset. This is currently
  // supported for only Philox based Generators, i.e., CUDA and MPS.
  void set_offset(uint64_t offset) { impl_->set_offset(offset); }

  // Returns the offset of Generator state. This is currently supported for only
  // Philox based Generators, i.e., CUDA and MPS.
  uint64_t get_offset() const { return impl_->get_offset(); }

  uint64_t current_seed() const { return impl_->current_seed(); }

  uint64_t seed() { return impl_->seed(); }

  // Implementation not inlined to prevent cycle reference between
  // `ATen/core/Generator.h` and `ATen/core/Tensor.h`
  void set_state(const at::Tensor& new_state);

  at::Tensor get_state() const;

  std::mutex& mutex() {
    return impl_->mutex_;
  }

  DispatchKeySet key_set() const {
    return impl_->key_set();
  }

  Device device() const { return impl_->device(); }

  inline void set_pyobj(PyObject* pyobj) const noexcept {
    impl_->set_pyobj(pyobj);
  }

  inline PyObject* pyobj() const noexcept {
    return impl_->pyobj();
  }

  template<typename T>
  T* get() const { return static_cast<T*>(impl_.get()); }

  Generator clone() const {
    return Generator(impl_->clone());
  }

 private:
  c10::intrusive_ptr<c10::GeneratorImpl> impl_;
};

template<class Impl, class... Args>
Generator make_generator(Args&&... args) {
  return Generator(c10::make_intrusive<Impl>(std::forward<Args>(args)...));
}

/**
 * Utility function to static cast input Generator* to
 * the backend generator type (CPU/CUDAGeneratorImpl etc.)
 */
template <typename T>
static inline T * check_generator(c10::optional<Generator> gen) {
  TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
  TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
  TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
  return gen->get<T>();
}

/**
 * Utility function used in tensor implementations, which
 * supplies the default generator to tensors, if an input generator
|
162 |
+
* is not supplied. The input Generator* is also static casted to
|
163 |
+
* the backend generator type (CPU/CUDAGeneratorImpl etc.)
|
164 |
+
*/
|
165 |
+
template <typename T>
|
166 |
+
static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) {
|
167 |
+
return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
|
168 |
+
}
|
169 |
+
|
170 |
+
namespace detail {
|
171 |
+
|
172 |
+
/**
|
173 |
+
* Helper function for checking the validity of new random generator
|
174 |
+
* state. Right now following conditions are checked:
|
175 |
+
*
|
176 |
+
* - The new state tensor must be a torch.ByteTensor
|
177 |
+
* - Data of the new state tensor must be contiguous
|
178 |
+
*/
|
179 |
+
static inline void check_rng_state(const c10::TensorImpl& new_state) {
|
180 |
+
TORCH_CHECK_TYPE(
|
181 |
+
new_state.layout() == kStrided && new_state.device().type() == kCPU && new_state.dtype() == kByte,
|
182 |
+
"RNG state must be a torch.ByteTensor"
|
183 |
+
);
|
184 |
+
|
185 |
+
TORCH_CHECK(new_state.is_contiguous(), "RNG state must be contiguous");
|
186 |
+
}
|
187 |
+
|
188 |
+
} // namespace detail
|
189 |
+
|
190 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/List.h>
|
4 |
+
#include <ATen/core/Tensor.h>
|
5 |
+
|
6 |
+
namespace at {
|
7 |
+
class Tensor;
|
8 |
+
class OptionalTensorRef;
|
9 |
+
}
|
10 |
+
|
11 |
+
namespace c10 {
|
12 |
+
namespace detail {
|
13 |
+
|
14 |
+
/*
|
15 |
+
* Specializations of `IListRefTagImplBase` that implement the default
|
16 |
+
* implementation for `IListRefTag::Unboxed`.
|
17 |
+
*/
|
18 |
+
template <typename T, typename ListElemT>
|
19 |
+
class IListRefTagImplBase<IListRefTag::Unboxed, T, ListElemT> {
|
20 |
+
public:
|
21 |
+
using elem_type = ListElemT;
|
22 |
+
using list_type = ArrayRef<elem_type>;
|
23 |
+
|
24 |
+
/*
|
25 |
+
* These `unwrap` static methods unwraps the inner containers out
|
26 |
+
* of `IListRef<T>` (and `IListRefIterator<T>`). They are required when
|
27 |
+
* the macro `TORCH_ILISTREF_UNWRAP` is called.
|
28 |
+
*/
|
29 |
+
static const list_type& unwrap(const IListRef<T>& ilist) {
|
30 |
+
return ilist.payload_.unboxed;
|
31 |
+
}
|
32 |
+
|
33 |
+
static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
|
34 |
+
return it.payload_.unboxed_iterator;
|
35 |
+
}
|
36 |
+
|
37 |
+
static const typename list_type::const_iterator& unwrap(
|
38 |
+
const IListRefIterator<T>& it) {
|
39 |
+
return it.payload_.unboxed_iterator;
|
40 |
+
}
|
41 |
+
|
42 |
+
/*
|
43 |
+
* We have these function (besides the `unwrap`s above) because the
|
44 |
+
* implementation for both `IListRef::operator[]` and `IListRefIterator::operator*`
|
45 |
+
* weren't syntatically equal for the existing tags at the time
|
46 |
+
* (`Unboxed` and `Boxed`).
|
47 |
+
*/
|
48 |
+
static IListRefConstRef<T> front(const list_type& lst) {
|
49 |
+
return lst.front();
|
50 |
+
}
|
51 |
+
|
52 |
+
static IListRefConstRef<T> iterator_get(
|
53 |
+
const typename list_type::const_iterator& it) {
|
54 |
+
return *it;
|
55 |
+
}
|
56 |
+
};
|
57 |
+
|
58 |
+
/*
|
59 |
+
* Specializations of `IListRefTagImplBase` that implement the default
|
60 |
+
* implementation for `IListRefTag::Boxed`.
|
61 |
+
*/
|
62 |
+
template <typename T, typename ListElemT>
|
63 |
+
class IListRefTagImplBase<IListRefTag::Boxed, T, ListElemT> {
|
64 |
+
public:
|
65 |
+
using elem_type = ListElemT;
|
66 |
+
using list_type = List<elem_type>;
|
67 |
+
|
68 |
+
static const list_type& unwrap(const IListRef<T>& ilist) {
|
69 |
+
return *ilist.payload_.boxed;
|
70 |
+
}
|
71 |
+
|
72 |
+
static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
|
73 |
+
return it.payload_.boxed_iterator;
|
74 |
+
}
|
75 |
+
|
76 |
+
static const typename list_type::const_iterator& unwrap(
|
77 |
+
const IListRefIterator<T>& it) {
|
78 |
+
return it.payload_.boxed_iterator;
|
79 |
+
}
|
80 |
+
|
81 |
+
static IListRefConstRef<T> front(const list_type& lst) {
|
82 |
+
return lst[0];
|
83 |
+
}
|
84 |
+
|
85 |
+
static IListRefConstRef<T> iterator_get(
|
86 |
+
const typename list_type::const_iterator& it) {
|
87 |
+
return (*it).get().toTensor();
|
88 |
+
}
|
89 |
+
};
|
90 |
+
|
91 |
+
/*
|
92 |
+
* Specializations of `IListRefTagImplBase` that implement the default
|
93 |
+
* implementation for `IListRefTag::Materialized`.
|
94 |
+
*/
|
95 |
+
template <typename T>
|
96 |
+
class IListRefTagImplBase<IListRefTag::Materialized, T, MaterializedIListRefElem<T>> {
|
97 |
+
public:
|
98 |
+
using elem_type = MaterializedIListRefElem<T>;
|
99 |
+
using list_type = MaterializedIListRef<T>;
|
100 |
+
|
101 |
+
static const list_type& unwrap(const IListRef<T>& ilist) {
|
102 |
+
return *ilist.payload_.materialized;
|
103 |
+
}
|
104 |
+
|
105 |
+
static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
|
106 |
+
return it.payload_.materialized_iterator;
|
107 |
+
}
|
108 |
+
|
109 |
+
static const typename list_type::const_iterator& unwrap(
|
110 |
+
const IListRefIterator<T>& it) {
|
111 |
+
return it.payload_.materialized_iterator;
|
112 |
+
}
|
113 |
+
|
114 |
+
static IListRefConstRef<T> front(const list_type& lst) {
|
115 |
+
return lst[0];
|
116 |
+
}
|
117 |
+
|
118 |
+
static IListRefConstRef<T> iterator_get(
|
119 |
+
const typename list_type::const_iterator& it) {
|
120 |
+
return *it;
|
121 |
+
}
|
122 |
+
};
|
123 |
+
|
124 |
+
/*
|
125 |
+
* [Note: ITensorListRef]
|
126 |
+
* Specializations necessary for `IListRef<at::Tensor>` type.
|
127 |
+
*
|
128 |
+
* Since the default implementations are usually done with supporting
|
129 |
+
* `Tensor` in mind, we only have to inherit from the base implementations.
|
130 |
+
*/
|
131 |
+
template <>
|
132 |
+
class IListRefTagImpl<IListRefTag::Unboxed, at::Tensor>
|
133 |
+
: public IListRefTagImplBase<IListRefTag::Unboxed, at::Tensor> {};
|
134 |
+
|
135 |
+
template <>
|
136 |
+
class IListRefTagImpl<IListRefTag::Boxed, at::Tensor>
|
137 |
+
: public IListRefTagImplBase<IListRefTag::Boxed, at::Tensor> {};
|
138 |
+
|
139 |
+
template <>
|
140 |
+
class IListRefTagImpl<IListRefTag::Materialized, at::Tensor>
|
141 |
+
: public IListRefTagImplBase<
|
142 |
+
IListRefTag::Materialized,
|
143 |
+
at::Tensor,
|
144 |
+
MaterializedIListRefElem<at::Tensor>> {};
|
145 |
+
|
146 |
+
/*
|
147 |
+
* [Note: IOptTensorListRef]
|
148 |
+
* Specializations necessary for `IListRef<at::OptionalTensorRef>` type.
|
149 |
+
*
|
150 |
+
* We can't get an `at::OptionalTensorRef` directly from an instance of
|
151 |
+
* `List<optional<Tensor>>` (the type that corresponds to the boxed world).
|
152 |
+
*
|
153 |
+
* So, the default implementation won't help us. Thus, we have to implement
|
154 |
+
* this method ourselves.
|
155 |
+
*/
|
156 |
+
template <>
|
157 |
+
class IListRefTagImpl<IListRefTag::Unboxed, at::OptionalTensorRef>
|
158 |
+
: public IListRefTagImplBase<IListRefTag::Unboxed, at::OptionalTensorRef> {};
|
159 |
+
|
160 |
+
template <>
|
161 |
+
class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef>
|
162 |
+
: public IListRefTagImplBase<IListRefTag::Boxed, at::OptionalTensorRef, optional<at::Tensor>> {
|
163 |
+
|
164 |
+
public:
|
165 |
+
/*
|
166 |
+
* Given an instance of the types corresponding to the `Boxed` tag, we override
|
167 |
+
* the default implementation, so that we can return a `at::OptionalTensorRef`.
|
168 |
+
*/
|
169 |
+
static IListRefConstRef<at::OptionalTensorRef> iterator_get(
|
170 |
+
const typename list_type::const_iterator& it) {
|
171 |
+
const auto& ivalue = (*it).get();
|
172 |
+
if (!ivalue.isNone()) {
|
173 |
+
const auto& tensor = ivalue.toTensor();
|
174 |
+
return (tensor.defined()) ? tensor : at::OptionalTensorRef{};
|
175 |
+
}
|
176 |
+
return {};
|
177 |
+
}
|
178 |
+
};
|
179 |
+
|
180 |
+
template <>
|
181 |
+
class IListRefTagImpl<IListRefTag::Materialized, at::OptionalTensorRef>
|
182 |
+
: public IListRefTagImplBase<
|
183 |
+
IListRefTag::Materialized,
|
184 |
+
at::OptionalTensorRef,
|
185 |
+
MaterializedIListRefElem<at::OptionalTensorRef>> {};
|
186 |
+
|
187 |
+
} // namespace detail
|
188 |
+
} // namespace c10
|
189 |
+
|
190 |
+
namespace at {
|
191 |
+
|
192 |
+
// [Note: ITensorListRef]
|
193 |
+
using ITensorListRef = c10::IListRef<at::Tensor>;
|
194 |
+
using ITensorListRefIterator = c10::IListRefIterator<at::Tensor>;
|
195 |
+
using MaterializedITensorListRef = c10::detail::MaterializedIListRef<at::Tensor>;
|
196 |
+
// [Note: IOptTensorListRef]
|
197 |
+
using IOptTensorListRef = c10::IListRef<at::OptionalTensorRef>;
|
198 |
+
using IOptTensorListRefIterator = c10::IListRefIterator<at::OptionalTensorRef>;
|
199 |
+
using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef<at::OptionalTensorRef>;
|
200 |
+
|
201 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h
ADDED
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/util/irange.h>
|
4 |
+
|
5 |
+
// define constants like M_PI and C keywords for MSVC
|
6 |
+
#ifdef _MSC_VER
|
7 |
+
#ifndef _USE_MATH_DEFINES
|
8 |
+
#define _USE_MATH_DEFINES
|
9 |
+
#endif
|
10 |
+
#include <math.h>
|
11 |
+
#endif
|
12 |
+
|
13 |
+
#include <array>
|
14 |
+
#include <cmath>
|
15 |
+
#include <cstdint>
|
16 |
+
|
17 |
+
namespace at {
|
18 |
+
|
19 |
+
constexpr int MERSENNE_STATE_N = 624;
|
20 |
+
constexpr int MERSENNE_STATE_M = 397;
|
21 |
+
constexpr uint32_t MATRIX_A = 0x9908b0df;
|
22 |
+
constexpr uint32_t UMASK = 0x80000000;
|
23 |
+
constexpr uint32_t LMASK = 0x7fffffff;
|
24 |
+
|
25 |
+
/**
|
26 |
+
* Note [Mt19937 Engine implementation]
|
27 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
28 |
+
* Originally implemented in:
|
29 |
+
* http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c
|
30 |
+
* and modified with C++ constructs. Moreover the state array of the engine
|
31 |
+
* has been modified to hold 32 bit uints instead of 64 bits.
|
32 |
+
*
|
33 |
+
* Note that we reimplemented mt19937 instead of using std::mt19937 because,
|
34 |
+
* at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2
|
35 |
+
* by default and following are the benchmark numbers (benchmark code can be found at
|
36 |
+
* https://github.com/syed-ahmed/benchmark-rngs):
|
37 |
+
*
|
38 |
+
* with -O2
|
39 |
+
* Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s
|
40 |
+
* Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s
|
41 |
+
* Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s
|
42 |
+
* Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s
|
43 |
+
*
|
44 |
+
* std::mt19937 is faster when used in conjunction with std::uniform_real_distribution,
|
45 |
+
* however we can't use std::uniform_real_distribution because of this bug:
|
46 |
+
* http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used
|
47 |
+
* std::uniform_real_distribution and filtered out the 1's, it is a different algorithm
|
48 |
+
* than what's in pytorch currently and that messes up the tests in tests_distributions.py.
|
49 |
+
* The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower
|
50 |
+
* than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter.
|
51 |
+
*
|
52 |
+
* Copyright notice:
|
53 |
+
* A C-program for MT19937, with initialization improved 2002/2/10.
|
54 |
+
* Coded by Takuji Nishimura and Makoto Matsumoto.
|
55 |
+
* This is a faster version by taking Shawn Cokus's optimization,
|
56 |
+
* Matthe Bellew's simplification, Isaku Wada's real version.
|
57 |
+
*
|
58 |
+
* Before using, initialize the state by using init_genrand(seed)
|
59 |
+
* or init_by_array(init_key, key_length).
|
60 |
+
*
|
61 |
+
* Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
|
62 |
+
* All rights reserved.
|
63 |
+
*
|
64 |
+
* Redistribution and use in source and binary forms, with or without
|
65 |
+
* modification, are permitted provided that the following conditions
|
66 |
+
* are met:
|
67 |
+
*
|
68 |
+
* 1. Redistributions of source code must retain the above copyright
|
69 |
+
* notice, this list of conditions and the following disclaimer.
|
70 |
+
*
|
71 |
+
* 2. Redistributions in binary form must reproduce the above copyright
|
72 |
+
* notice, this list of conditions and the following disclaimer in the
|
73 |
+
* documentation and/or other materials provided with the distribution.
|
74 |
+
*
|
75 |
+
* 3. The names of its contributors may not be used to endorse or promote
|
76 |
+
* products derived from this software without specific prior written
|
77 |
+
* permission.
|
78 |
+
*
|
79 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
80 |
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
81 |
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
82 |
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
83 |
+
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
84 |
+
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
85 |
+
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
86 |
+
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
87 |
+
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
88 |
+
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
89 |
+
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
90 |
+
*
|
91 |
+
*
|
92 |
+
* Any feedback is very welcome.
|
93 |
+
* http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
|
94 |
+
* email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
|
95 |
+
*/
|
96 |
+
|
97 |
+
/**
|
98 |
+
* mt19937_data_pod is used to get POD data in and out
|
99 |
+
* of mt19937_engine. Used in torch.get_rng_state and
|
100 |
+
* torch.set_rng_state functions.
|
101 |
+
*/
|
102 |
+
struct mt19937_data_pod {
|
103 |
+
uint64_t seed_;
|
104 |
+
int left_;
|
105 |
+
bool seeded_;
|
106 |
+
uint32_t next_;
|
107 |
+
std::array<uint32_t, MERSENNE_STATE_N> state_;
|
108 |
+
};
|
109 |
+
|
110 |
+
class mt19937_engine {
|
111 |
+
public:
|
112 |
+
|
113 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
114 |
+
inline explicit mt19937_engine(uint64_t seed = 5489) {
|
115 |
+
init_with_uint32(seed);
|
116 |
+
}
|
117 |
+
|
118 |
+
inline mt19937_data_pod data() const {
|
119 |
+
return data_;
|
120 |
+
}
|
121 |
+
|
122 |
+
inline void set_data(const mt19937_data_pod& data) {
|
123 |
+
data_ = data;
|
124 |
+
}
|
125 |
+
|
126 |
+
inline uint64_t seed() const {
|
127 |
+
return data_.seed_;
|
128 |
+
}
|
129 |
+
|
130 |
+
inline bool is_valid() {
|
131 |
+
if ((data_.seeded_ == true)
|
132 |
+
&& (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N)
|
133 |
+
&& (data_.next_ <= MERSENNE_STATE_N)) {
|
134 |
+
return true;
|
135 |
+
}
|
136 |
+
return false;
|
137 |
+
}
|
138 |
+
|
139 |
+
inline uint32_t operator()() {
|
140 |
+
if (--(data_.left_) == 0) {
|
141 |
+
next_state();
|
142 |
+
}
|
143 |
+
uint32_t y = *(data_.state_.data() + data_.next_++);
|
144 |
+
y ^= (y >> 11);
|
145 |
+
y ^= (y << 7) & 0x9d2c5680;
|
146 |
+
y ^= (y << 15) & 0xefc60000;
|
147 |
+
y ^= (y >> 18);
|
148 |
+
|
149 |
+
return y;
|
150 |
+
}
|
151 |
+
|
152 |
+
private:
|
153 |
+
mt19937_data_pod data_;
|
154 |
+
|
155 |
+
inline void init_with_uint32(uint64_t seed) {
|
156 |
+
data_.seed_ = seed;
|
157 |
+
data_.seeded_ = true;
|
158 |
+
data_.state_[0] = seed & 0xffffffff;
|
159 |
+
for (const auto j : c10::irange(1, MERSENNE_STATE_N)) {
|
160 |
+
data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j);
|
161 |
+
}
|
162 |
+
data_.left_ = 1;
|
163 |
+
data_.next_ = 0;
|
164 |
+
}
|
165 |
+
|
166 |
+
inline uint32_t mix_bits(uint32_t u, uint32_t v) {
|
167 |
+
return (u & UMASK) | (v & LMASK);
|
168 |
+
}
|
169 |
+
|
170 |
+
inline uint32_t twist(uint32_t u, uint32_t v) {
|
171 |
+
return (mix_bits(u,v) >> 1) ^ (v & 1 ? MATRIX_A : 0);
|
172 |
+
}
|
173 |
+
|
174 |
+
inline void next_state() {
|
175 |
+
uint32_t* p = data_.state_.data();
|
176 |
+
data_.left_ = MERSENNE_STATE_N;
|
177 |
+
data_.next_ = 0;
|
178 |
+
|
179 |
+
for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) {
|
180 |
+
*p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]);
|
181 |
+
}
|
182 |
+
|
183 |
+
for(int j = MERSENNE_STATE_M; --j; p++) {
|
184 |
+
*p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]);
|
185 |
+
}
|
186 |
+
|
187 |
+
*p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]);
|
188 |
+
}
|
189 |
+
|
190 |
+
};
|
191 |
+
|
192 |
+
typedef mt19937_engine mt19937;
|
193 |
+
|
194 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/Dimname.h>
|
4 |
+
#include <c10/core/TensorImpl.h>
|
5 |
+
|
6 |
+
namespace at {
|
7 |
+
|
8 |
+
class TensorBase;
|
9 |
+
|
10 |
+
// XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen.
|
11 |
+
// Due to the c10/ATen library split, TensorImpl cannot depend on Dimname,
|
12 |
+
// so we have a couple of workarounds.
|
13 |
+
//
|
14 |
+
// In the long term, we'll move Dimname to c10 and everything in this file
|
15 |
+
// can be refactored out. The main blocker for that is that "c10::Symbol"
|
16 |
+
// actually exists outside of c10 and needs to be moved in.
|
17 |
+
|
18 |
+
// TensorImpl has a unique_ptr<NamedTensorMetaInterface> field.
|
19 |
+
// XXX: Ideally we would just put optional<vector<Dimname>> into TensorImpl.
|
20 |
+
//
|
21 |
+
// This class has an important invariant: there must be at least ONE
|
22 |
+
// non-wildcard
|
23 |
+
struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
|
24 |
+
// This enum is to remind people that the invariant on constructors is that
|
25 |
+
// the list of dimnames must have at least one non-wildcard
|
26 |
+
enum HAS_NON_WILDCARD {
|
27 |
+
HasNonWildcard
|
28 |
+
};
|
29 |
+
|
30 |
+
explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
|
31 |
+
: names_(names.vec()) {
|
32 |
+
check_invariants();
|
33 |
+
}
|
34 |
+
explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names)
|
35 |
+
: names_(std::move(names)) {
|
36 |
+
check_invariants();
|
37 |
+
}
|
38 |
+
|
39 |
+
std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override {
|
40 |
+
return std::make_unique<NamedTensorMeta>(HasNonWildcard, names_);
|
41 |
+
}
|
42 |
+
|
43 |
+
DimnameList names() const { return names_; }
|
44 |
+
|
45 |
+
// Used for an assertion in TensorImpl.h
|
46 |
+
int64_t slow_dim() const override {
|
47 |
+
return names_.size();
|
48 |
+
}
|
49 |
+
|
50 |
+
void check_invariants() const {
|
51 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
52 |
+
std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
|
53 |
+
}
|
54 |
+
|
55 |
+
void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
|
56 |
+
TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
|
57 |
+
std::copy(new_names.begin(), new_names.end(), names_.begin());
|
58 |
+
check_invariants();
|
59 |
+
}
|
60 |
+
|
61 |
+
void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) {
|
62 |
+
TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
|
63 |
+
names_ = std::move(new_names);
|
64 |
+
check_invariants();
|
65 |
+
}
|
66 |
+
|
67 |
+
// INVARIANT: at least one Dimname is non-WILDCARD
|
68 |
+
std::vector<Dimname> names_;
|
69 |
+
};
|
70 |
+
|
71 |
+
// When NamesMode is disabled, then all operations ignore tensors' names fields.
|
72 |
+
// Concretely speaking, all tensors are treated as having nullopt names.
|
73 |
+
struct TORCH_API NamesMode {
|
74 |
+
static bool is_enabled();
|
75 |
+
static void set_enabled(bool enabled);
|
76 |
+
};
|
77 |
+
|
78 |
+
|
79 |
+
// A RAII, thread local (!) guard that enables or disables names upon
|
80 |
+
// construction, and sets it back to the original value upon destruction.
|
81 |
+
struct TORCH_API NoNamesGuard {
|
82 |
+
NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) {
|
83 |
+
NamesMode::set_enabled(false);
|
84 |
+
}
|
85 |
+
~NoNamesGuard() {
|
86 |
+
if (initialized) {
|
87 |
+
reset();
|
88 |
+
}
|
89 |
+
}
|
90 |
+
void reset() {
|
91 |
+
TORCH_INTERNAL_ASSERT(initialized);
|
92 |
+
NamesMode::set_enabled(prev_mode);
|
93 |
+
}
|
94 |
+
private:
|
95 |
+
bool prev_mode;
|
96 |
+
bool initialized;
|
97 |
+
};
|
98 |
+
|
99 |
+
void check_names_valid_for(const TensorBase& tensor, DimnameList names);
|
100 |
+
void check_names_valid_for(size_t tensor_dim, DimnameList names);
|
101 |
+
|
102 |
+
// Sets the names of `tensor` to be `names`.
|
103 |
+
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names);
|
104 |
+
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);
|
105 |
+
|
106 |
+
constexpr size_t kMaxNamedTensorDim = 64;
|
107 |
+
|
108 |
+
DimnameList default_names(size_t len);
|
109 |
+
|
110 |
+
namespace impl {
|
111 |
+
|
112 |
+
// Some helper functions on TensorImpl. Useful for working with names in TH.
|
113 |
+
// XXX: Ideally these would exist as methods on TensorImpl
|
114 |
+
TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
|
115 |
+
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
|
116 |
+
|
117 |
+
void check_names_valid_for(TensorImpl* impl, DimnameList names);
|
118 |
+
|
119 |
+
// Returns true if the tensor's names exist and are not all 'None'.
|
120 |
+
// Returns false if the tensor's names don't exist (were not allocated),
|
121 |
+
// or if all names are 'None'.
|
122 |
+
// We treat not-allocated-names the same as allocated names that are all 'None'.
|
123 |
+
TORCH_API bool has_names(const TensorImpl* impl);
|
124 |
+
|
125 |
+
// Returns the names of the tensor's dimensions.
|
126 |
+
// Unnamed tensors are treated as having 'None' in all dimension; this method
|
127 |
+
// would return a DimnameList of all 'None's for an unnamed tensor.
|
128 |
+
TORCH_API DimnameList get_names(const TensorImpl* impl);
|
129 |
+
|
130 |
+
// This is more of an implementation detail; one should use impl::get_names /
|
131 |
+
// Tensor::names() whenever possible because it provides a cleaner API.
|
132 |
+
// Returns the names of the tensor if they have been allocated; returns nullopt
|
133 |
+
// instead if the haven't been. The names of a tensor are not allocated if a
|
134 |
+
// tensor is constructed with names=None.
|
135 |
+
TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);
|
136 |
+
|
137 |
+
} // namespace impl
|
138 |
+
|
139 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// define constants like M_PI and C keywords for MSVC
|
4 |
+
#ifdef _MSC_VER
|
5 |
+
#define _USE_MATH_DEFINES
|
6 |
+
#include <math.h>
|
7 |
+
#endif
|
8 |
+
|
9 |
+
|
10 |
+
#ifdef __CUDACC__
|
11 |
+
#include <cuda.h>
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <ATen/core/Array.h>
|
15 |
+
#include <c10/macros/Macros.h>
|
16 |
+
#include <c10/util/Exception.h>
|
17 |
+
#include <c10/util/Half.h>
|
18 |
+
#include <cmath>
|
19 |
+
#include <cstdint>
|
20 |
+
|
21 |
+
namespace at {
|
22 |
+
|
23 |
+
// typedefs for holding vector data
|
24 |
+
namespace detail {
|
25 |
+
|
26 |
+
typedef at::detail::Array<uint32_t, 4> UINT4;
|
27 |
+
typedef at::detail::Array<uint32_t, 2> UINT2;
|
28 |
+
typedef at::detail::Array<double, 2> DOUBLE2;
|
29 |
+
typedef at::detail::Array<float, 2> FLOAT2;
|
30 |
+
|
31 |
+
} // namespace detail
|
32 |
+
|
33 |
+
/**
|
34 |
+
* Note [Philox Engine implementation]
|
35 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
36 |
+
* Originally implemented in PyTorch's fusion compiler
|
37 |
+
* Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
|
38 |
+
* for details regarding the engine.
|
39 |
+
*
|
40 |
+
* Note that currently this implementation of the philox engine is not used
|
41 |
+
* anywhere except for tests in cpu_generator_test.cpp. However, this engine
|
42 |
+
* will replace curandStatePhilox4_32_10_t in the future.
|
43 |
+
*
|
44 |
+
* The philox engine takes a seed value, a subsequeunce
|
45 |
+
* for starting the generation and an offset for the subsequence.
|
46 |
+
* Think of this engine as an algorithm producing a huge array. We are
|
47 |
+
* parallelizing this array by partitioning the huge array and assigning
|
48 |
+
* a thread index to each partition. In other words, each seed value
|
49 |
+
* (there are 2^64 possible seed values) gives a sub array of size
|
50 |
+
* 2^128 (each element in that array is a 128 bit number). Reasoning
|
51 |
+
* behind the array being of size 2^128 is, there are 2^64 possible
|
52 |
+
* thread index value and there is an array of size 2^64 for each of
|
53 |
+
* those thread index. Hence 2^64 * 2^64 = 2^128 for each seed value.
|
54 |
+
*
|
55 |
+
* In short, this generator can produce 2^64 (seed values) * 2^128 (number
|
56 |
+
* of elements in an array given by a seed value) = 2^192 values.
|
57 |
+
*
|
58 |
+
* Arguments:
|
59 |
+
* seed: Seed values could be any number from 0 to 2^64-1.
|
60 |
+
* subsequence: Subsequence is just the cuda thread indexing with:
|
61 |
+
* - blockIdx.x * blockDim.x + threadIdx.x
|
62 |
+
* offset: The offset variable in PhiloxEngine decides how many 128-bit
|
63 |
+
* random numbers to skip (i.e. how many groups of 4, 32-bit numbers to skip)
|
64 |
+
* and hence really decides the total number of randoms that can be achieved
|
65 |
+
* for the given subsequence.
|
66 |
+
*/
|
67 |
+
|
68 |
+
class philox_engine {
|
69 |
+
public:
|
70 |
+
|
71 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
72 |
+
C10_HOST_DEVICE inline explicit philox_engine(uint64_t seed = 67280421310721,
|
73 |
+
uint64_t subsequence = 0,
|
74 |
+
uint64_t offset = 0) {
|
75 |
+
|
76 |
+
reset_state(seed, subsequence);
|
77 |
+
incr_n(offset);
|
78 |
+
}
|
79 |
+
|
80 |
+
C10_HOST_DEVICE inline void reset_state(uint64_t seed = 67280421310721,
|
81 |
+
uint64_t subsequence = 0) {
|
82 |
+
key_[0] = static_cast<uint32_t>(seed);
|
83 |
+
key_[1] = static_cast<uint32_t>(seed >> 32);
|
84 |
+
counter_ = detail::UINT4(0);
|
85 |
+
counter_[2] = static_cast<uint32_t>(subsequence);
|
86 |
+
counter_[3] = static_cast<uint32_t>(subsequence >> 32);
|
87 |
+
STATE = 0;
|
88 |
+
}
|
89 |
+
|
90 |
+
/**
|
91 |
+
* Set the offset field of Philox Generator to the desired offset.
|
92 |
+
*/
|
93 |
+
C10_HOST_DEVICE inline void set_offset(uint64_t offset) {
|
94 |
+
counter_[0] = static_cast<uint32_t>(offset);
|
95 |
+
counter_[1] = static_cast<uint32_t>(offset >> 32);
|
96 |
+
}
|
97 |
+
|
98 |
+
/**
|
99 |
+
* Gets the current offset of the Philox Generator.
|
100 |
+
*/
|
101 |
+
C10_HOST_DEVICE uint64_t get_offset() const {
|
102 |
+
uint64_t lo = static_cast<uint64_t>(counter_[0]);
|
103 |
+
uint64_t hi = static_cast<uint64_t>(counter_[1]) << 32;
|
104 |
+
return lo | hi;
|
105 |
+
}
|
106 |
+
|
107 |
+
/**
|
108 |
+
* Produces a unique 32-bit pseudo random number on every invocation. Bookeeps state to avoid waste.
|
109 |
+
*/
|
110 |
+
C10_HOST_DEVICE inline uint32_t operator()(int32_t n_rounds = 10) { // 10 here to preserve back-compat behavior
|
111 |
+
if(STATE == 0) {
|
112 |
+
detail::UINT4 counter = counter_;
|
113 |
+
detail::UINT2 key = key_;
|
114 |
+
output_ = rand(counter, key, n_rounds);
|
115 |
+
incr();
|
116 |
+
}
|
117 |
+
uint32_t ret = output_[static_cast<int>(STATE)];
|
118 |
+
STATE = (STATE + 1) & 3;
|
119 |
+
return ret;
|
120 |
+
}
|
121 |
+
|
122 |
+
inline float randn(uint32_t n_rounds) {
|
123 |
+
#ifdef __CUDA_ARCH__
|
124 |
+
AT_ASSERT(false, "Unsupported invocation of randn on CUDA");
|
125 |
+
#endif
|
126 |
+
if(STATE == 0) {
|
127 |
+
detail::UINT4 counter = counter_;
|
128 |
+
detail::UINT2 key = key_;
|
129 |
+
output_ = rand(counter, key, n_rounds);
|
130 |
+
incr();
|
131 |
+
}
|
132 |
+
// TODO(min-jean-cho) change to Polar method, a more efficient version of Box-Muller method
|
133 |
+
// TODO(voz) We use std:: below, and thus need a separate impl for CUDA.
|
134 |
+
float u1 = 1 - uint32_to_uniform_float(output_[0]); // uint32_to_uniform_float returns [0,1), we need (0,1] to avoid passing 0 to log.
|
135 |
+
float u2 = 1 - uint32_to_uniform_float(output_[1]);
|
136 |
+
return static_cast<float>(std::sqrt(-2.0 * std::log(u1)) * std::cos(2.0 * M_PI * u2));
|
137 |
+
}
|
138 |
+
|
139 |
+
/**
|
140 |
+
* Function that Skips N 128 bit numbers in a subsequence
|
141 |
+
*/
|
142 |
+
C10_HOST_DEVICE inline void incr_n(uint64_t n) {
|
143 |
+
uint32_t nlo = static_cast<uint32_t>(n);
|
144 |
+
uint32_t nhi = static_cast<uint32_t>(n >> 32);
|
145 |
+
counter_[0] += nlo;
|
146 |
+
// if overflow in x has occurred, carry over to nhi
|
147 |
+
if (counter_[0] < nlo) {
|
148 |
+
nhi++;
|
149 |
+
// if overflow in nhi has occurred during carry over,
|
150 |
+
// propagate that overflow to y and exit to increment z
|
151 |
+
// otherwise return
|
152 |
+
counter_[1] += nhi;
|
153 |
+
if(nhi != 0) {
|
154 |
+
if (nhi <= counter_[1]) {
|
155 |
+
return;
|
156 |
+
}
|
157 |
+
}
|
158 |
+
} else {
|
159 |
+
// if overflow in y has occurred during addition,
|
160 |
+
// exit to increment z
|
161 |
+
// otherwise return
|
162 |
+
counter_[1] += nhi;
|
163 |
+
if (nhi <= counter_[1]) {
|
164 |
+
return;
|
165 |
+
}
|
166 |
+
}
|
167 |
+
if (++counter_[2])
|
168 |
+
return;
|
169 |
+
++counter_[3];
|
170 |
+
}
|
171 |
+
|
172 |
+
/**
|
173 |
+
* Function that Skips one 128 bit number in a subsequence
|
174 |
+
*/
|
175 |
+
C10_HOST_DEVICE inline void incr() {
|
176 |
+
if (++counter_[0])
|
177 |
+
return;
|
178 |
+
if (++counter_[1])
|
179 |
+
return;
|
180 |
+
if (++counter_[2]) {
|
181 |
+
return;
|
182 |
+
}
|
183 |
+
++counter_[3];
|
184 |
+
}
|
185 |
+
|
186 |
+
private:
|
187 |
+
detail::UINT4 counter_;
|
188 |
+
detail::UINT4 output_;
|
189 |
+
detail::UINT2 key_;
|
190 |
+
uint32_t STATE;
|
191 |
+
|
192 |
+
C10_HOST_DEVICE inline uint32_t mulhilo32(uint32_t a, uint32_t b,
|
193 |
+
uint32_t *result_high) {
|
194 |
+
#ifdef __CUDA_ARCH__
|
195 |
+
*result_high = __umulhi(a, b);
|
196 |
+
return a*b;
|
197 |
+
#else
|
198 |
+
const uint64_t product = static_cast<uint64_t>(a) * b;
|
199 |
+
*result_high = static_cast<uint32_t>(product >> 32);
|
200 |
+
return static_cast<uint32_t>(product);
|
201 |
+
#endif
|
202 |
+
}
|
203 |
+
|
204 |
+
C10_HOST_DEVICE inline detail::UINT4 single_round(detail::UINT4 ctr, detail::UINT2 in_key) {
|
205 |
+
uint32_t hi0 = 0;
|
206 |
+
uint32_t hi1 = 0;
|
207 |
+
uint32_t lo0 = mulhilo32(kPhiloxSA, ctr[0], &hi0);
|
208 |
+
uint32_t lo1 = mulhilo32(kPhiloxSB, ctr[2], &hi1);
|
209 |
+
detail::UINT4 ret;
|
210 |
+
ret[0] = hi1 ^ ctr[1] ^ in_key[0];
|
211 |
+
ret[1] = lo1;
|
212 |
+
ret[2] = hi0 ^ ctr[3] ^ in_key[1];
|
213 |
+
ret[3] = lo0;
|
214 |
+
return ret;
|
215 |
+
}
|
216 |
+
|
217 |
+
C10_HOST_DEVICE constexpr float uint32_to_uniform_float(uint32_t value) {
|
218 |
+
// maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
|
219 |
+
constexpr float scale = 4.6566127342e-10;
|
220 |
+
return static_cast<float>(value & 0x7FFFFFFF) * scale;
|
221 |
+
}
|
222 |
+
|
223 |
+
|
224 |
+
|
225 |
+
C10_HOST_DEVICE inline detail::UINT4 rand(detail::UINT4& counter, detail::UINT2& key, uint32_t n_rounds) {
|
226 |
+
for (uint32_t round = 0; round < (n_rounds - 1); round++) {
|
227 |
+
counter = single_round(counter, key);
|
228 |
+
key[0] += (kPhilox10A); key[1] += (kPhilox10B);
|
229 |
+
}
|
230 |
+
return single_round(counter, key);
|
231 |
+
}
|
232 |
+
|
233 |
+
|
234 |
+
static const uint32_t kPhilox10A = 0x9E3779B9;
|
235 |
+
static const uint32_t kPhilox10B = 0xBB67AE85;
|
236 |
+
static const uint32_t kPhiloxSA = 0xD2511F53;
|
237 |
+
static const uint32_t kPhiloxSB = 0xCD9E8D57;
|
238 |
+
};
|
239 |
+
|
240 |
+
typedef philox_engine Philox4_32;
|
241 |
+
|
242 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
namespace at {
|
4 |
+
namespace Reduction {
|
5 |
+
|
6 |
+
// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py
|
7 |
+
// These constants control the reduction behavior of loss functions.
|
8 |
+
// Ideally, this would be a scoped enum, but jit doesn't support that
|
9 |
+
enum Reduction {
|
10 |
+
None, // Do not reduce
|
11 |
+
Mean, // (Possibly weighted) mean of losses
|
12 |
+
Sum, // Sum losses
|
13 |
+
END
|
14 |
+
};
|
15 |
+
} // namespace Reduction
|
16 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
#include <c10/core/Scalar.h>
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/TensorBody.h>
|
4 |
+
#include <c10/util/Exception.h>
|
5 |
+
|
6 |
+
namespace at {
|
7 |
+
class TORCH_API OptionalTensorRef {
|
8 |
+
public:
|
9 |
+
OptionalTensorRef() = default;
|
10 |
+
|
11 |
+
~OptionalTensorRef() {
|
12 |
+
ref_.unsafeReleaseTensorImpl();
|
13 |
+
}
|
14 |
+
|
15 |
+
OptionalTensorRef(const TensorBase& src)
|
16 |
+
: ref_(Tensor::unsafe_borrow_t{}, src) {
|
17 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined());
|
18 |
+
}
|
19 |
+
|
20 |
+
OptionalTensorRef(const OptionalTensorRef& rhs)
|
21 |
+
: ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {}
|
22 |
+
|
23 |
+
OptionalTensorRef& operator=(OptionalTensorRef rhs) {
|
24 |
+
std::swap(ref_, rhs.ref_);
|
25 |
+
return *this;
|
26 |
+
}
|
27 |
+
|
28 |
+
bool has_value() const {
|
29 |
+
return ref_.defined();
|
30 |
+
}
|
31 |
+
|
32 |
+
const Tensor& getTensorRef() const & {
|
33 |
+
return ref_;
|
34 |
+
}
|
35 |
+
|
36 |
+
const Tensor& operator*() const & {
|
37 |
+
return ref_;
|
38 |
+
}
|
39 |
+
|
40 |
+
const Tensor* operator->() const & {
|
41 |
+
return &ref_;
|
42 |
+
}
|
43 |
+
|
44 |
+
operator bool() const {
|
45 |
+
return ref_.defined();
|
46 |
+
}
|
47 |
+
|
48 |
+
private:
|
49 |
+
Tensor ref_;
|
50 |
+
};
|
51 |
+
|
52 |
+
// Use to convert a TensorBase (that may be undefined) to an at::Tensor
|
53 |
+
// without bumping refcount.
|
54 |
+
class TORCH_API TensorRef {
|
55 |
+
public:
|
56 |
+
~TensorRef() {
|
57 |
+
ref_.unsafeReleaseTensorImpl();
|
58 |
+
}
|
59 |
+
|
60 |
+
TensorRef(const TensorBase& src)
|
61 |
+
: ref_(Tensor::unsafe_borrow_t{}, src) {}
|
62 |
+
|
63 |
+
const Tensor& operator*() const & {
|
64 |
+
return ref_;
|
65 |
+
}
|
66 |
+
private:
|
67 |
+
Tensor ref_;
|
68 |
+
};
|
69 |
+
|
70 |
+
template <typename T>
|
71 |
+
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> {
|
72 |
+
// Return the grad argument in case of a hook with void return type to have an
|
73 |
+
// std::function with Tensor return type
|
74 |
+
static_assert(std::is_same<decltype(hook(Tensor())), void>::value,
|
75 |
+
"Expected hook to return void");
|
76 |
+
return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
|
77 |
+
TensorRef grad(grad_base);
|
78 |
+
fn(*grad);
|
79 |
+
return Tensor();
|
80 |
+
});
|
81 |
+
}
|
82 |
+
|
83 |
+
template <typename T>
|
84 |
+
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t<T> {
|
85 |
+
return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
|
86 |
+
TensorRef grad(grad_base);
|
87 |
+
Tensor ret = fn(*grad);
|
88 |
+
return TensorBase(std::move(ret));
|
89 |
+
});
|
90 |
+
}
|
91 |
+
|
92 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h
ADDED
@@ -0,0 +1,1055 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/core/Device.h>
|
4 |
+
#include <c10/core/Layout.h>
|
5 |
+
#include <c10/core/MemoryFormat.h>
|
6 |
+
#include <c10/core/ScalarType.h>
|
7 |
+
#include <c10/core/ScalarTypeToTypeMeta.h>
|
8 |
+
#include <c10/core/Storage.h>
|
9 |
+
#include <c10/core/SymIntArrayRef.h>
|
10 |
+
#include <c10/core/TensorImpl.h>
|
11 |
+
#include <c10/core/TensorOptions.h>
|
12 |
+
#include <c10/core/UndefinedTensorImpl.h>
|
13 |
+
#include <c10/core/WrapDimMinimal.h>
|
14 |
+
#include <c10/util/C++17.h>
|
15 |
+
#include <c10/util/Exception.h>
|
16 |
+
#include <c10/util/ExclusivelyOwned.h>
|
17 |
+
#include <c10/util/ExclusivelyOwnedTensorTraits.h>
|
18 |
+
#include <c10/util/MaybeOwned.h>
|
19 |
+
#include <c10/util/Optional.h>
|
20 |
+
#include <c10/util/intrusive_ptr.h>
|
21 |
+
|
22 |
+
#include <ATen/core/NamedTensor.h>
|
23 |
+
#include <ATen/core/QuantizerBase.h>
|
24 |
+
#include <ATen/core/TensorAccessor.h>
|
25 |
+
#include <ATen/StorageUtils.h>
|
26 |
+
|
27 |
+
namespace c10 {
|
28 |
+
class Scalar;
|
29 |
+
}
|
30 |
+
|
31 |
+
namespace torch { namespace autograd {
|
32 |
+
|
33 |
+
struct Node;
|
34 |
+
|
35 |
+
}} // namespace torch::autograd
|
36 |
+
|
37 |
+
namespace at {
|
38 |
+
|
39 |
+
class Tensor;
|
40 |
+
class TensorBase;
|
41 |
+
|
42 |
+
// Convert Tensor to TensorBase without any need to include Tensor.h
|
43 |
+
TORCH_API const TensorBase& get_tensor_base(const Tensor& t);
|
44 |
+
|
45 |
+
namespace impl {
|
46 |
+
inline bool variable_excluded_from_dispatch() {
|
47 |
+
#ifdef C10_MOBILE
|
48 |
+
// Please read the comment in `VariableFallbackKernel.cpp` about the background of this change.
|
49 |
+
return true;
|
50 |
+
#else
|
51 |
+
return c10::impl::tls_local_dispatch_key_set().excluded_.isSupersetOf(c10::autograd_dispatch_keyset);
|
52 |
+
#endif
|
53 |
+
}
|
54 |
+
|
55 |
+
}
|
56 |
+
|
57 |
+
// NOTE: [Tensor vs. TensorBase]
|
58 |
+
//
|
59 |
+
// Tensor, being the central data structure in PyTorch, gets used and
|
60 |
+
// it's header included almost everywhere. Unfortunately this means
|
61 |
+
// every time an operator signature is updated or changed in
|
62 |
+
// native_functions.yaml, you (and every other PyTorch developer) need
|
63 |
+
// to recompile all of ATen and it's dependencies.
|
64 |
+
//
|
65 |
+
// TensorBase aims to break up these header dependencies, and improve
|
66 |
+
// incremental build times for all PyTorch developers. TensorBase
|
67 |
+
// represents a reference counted handle to TensorImpl, exactly the
|
68 |
+
// same as Tensor. However, TensorBase doesn't have code generated
|
69 |
+
// methods in it's API and thus no dependence on native_functions.yaml.
|
70 |
+
//
|
71 |
+
// Usage tips
|
72 |
+
// ----------
|
73 |
+
// - You can `#define TORCH_ASSERT_NO_OPERATORS` at the top of a .cpp
|
74 |
+
// or .cu file to ensure it has no header dependencies on
|
75 |
+
// native_functions.yaml (direct or indirect).
|
76 |
+
// - Tensor inherits from TensorBase, so functions taking
|
77 |
+
// `const TensorBase &` are callable with Tensor as well.
|
78 |
+
// - TensorBase can be converted to tensor with `Tensor(tensor_base)`,
|
79 |
+
// but this requires a reference-count bump. OptionalTensorRef on
|
80 |
+
// the other hand can materialize a `const Tensor &` without
|
81 |
+
// touching the reference-count.
|
82 |
+
class TORCH_API TensorBase {
|
83 |
+
public:
|
84 |
+
struct unsafe_borrow_t { explicit unsafe_borrow_t() = default; };
|
85 |
+
|
86 |
+
protected:
|
87 |
+
// Create a Tensor with a +0 reference count. Special care must be
|
88 |
+
// taken to avoid decrementing this reference count at destruction
|
89 |
+
// time. Intended to support MaybeOwnedTraits<Tensor>.
|
90 |
+
explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs)
|
91 |
+
: impl_(c10::intrusive_ptr<at::TensorImpl, UndefinedTensorImpl>::reclaim(rhs.impl_.get())) {}
|
92 |
+
friend MaybeOwnedTraits<TensorBase>;
|
93 |
+
|
94 |
+
public:
|
95 |
+
TensorBase() = default;
|
96 |
+
// This constructor should not be used by end users and is an implementation
|
97 |
+
// detail invoked by autogenerated code.
|
98 |
+
explicit TensorBase(
|
99 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
|
100 |
+
: impl_(std::move(tensor_impl)) {
|
101 |
+
if (impl_.get() == nullptr) {
|
102 |
+
throw std::runtime_error("TensorImpl with nullptr is not supported");
|
103 |
+
}
|
104 |
+
}
|
105 |
+
TensorBase(const TensorBase&) = default;
|
106 |
+
TensorBase(TensorBase&&) noexcept = default;
|
107 |
+
|
108 |
+
public:
|
109 |
+
// Creates a new wrapper from TensorImpl. Intentionally a free method because
|
110 |
+
// it should be used with care. Checks necessary invariants
|
111 |
+
static TensorBase wrap_tensor_impl(
|
112 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
|
113 |
+
TensorBase r(std::move(tensor_impl));
|
114 |
+
r.enforce_invariants();
|
115 |
+
return r;
|
116 |
+
}
|
117 |
+
|
118 |
+
int64_t dim() const {
|
119 |
+
return impl_->dim();
|
120 |
+
}
|
121 |
+
int64_t storage_offset() const {
|
122 |
+
return impl_->storage_offset();
|
123 |
+
}
|
124 |
+
|
125 |
+
TensorBase contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
|
126 |
+
if (is_contiguous(memory_format)) {
|
127 |
+
return *this;
|
128 |
+
} else {
|
129 |
+
return __dispatch_contiguous(memory_format);
|
130 |
+
}
|
131 |
+
}
|
132 |
+
|
133 |
+
/// Should be used if *this can reasonably be expected to be contiguous and
|
134 |
+
/// performance is important.
|
135 |
+
/// Compared to contiguous, it saves a reference count
|
136 |
+
/// increment/decrement if *this is already contiguous, at the cost
|
137 |
+
/// in all cases of an extra pointer of stack usage, an extra branch
|
138 |
+
/// to access, and an extra branch at destruction time.
|
139 |
+
c10::MaybeOwned<TensorBase> expect_contiguous(
|
140 |
+
MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
|
141 |
+
|
142 |
+
// Use .contiguous() instead. Trying to borrow from a prvalue
|
143 |
+
// will only lead to trouble and dangling references.
|
144 |
+
c10::MaybeOwned<TensorBase> expect_contiguous(
|
145 |
+
MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
|
146 |
+
|
147 |
+
const TensorBase& fill_(const c10::Scalar& scalar) const;
|
148 |
+
const TensorBase& zero_() const;
|
149 |
+
|
150 |
+
TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
|
151 |
+
|
152 |
+
bool is_complex() const {
|
153 |
+
return at::isComplexType(this->scalar_type());
|
154 |
+
}
|
155 |
+
|
156 |
+
bool is_floating_point() const {
|
157 |
+
return at::isFloatingType(this->scalar_type());
|
158 |
+
}
|
159 |
+
|
160 |
+
bool is_signed() const {
|
161 |
+
return at::isSignedType(this->scalar_type());
|
162 |
+
}
|
163 |
+
|
164 |
+
c10::SymInt sym_size(int64_t dim) const {
|
165 |
+
return impl_->sym_size(dim);
|
166 |
+
}
|
167 |
+
|
168 |
+
c10::SymInt sym_stride(int64_t dim) const {
|
169 |
+
const auto sizes = this->sym_strides();
|
170 |
+
const auto ndim = static_cast<int64_t>(sizes.size());
|
171 |
+
// false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
|
172 |
+
return sizes[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)];
|
173 |
+
|
174 |
+
}
|
175 |
+
|
176 |
+
int64_t size(int64_t dim) const {
|
177 |
+
return impl_->size(dim);
|
178 |
+
}
|
179 |
+
|
180 |
+
int64_t stride(int64_t dim) const {
|
181 |
+
const auto strides = this->strides();
|
182 |
+
const auto ndim = static_cast<int64_t>(strides.size());
|
183 |
+
// false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
|
184 |
+
return strides[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)];
|
185 |
+
}
|
186 |
+
|
187 |
+
TensorImpl * unsafeGetTensorImpl() const {
|
188 |
+
return impl_.get();
|
189 |
+
}
|
190 |
+
TensorImpl * unsafeReleaseTensorImpl() {
|
191 |
+
return impl_.release();
|
192 |
+
}
|
193 |
+
const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr() const {
|
194 |
+
return impl_;
|
195 |
+
}
|
196 |
+
|
197 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> unsafeReleaseIntrusivePtr() {
|
198 |
+
return std::move(impl_);
|
199 |
+
}
|
200 |
+
|
201 |
+
bool defined() const {
|
202 |
+
return impl_;
|
203 |
+
}
|
204 |
+
|
205 |
+
void reset() {
|
206 |
+
impl_.reset();
|
207 |
+
}
|
208 |
+
|
209 |
+
#if defined (_MSC_VER)
|
210 |
+
TensorBase& operator=(const TensorBase& x) & {
|
211 |
+
impl_ = x.impl_;
|
212 |
+
return *this;
|
213 |
+
};
|
214 |
+
TensorBase& operator=(TensorBase&& x) & noexcept {
|
215 |
+
impl_ = std::move(x.impl_);
|
216 |
+
return *this;
|
217 |
+
}
|
218 |
+
#else
|
219 |
+
TensorBase& operator=(const TensorBase& x) & = default;
|
220 |
+
TensorBase& operator=(TensorBase&& x) & noexcept = default;
|
221 |
+
#endif
|
222 |
+
|
223 |
+
// Ban assignment to rvalues, since at::Tensor (weirdly) performs a deep copy here
|
224 |
+
TensorBase& operator=(const TensorBase&) && = delete;
|
225 |
+
TensorBase& operator=(TensorBase&&) && noexcept = delete;
|
226 |
+
|
227 |
+
bool is_same(const TensorBase& other) const noexcept {
|
228 |
+
return impl_ == other.impl_;
|
229 |
+
}
|
230 |
+
size_t use_count() const noexcept {
|
231 |
+
return impl_.use_count();
|
232 |
+
}
|
233 |
+
size_t weak_use_count() const noexcept {
|
234 |
+
return impl_.weak_use_count();
|
235 |
+
}
|
236 |
+
|
237 |
+
std::string toString() const;
|
238 |
+
|
239 |
+
IntArrayRef sizes() const {
|
240 |
+
return impl_->sizes();
|
241 |
+
}
|
242 |
+
c10::SymIntArrayRef sym_sizes() const {
|
243 |
+
return impl_->sym_sizes();
|
244 |
+
}
|
245 |
+
c10::SymIntArrayRef sym_strides() const {
|
246 |
+
return impl_->sym_strides();
|
247 |
+
}
|
248 |
+
IntArrayRef strides() const {
|
249 |
+
return impl_->strides();
|
250 |
+
}
|
251 |
+
// See impl::get_opt_names in ATen/NamedTensor.h for docs.
|
252 |
+
c10::optional<DimnameList> opt_names() const {
|
253 |
+
return impl::get_opt_names(unsafeGetTensorImpl());
|
254 |
+
}
|
255 |
+
// See impl::get_names in ATen/NamedTensor.h for docs.
|
256 |
+
DimnameList names() const {
|
257 |
+
return impl::get_names(unsafeGetTensorImpl());
|
258 |
+
}
|
259 |
+
int64_t ndimension() const {
|
260 |
+
return dim();
|
261 |
+
}
|
262 |
+
|
263 |
+
bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const {
|
264 |
+
return impl_->is_contiguous(memory_format);
|
265 |
+
}
|
266 |
+
|
267 |
+
bool is_non_overlapping_and_dense() const {
|
268 |
+
return impl_->is_non_overlapping_and_dense();
|
269 |
+
}
|
270 |
+
|
271 |
+
at::MemoryFormat suggest_memory_format(
|
272 |
+
bool channels_last_strides_exact_match = false) const {
|
273 |
+
// Setting channels_last_strides_exact_match to true forces the function to
|
274 |
+
// also check the strides of size-0 and size-1 dimensions.
|
275 |
+
if (layout() == at::kStrided) {
|
276 |
+
if (impl_->is_strides_like_channels_last()) {
|
277 |
+
if (!channels_last_strides_exact_match ||
|
278 |
+
get_channels_last_strides_2d(sizes()) == strides()) {
|
279 |
+
return at::MemoryFormat::ChannelsLast;
|
280 |
+
}
|
281 |
+
}
|
282 |
+
else if (impl_->is_strides_like_channels_last_3d()) {
|
283 |
+
if (!channels_last_strides_exact_match ||
|
284 |
+
get_channels_last_strides_3d(sizes()) == strides()) {
|
285 |
+
return at::MemoryFormat::ChannelsLast3d;
|
286 |
+
}
|
287 |
+
}
|
288 |
+
}
|
289 |
+
return at::MemoryFormat::Contiguous;
|
290 |
+
}
|
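As a rough usage sketch (assuming `src` is a defined strided tensor; the variable names are hypothetical):

// Copy `src` into whichever memory format its strides already suggest,
// so channels-last inputs stay channels-last after the copy.
at::TensorBase dst = src.to(src.options(), /*non_blocking=*/false,
                            /*copy=*/true, src.suggest_memory_format());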
291 |
+
|
292 |
+
// Total bytes consumed by the "view" of elements of the array. Does not
|
293 |
+
// include size of metadata. The number reported here does not necessarily
|
294 |
+
// correspond to the true physical memory consumed by a tensor; instead,
|
295 |
+
// it reports the memory the tensor would take *if* it were contiguous.
|
296 |
+
// Defined to be numel() * itemsize()
|
297 |
+
size_t nbytes() const {
|
298 |
+
TORCH_CHECK(layout() != at::kSparse,
|
299 |
+
"nbytes is not defined for sparse tensors. If you want the size of the constituent " \
|
300 |
+
"tensors, add the nbytes of the indices and values. If you want the size of the " \
|
301 |
+
"equivalent dense tensor, multiply numel() by element_size()");
|
302 |
+
return impl_->numel() * impl_->itemsize();
|
303 |
+
}
|
304 |
+
|
305 |
+
c10::SymInt sym_nbytes() const {
|
306 |
+
TORCH_CHECK(layout() != at::kSparse,
|
307 |
+
"nbytes is not defined for sparse tensors. If you want the size of the constituent " \
|
308 |
+
"tensors, add the nbytes of the indices and values. If you want the size of the " \
|
309 |
+
"equivalent dense tensor, multiply numel() by element_size()");
|
310 |
+
return impl_->sym_numel() * impl_->itemsize();
|
311 |
+
}
|
312 |
+
|
313 |
+
int64_t numel() const {
|
314 |
+
return impl_->numel();
|
315 |
+
}
|
316 |
+
|
317 |
+
c10::SymInt sym_numel() const {
|
318 |
+
return impl_->sym_numel();
|
319 |
+
}
|
320 |
+
|
321 |
+
c10::SymInt sym_storage_offset() const {
|
322 |
+
return impl_->sym_storage_offset();
|
323 |
+
}
|
324 |
+
|
325 |
+
// Length of one array element in bytes. This is the traditional
|
326 |
+
// Numpy naming.
|
327 |
+
size_t itemsize() const {
|
328 |
+
return impl_->itemsize();
|
329 |
+
}
|
330 |
+
|
331 |
+
// Same as itemsize(). This is the PyTorch naming.
|
332 |
+
int64_t element_size() const {
|
333 |
+
return static_cast<int64_t>(impl_->itemsize());
|
334 |
+
}
|
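A small sanity-check sketch of the relationship described above (assumes `t` is a defined dense tensor; TORCH_INTERNAL_ASSERT comes from c10):

// For dense tensors these always agree:
//   nbytes() == numel() * itemsize() == numel() * element_size()
// e.g. a 2x3 float32 tensor reports numel() == 6, itemsize() == 4, nbytes() == 24.
TORCH_INTERNAL_ASSERT(t.nbytes() == t.numel() * t.itemsize());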
335 |
+
|
336 |
+
DispatchKeySet key_set() const {
|
337 |
+
return impl_->key_set();
|
338 |
+
}
|
339 |
+
ScalarType scalar_type() const {
|
340 |
+
return typeMetaToScalarType(impl_->dtype());
|
341 |
+
}
|
342 |
+
bool has_storage() const {
|
343 |
+
return defined() && impl_->has_storage();
|
344 |
+
}
|
345 |
+
const Storage& storage() const {
|
346 |
+
return impl_->storage();
|
347 |
+
}
|
348 |
+
bool is_alias_of(const at::TensorBase& other) const{
|
349 |
+
return impl_->storage().is_alias_of(other.storage());
|
350 |
+
}
|
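A quick sketch of what is_alias_of reports (hypothetical tensors; `at::zeros`, `select`, and `clone` come from the full Tensor API rather than this header):

at::Tensor base = at::zeros({4, 4});
at::Tensor row  = base.select(/*dim=*/0, /*index=*/0);   // a view into `base`
bool aliased    = row.is_alias_of(base);                 // true: same underlying storage
bool distinct   = row.clone().is_alias_of(base);         // false: the clone owns new storage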
351 |
+
|
352 |
+
// Move the storage backend to shm based
|
353 |
+
// to enable memory sharing across processes.
|
354 |
+
//
|
355 |
+
// NB1: the ideal behavior of this API still requires further discussion
|
356 |
+
// but for now we are inclined to keep it consistent with existing THP behavior
|
357 |
+
// https://github.com/pytorch/pytorch/blob/4dca9bde0552afc67b5b74f4a0696fe6055709c4/torch/storage.py#L196-L212
|
358 |
+
// so we don't assert on anything here and rely on caller knowing
|
359 |
+
// what it's doing.
|
360 |
+
//
|
361 |
+
// NB2: this currently provides Linux fd based shm support only
|
362 |
+
// to simplify the storage lifetime management logic in ATen
|
363 |
+
// and similarly for now we are not adding support for file system based
|
364 |
+
// shm support like in THP due to additional GC manager support needed
|
365 |
+
// to prevent leaks.
|
366 |
+
// As such, calling this from unsupported systems (e.g. Windows) would fail.
|
367 |
+
void share_memory_() {
|
368 |
+
at::share_memory_(*this);
|
369 |
+
}
|
370 |
+
|
371 |
+
inline bool _is_zerotensor() const {
|
372 |
+
return impl_->_is_zerotensor();
|
373 |
+
}
|
374 |
+
|
375 |
+
inline void _set_zero(bool zero) const {
|
376 |
+
impl_->_set_zero(zero);
|
377 |
+
}
|
378 |
+
|
379 |
+
inline bool is_conj() const {
|
380 |
+
return impl_->is_conj();
|
381 |
+
}
|
382 |
+
|
383 |
+
// sets the conjugate bit of a tensor.
|
384 |
+
// NOTE: Conjugate bit is supposed to be a read-only field. Only change this, if you are sure
|
385 |
+
// that's what you want. Changing this might lead to incorrect behavior since conjugation is
|
386 |
+
// a lazy operation and we rely on this bit to determine if a conjugation needs to be materialized.
|
387 |
+
inline void _set_conj(bool conjugate) const {
|
388 |
+
impl_->_set_conj(conjugate);
|
389 |
+
}
|
390 |
+
|
391 |
+
inline bool is_neg() const {
|
392 |
+
return impl_->is_neg();
|
393 |
+
}
|
394 |
+
|
395 |
+
// sets the negative bit of a tensor.
|
396 |
+
// NOTE: Negative bit is supposed to be a read-only field. Only change this, if you are sure
|
397 |
+
// that's what you want. Changing this might lead to incorrect behavior since we rely on this
|
398 |
+
// bit to determine if a negation needs to be materialized.
|
399 |
+
inline void _set_neg(bool negative) const {
|
400 |
+
impl_->_set_neg(negative);
|
401 |
+
}
|
402 |
+
|
403 |
+
/// Returns a `Tensor`'s layout.
|
404 |
+
Layout layout() const {
|
405 |
+
return impl_->layout();
|
406 |
+
}
|
407 |
+
|
408 |
+
/// Returns a `Tensor`'s dtype (`TypeMeta`).
|
409 |
+
caffe2::TypeMeta dtype() const {
|
410 |
+
return impl_->dtype();
|
411 |
+
}
|
412 |
+
|
413 |
+
/// Returns a `Tensor`'s device.
|
414 |
+
inline Device device() const {
|
415 |
+
return impl_->device();
|
416 |
+
}
|
417 |
+
|
418 |
+
/// Returns a `Tensor`'s device index.
|
419 |
+
DeviceIndex get_device() const {
|
420 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
421 |
+
return impl_->get_device();
|
422 |
+
}
|
423 |
+
|
424 |
+
/// Returns if a `Tensor` has CPU backend.
|
425 |
+
bool is_cpu() const {
|
426 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
427 |
+
return impl_->is_cpu();
|
428 |
+
}
|
429 |
+
|
430 |
+
/// Returns if a `Tensor` has CUDA backend.
|
431 |
+
bool is_cuda() const {
|
432 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
433 |
+
return impl_->is_cuda();
|
434 |
+
}
|
435 |
+
|
436 |
+
/// Returns if a `Tensor` has IPU backend.
|
437 |
+
bool is_ipu() const {
|
438 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
439 |
+
return impl_->is_ipu();
|
440 |
+
}
|
441 |
+
|
442 |
+
/// Returns if a `Tensor` has XPU backend.
|
443 |
+
bool is_xpu() const {
|
444 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
445 |
+
return impl_->is_xpu();
|
446 |
+
}
|
447 |
+
|
448 |
+
/// Returns if a `Tensor` has XLA backend.
|
449 |
+
bool is_xla() const {
|
450 |
+
return impl_->is_xla();
|
451 |
+
}
|
452 |
+
|
453 |
+
/// Returns if a `Tensor` has MTIA backend.
|
454 |
+
bool is_mtia() const {
|
455 |
+
return impl_->is_mtia();
|
456 |
+
}
|
457 |
+
|
458 |
+
/// Returns if a `Tensor` has HPU backend.
|
459 |
+
bool is_hpu() const {
|
460 |
+
return impl_->is_hpu();
|
461 |
+
}
|
462 |
+
|
463 |
+
/// Returns if a `Tensor` has Lazy backend.
|
464 |
+
bool is_lazy() const {
|
465 |
+
return impl_->is_lazy();
|
466 |
+
}
|
467 |
+
|
468 |
+
/// Returns if a `Tensor` has HIP backend.
|
469 |
+
bool is_hip() const {
|
470 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
471 |
+
return impl_->is_hip();
|
472 |
+
}
|
473 |
+
|
474 |
+
/// Returns if a `Tensor` has VE backend.
|
475 |
+
bool is_ve() const {
|
476 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
477 |
+
return impl_->is_ve();
|
478 |
+
}
|
479 |
+
|
480 |
+
/// Returns if a `Tensor` has PrivateUse1 backend.
|
481 |
+
bool is_privateuseone() const {
|
482 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
483 |
+
return impl_->is_privateuseone();
|
484 |
+
}
|
485 |
+
|
486 |
+
/// Returns if a `Tensor` has sparse backend.
|
487 |
+
bool is_sparse() const {
|
488 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
489 |
+
return impl_->is_sparse();
|
490 |
+
}
|
491 |
+
|
492 |
+
/// Returns if a `Tensor` has a sparse CSR backend.
|
493 |
+
bool is_sparse_csr() const {
|
494 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
495 |
+
return impl_->is_sparse_csr();
|
496 |
+
}
|
497 |
+
|
498 |
+
/// Returns if a `Tensor` is mkldnn tensor.
|
499 |
+
bool is_mkldnn() const {
|
500 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
501 |
+
return impl_->is_mkldnn();
|
502 |
+
}
|
503 |
+
|
504 |
+
/// Returns if a `Tensor` is mps tensor.
|
505 |
+
bool is_mps() const {
|
506 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
507 |
+
return impl_->is_mps();
|
508 |
+
}
|
509 |
+
|
510 |
+
/// Returns if a `Tensor` is ort tensor.
|
511 |
+
bool is_ort() const {
|
512 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
513 |
+
return impl_->is_ort();
|
514 |
+
}
|
515 |
+
|
516 |
+
/// Returns if a `Tensor` is vulkan tensor.
|
517 |
+
bool is_vulkan() const {
|
518 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
519 |
+
return impl_->is_vulkan();
|
520 |
+
}
|
521 |
+
|
522 |
+
/// Returns if a `Tensor` is metal tensor.
|
523 |
+
bool is_metal() const {
|
524 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
525 |
+
return impl_->is_metal();
|
526 |
+
}
|
527 |
+
|
528 |
+
/// Returns if a `Tensor` has quantized backend.
|
529 |
+
bool is_quantized() const {
|
530 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
531 |
+
return impl_->is_quantized();
|
532 |
+
}
|
533 |
+
|
534 |
+
/// Returns if a `Tensor` is a meta tensor. Meta tensors can
|
535 |
+
/// also have other designations.
|
536 |
+
bool is_meta() const {
|
537 |
+
return impl_->is_meta();
|
538 |
+
}
|
539 |
+
|
540 |
+
/// Returns if a `Tensor` is an inference tensor.
|
541 |
+
bool is_inference() const {
|
542 |
+
return impl_->is_inference();
|
543 |
+
}
|
544 |
+
|
545 |
+
/// Returns if a `Tensor` is a NestedTensor.
|
546 |
+
bool is_nested() const {
|
547 |
+
return impl_->is_nested();
|
548 |
+
}
|
549 |
+
|
550 |
+
/// If a tensor is a quantized tensor, returns its quantizer
|
551 |
+
/// TODO: it's not in native_functions.yaml yet as it's not exposed to python
|
552 |
+
QuantizerPtr quantizer() const;
|
553 |
+
|
554 |
+
/// Returns if a `Tensor` has any dimension names
|
555 |
+
bool has_names() const {
|
556 |
+
// If a user is using unnamed tensors, then we can short-circuit right here.
|
557 |
+
// Otherwise, impl::has_names attempts to retrieve names.
|
558 |
+
if (!impl_->has_named_tensor_meta()) {
|
559 |
+
return false;
|
560 |
+
}
|
561 |
+
return impl::has_names(unsafeGetTensorImpl());
|
562 |
+
}
|
563 |
+
|
564 |
+
/// Returns a `Tensor`'s dimension names data structure
|
565 |
+
const NamedTensorMeta* get_named_tensor_meta() const {
|
566 |
+
return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
|
567 |
+
}
|
568 |
+
|
569 |
+
NamedTensorMeta* get_named_tensor_meta() {
|
570 |
+
return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
|
571 |
+
}
|
572 |
+
|
573 |
+
/// Returns the `TensorOptions` corresponding to this `Tensor`. Defined in
|
574 |
+
/// TensorOptions.h.
|
575 |
+
TensorOptions options() const {
|
576 |
+
return TensorOptions().dtype(dtype())
|
577 |
+
.device(device())
|
578 |
+
.layout(layout());
|
579 |
+
}
|
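Sketch of the usual pattern: allocating new tensors that match an existing one's dtype/device/layout (hypothetical names; `at::empty` is a factory function outside this header):

at::Tensor buffer = at::empty({128}, t.options());                   // same dtype/device/layout as t
at::Tensor mask   = at::empty({128}, t.options().dtype(at::kBool));  // override only the dtype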
580 |
+
|
581 |
+
const void* const_data_ptr() const {
|
582 |
+
return this->unsafeGetTensorImpl()->data();
|
583 |
+
}
|
584 |
+
|
585 |
+
void* mutable_data_ptr() const {
|
586 |
+
return this->unsafeGetTensorImpl()->mutable_data();
|
587 |
+
}
|
588 |
+
|
589 |
+
// TODO(#97856) Make this return a const pointer. This currently
|
590 |
+
// returns a non-const pointer because of the large
|
591 |
+
// number of clients that we still want to audit before
|
592 |
+
// migrating to mutable_data_ptr().
|
593 |
+
void* data_ptr() const {
|
594 |
+
return mutable_data_ptr();
|
595 |
+
}
|
596 |
+
|
597 |
+
template <typename T, std::enable_if_t<!std::is_const<T>::value, int> = 0>
|
598 |
+
const T* const_data_ptr() const;
|
599 |
+
|
600 |
+
template <typename T, std::enable_if_t<std::is_const<T>::value, int> = 0>
|
601 |
+
const std::remove_const_t<T>* const_data_ptr() const;
|
602 |
+
|
603 |
+
template <typename T>
|
604 |
+
T* mutable_data_ptr() const;
|
605 |
+
|
606 |
+
// Legacy interface during the migration to indicate that a callsite
|
607 |
+
// has not been audited for mutability.
|
608 |
+
//
|
609 |
+
// Do not add new uses of this, use const_data_ptr() if possible,
|
610 |
+
// mutable_data_ptr() otherwise.
|
611 |
+
//
|
612 |
+
// TODO(#97856) Make this return a const pointer. This is currently
|
613 |
+
// const because of the vast number of clients that
|
614 |
+
// rely on this.
|
615 |
+
template <typename T>
|
616 |
+
T* data_ptr() const;
|
617 |
+
|
618 |
+
// Purposely not defined here to avoid inlining
|
619 |
+
void print() const;
|
620 |
+
|
621 |
+
// Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and
|
622 |
+
// dimension.
|
623 |
+
template<typename T, size_t N>
|
624 |
+
TensorAccessor<T,N> accessor() const& {
|
625 |
+
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
|
626 |
+
TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim());
|
627 |
+
T* ptr = nullptr;
|
628 |
+
if constexpr (std::is_const<T>::value) {
|
629 |
+
ptr = const_data_ptr<T>();
|
630 |
+
} else {
|
631 |
+
ptr = mutable_data_ptr<T>();
|
632 |
+
}
|
633 |
+
return TensorAccessor<T,N>(ptr,sizes().data(),strides().data());
|
634 |
+
}
|
635 |
+
template<typename T, size_t N>
|
636 |
+
TensorAccessor<T,N> accessor() && = delete;
|
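For illustration, a minimal sketch of iterating a CPU matrix through a 2-D accessor (assumes `m` is a defined 2-D float CPU tensor):

auto acc = m.accessor<float, 2>();
for (int64_t i = 0; i < m.size(0); ++i) {
  for (int64_t j = 0; j < m.size(1); ++j) {
    acc[i][j] += 1.0f;   // direct element access, no per-element dispatch
  }
}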
637 |
+
|
638 |
+
// Return a `GenericPackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and
|
639 |
+
// dimension. You can optionally specify RestrictPtrTraits as a template parameter to
|
640 |
+
// cast the data pointer to a __restrict__ pointer.
|
641 |
+
// In order to use this, your CUDA kernel has to take a corresponding GenericPackedTensorAccessor
|
642 |
+
// as an argument.
|
643 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
644 |
+
GenericPackedTensorAccessor<T,N,PtrTraits,index_t> generic_packed_accessor() const& {
|
645 |
+
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
|
646 |
+
TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim());
|
647 |
+
T* ptr = nullptr;
|
648 |
+
if constexpr (std::is_const<T>::value) {
|
649 |
+
ptr = const_data_ptr<T>();
|
650 |
+
} else {
|
651 |
+
ptr = mutable_data_ptr<T>();
|
652 |
+
}
|
653 |
+
return GenericPackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(ptr),sizes().data(),strides().data());
|
654 |
+
}
|
655 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
656 |
+
GenericPackedTensorAccessor<T,N> generic_packed_accessor() && = delete;
|
657 |
+
|
658 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
659 |
+
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() const& {
|
660 |
+
TORCH_CHECK(
|
661 |
+
impl_->numel() <=
|
662 |
+
static_cast<int64_t>(std::numeric_limits<int32_t>::max()),
|
663 |
+
"numel needs to be smaller than int32_t max; otherwise, please use packed_accessor64");
|
664 |
+
return generic_packed_accessor<T,N,PtrTraits,int32_t>();
|
665 |
+
}
|
666 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
667 |
+
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() && = delete;
|
668 |
+
|
669 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
670 |
+
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() const& {
|
671 |
+
return generic_packed_accessor<T,N,PtrTraits,int64_t>();
|
672 |
+
}
|
673 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
674 |
+
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() && = delete;
|
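Host-side sketch of preparing the accessor a CUDA kernel would take by value (the kernel itself is elided and hypothetical; assumes `t` is a 2-D float CUDA tensor with fewer than 2^31 elements):

// __global__ void add_one_kernel(at::PackedTensorAccessor32<float, 2> a) { /* index with a[i][j] */ }
auto packed = t.packed_accessor32<float, 2>();
// add_one_kernel<<<blocks, threads>>>(packed);  // launch details omitted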
675 |
+
|
676 |
+
// ~~~~~ Autograd API ~~~~~
|
677 |
+
|
678 |
+
/// \fn bool is_leaf() const;
|
679 |
+
///
|
680 |
+
/// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
|
681 |
+
///
|
682 |
+
/// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
|
683 |
+
/// created by the user. This means that they are not the result of an operation and so
|
684 |
+
/// `grad_fn()` is `nullptr`.
|
685 |
+
///
|
686 |
+
/// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
|
687 |
+
/// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
|
688 |
+
///
|
689 |
+
/// Example:
|
690 |
+
/// @code
|
691 |
+
/// auto a = torch::rand(10, torch::requires_grad());
|
692 |
+
/// std::cout << a.is_leaf() << std::endl; // prints `true`
|
693 |
+
///
|
694 |
+
/// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
|
695 |
+
/// std::cout << b.is_leaf() << std::endl; // prints `false`
|
696 |
+
/// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
|
697 |
+
///
|
698 |
+
/// auto c = torch::rand(10, torch::requires_grad()) + 2;
|
699 |
+
/// std::cout << c.is_leaf() << std::endl; // prints `false`
|
700 |
+
/// // c was created by the addition operation
|
701 |
+
///
|
702 |
+
/// auto d = torch::rand(10).cuda();
|
703 |
+
/// std::cout << d.is_leaf() << std::endl; // prints `true`
|
704 |
+
/// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
|
705 |
+
///
|
706 |
+
/// auto e = torch::rand(10).cuda().requires_grad_();
|
707 |
+
/// std::cout << e.is_leaf() << std::endl; // prints `true`
|
708 |
+
/// // e requires gradients and has no operations creating it
|
709 |
+
///
|
710 |
+
/// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
|
711 |
+
/// std::cout << f.is_leaf() << std::endl; // prints `true`
|
712 |
+
/// // f requires grad, has no operation creating it
|
713 |
+
/// @endcode
|
714 |
+
|
715 |
+
/// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
|
716 |
+
///
|
717 |
+
/// Computes the gradient of current tensor with respect to graph leaves.
|
718 |
+
///
|
719 |
+
/// The graph is differentiated using the chain rule. If the tensor is
|
720 |
+
/// non-scalar (i.e. its data has more than one element) and requires
|
721 |
+
/// gradient, the function additionally requires specifying ``gradient``.
|
722 |
+
/// It should be a tensor of matching type and location, that contains
|
723 |
+
/// the gradient of the differentiated function w.r.t. this Tensor.
|
724 |
+
///
|
725 |
+
/// This function accumulates gradients in the leaves - you might need to
|
726 |
+
/// zero them before calling it.
|
727 |
+
///
|
728 |
+
/// \param gradient Gradient w.r.t. the
|
729 |
+
/// tensor. If it is a tensor, it will be automatically converted
|
730 |
+
/// to a Tensor that does not require grad unless ``create_graph`` is True.
|
731 |
+
/// None values can be specified for scalar Tensors or ones that
|
732 |
+
/// don't require grad. If a None value would be acceptable then
|
733 |
+
/// this argument is optional.
|
734 |
+
/// \param retain_graph If ``false``, the graph used to compute
|
735 |
+
/// the grads will be freed. Note that in nearly all cases setting
|
736 |
+
/// this option to True is not needed and often can be worked around
|
737 |
+
/// in a much more efficient way. Defaults to the value of
|
738 |
+
/// ``create_graph``.
|
739 |
+
/// \param create_graph If ``true``, graph of the derivative will
|
740 |
+
/// be constructed, allowing to compute higher order derivative
|
741 |
+
/// products. Defaults to ``false``.
|
742 |
+
/// \param inputs Inputs w.r.t. which the gradient will be accumulated into
|
743 |
+
/// ``at::Tensor::grad``. All other Tensors will be ignored. If not
|
744 |
+
/// provided, the gradient is accumulated into all the leaf Tensors
|
745 |
+
/// that were used to compute the current tensor.
|
746 |
+
/// When inputs are provided and a given input is not a leaf,
|
747 |
+
/// the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
|
748 |
+
/// It is an implementation detail on which the user should not rely.
|
749 |
+
/// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
|
750 |
+
|
751 |
+
/// \fn Tensor detach() const;
|
752 |
+
///
|
753 |
+
/// Returns a new Tensor, detached from the current graph.
|
754 |
+
/// The result will never require gradient.
|
755 |
+
|
756 |
+
/// \fn Tensor & detach_() const;
|
757 |
+
///
|
758 |
+
/// Detaches the Tensor from the graph that created it, making it a leaf.
|
759 |
+
/// Views cannot be detached in-place.
|
760 |
+
|
761 |
+
/// \fn void retain_grad() const;
|
762 |
+
///
|
763 |
+
/// Enables this Tensor to have its :attr:`grad` populated during
|
764 |
+
/// :func:`backward`. This is a no-op for leaf tensors.
|
765 |
+
|
766 |
+
/// \fn bool retains_grad() const;
|
767 |
+
///
|
768 |
+
/// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
|
769 |
+
/// populated during :func:`backward`, ``false`` otherwise.
|
770 |
+
|
771 |
+
const TensorBase& set_requires_grad(bool requires_grad) const {
|
772 |
+
impl_->set_requires_grad(requires_grad);
|
773 |
+
return *this;
|
774 |
+
}
|
775 |
+
bool requires_grad() const {
|
776 |
+
return impl_->requires_grad();
|
777 |
+
}
|
778 |
+
|
779 |
+
// The Forward AD API functions below are low level and are not to be used by end
|
780 |
+
// users who should use the API provided in torch/csrc/autograd.h
|
781 |
+
|
782 |
+
/// This function returns the forward gradient for this Tensor at the given level.
|
783 |
+
const Tensor& _fw_grad(uint64_t level) const {
|
784 |
+
return impl_->_fw_grad(level, *this);
|
785 |
+
}
|
786 |
+
|
787 |
+
/// This function can be used to set the value of the forward grad.
|
788 |
+
/// Note that the given new_grad might not be used directly if it has different
|
789 |
+
/// metadata (size/stride/storage offset) compared to this Tensor. In that case,
|
790 |
+
/// new_grad content will be copied into a new Tensor
|
791 |
+
void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
|
792 |
+
impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
|
793 |
+
}
|
794 |
+
|
795 |
+
/// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
|
796 |
+
/// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
|
797 |
+
/// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
|
798 |
+
///
|
799 |
+
/// One notable difference with the legacy `.data()` function is that changes to the
|
800 |
+
/// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
|
801 |
+
/// will not update the original `Variable`, due to the fact that this function
|
802 |
+
/// shallow-copies the `Variable`'s underlying TensorImpl.
|
803 |
+
at::TensorBase tensor_data() const;
|
804 |
+
|
805 |
+
/// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
|
806 |
+
/// in Python, which creates a new `Variable` that shares the same storage and
|
807 |
+
/// tensor metadata with the original `Variable`, but with a completely new
|
808 |
+
/// autograd history.
|
809 |
+
///
|
810 |
+
/// NOTE: If we change the tensor metadata (e.g. sizes / strides /
|
811 |
+
/// storage / storage_offset) of a variable created from `var.variable_data()`, those
|
812 |
+
/// changes will not update the original variable `var`. In `.variable_data()`, we set
|
813 |
+
/// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
|
814 |
+
/// in order to prevent users from changing metadata of `var.variable_data()`
|
815 |
+
/// and expecting the original variable `var` to also be updated.
|
816 |
+
at::TensorBase variable_data() const;
|
817 |
+
|
818 |
+
// Gradient Node and Edges
|
819 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
820 |
+
|
821 |
+
/// Gets the gradient function of the `Variable`. If this is a leaf variable,
|
822 |
+
/// the pointer returned will be null.
|
823 |
+
///
|
824 |
+
/// For View Variables:
|
825 |
+
/// Gets the up-to-date grad_fn. If the shared data or base was modified, we
|
826 |
+
/// re-create the grad_fn to express the up-to-date view relationship between
|
827 |
+
/// this and the base Variable.
|
828 |
+
const std::shared_ptr<torch::autograd::Node>& grad_fn() const;
|
829 |
+
|
830 |
+
// Hooks
|
831 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
832 |
+
|
833 |
+
template <typename T>
|
834 |
+
using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, TensorBase>>::value, unsigned>;
|
835 |
+
template <typename T>
|
836 |
+
using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, TensorBase>, TensorBase>::value, unsigned>;
|
837 |
+
|
838 |
+
/// Registers a backward hook.
|
839 |
+
///
|
840 |
+
/// The hook will be called every time a gradient with respect to the Tensor is computed.
|
841 |
+
/// The hook should have one of the following signatures:
|
842 |
+
/// ```
|
843 |
+
/// hook(TensorBase grad) -> TensorBase
|
844 |
+
/// ```
|
845 |
+
/// ```
|
846 |
+
/// hook(TensorBase grad) -> void
|
847 |
+
/// ```
|
848 |
+
/// The hook should not modify its argument, but it can optionally return a new gradient
|
849 |
+
/// which will be used in place of `grad`.
|
850 |
+
///
|
851 |
+
/// This function returns the index of the hook in the list which can be used to remove hook.
|
852 |
+
///
|
853 |
+
/// Example:
|
854 |
+
/// @code
|
855 |
+
/// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
|
856 |
+
/// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
|
857 |
+
/// v.backward(torch::tensor({1., 2., 3.}));
|
858 |
+
/// // This prints:
|
859 |
+
/// // ```
|
860 |
+
/// // 2
|
861 |
+
/// // 4
|
862 |
+
/// // 6
|
863 |
+
/// // [ CPUFloatType{3} ]
|
864 |
+
/// // ```
|
865 |
+
/// std::cout << v.grad() << std::endl;
|
866 |
+
/// v.remove_hook(h); // removes the hook
|
867 |
+
/// @endcode
|
868 |
+
template <typename T>
|
869 |
+
hook_return_void_t<T> register_hook(T&& hook) const;
|
870 |
+
template <typename T>
|
871 |
+
hook_return_var_t<T> register_hook(T&& hook) const;
|
872 |
+
|
873 |
+
protected:
|
874 |
+
unsigned _register_hook(std::function<TensorBase(const TensorBase&)> hook) const;
|
875 |
+
|
876 |
+
public:
|
877 |
+
|
878 |
+
/// Remove hook at given position
|
879 |
+
void remove_hook(unsigned pos) const;
|
880 |
+
|
881 |
+
// Variable methods
|
882 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
883 |
+
|
884 |
+
bool is_leaf() const;
|
885 |
+
|
886 |
+
int64_t output_nr() const;
|
887 |
+
|
888 |
+
void set_data(const TensorBase & new_data) const;
|
889 |
+
|
890 |
+
TensorBase data() const;
|
891 |
+
|
892 |
+
int64_t _version() const;
|
893 |
+
|
894 |
+
void retain_grad() const;
|
895 |
+
|
896 |
+
bool retains_grad() const;
|
897 |
+
|
898 |
+
const TensorBase& requires_grad_(bool _requires_grad=true) const;
|
899 |
+
|
900 |
+
// View Variables
|
901 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
902 |
+
|
903 |
+
/// Returns true if this `Variable` is a view of another `Variable`.
|
904 |
+
bool is_view() const;
|
905 |
+
|
906 |
+
/// Returns the `Variable` that this `Variable` is a view of. If this
|
907 |
+
/// `Variable` is not a view, throw a `std::runtime_error`.
|
908 |
+
const TensorBase& _base() const;
|
909 |
+
|
910 |
+
// Miscellaneous
|
911 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
912 |
+
|
913 |
+
const std::string& name() const;
|
914 |
+
|
915 |
+
protected:
|
916 |
+
void enforce_invariants();
|
917 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;
|
918 |
+
|
919 |
+
private:
|
920 |
+
TensorBase __dispatch_contiguous(c10::MemoryFormat) const;
|
921 |
+
};
|
922 |
+
|
923 |
+
inline DeviceIndex get_device(const TensorBase& self) {
|
924 |
+
return self.get_device();
|
925 |
+
}
|
926 |
+
|
927 |
+
template <typename T>
|
928 |
+
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t<T> {
|
929 |
+
// Wrap hooks that return void so the stored std::function still returns a
|
930 |
+
// TensorBase; returning an undefined TensorBase leaves the gradient unchanged.
|
931 |
+
static_assert(std::is_same<decltype(hook(TensorBase())), void>::value,
|
932 |
+
"Expected hook to return void");
|
933 |
+
return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad) {
|
934 |
+
fn(grad);
|
935 |
+
return TensorBase();
|
936 |
+
});
|
937 |
+
}
|
938 |
+
|
939 |
+
template <typename T>
|
940 |
+
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t<T> {
|
941 |
+
return _register_hook(std::forward<T>(hook));
|
942 |
+
}
|
943 |
+
|
944 |
+
namespace detail {
|
945 |
+
// Helper creator for the Tensor class which doesn't require the user to pass
|
946 |
+
// in an intrusive_ptr; instead it just converts the arguments passed to the
|
947 |
+
// requested intrusive_ptr type.
|
948 |
+
template <typename T, typename... Args>
|
949 |
+
TensorBase make_tensor_base(Args&&... args) {
|
950 |
+
return TensorBase(c10::make_intrusive<T>(std::forward<Args>(args)...));
|
951 |
+
}
|
952 |
+
|
953 |
+
} // namespace detail
|
954 |
+
|
955 |
+
static inline DispatchKey legacyExtractDispatchKey(const TensorBase& t) {
|
956 |
+
return legacyExtractDispatchKey(t.key_set());
|
957 |
+
}
|
958 |
+
|
959 |
+
} // namespace at
|
960 |
+
|
961 |
+
namespace c10 {
|
962 |
+
template <>
|
963 |
+
struct MaybeOwnedTraits<at::TensorBase> {
|
964 |
+
using owned_type = at::TensorBase;
|
965 |
+
using borrow_type = at::TensorBase;
|
966 |
+
|
967 |
+
static borrow_type createBorrow(const owned_type& from) {
|
968 |
+
// NOTE: this can be implemented without the special
|
969 |
+
// unsafe_borrow_t Tensor constructor as
|
970 |
+
//
|
971 |
+
// return borrow_type(c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
|
972 |
+
//
|
973 |
+
// but that hurts inlining due to the nullptr check in the
|
974 |
+
// Tensor(c10::intrusive_ptr<...>) constructor. We already know
|
975 |
+
// that from.impl_ isn't null because from is a valid Tensor, so
|
976 |
+
// we needn't do the check again. (using __builtin_assume can
|
977 |
+
// avoid this, but wouldn't be portable to MSVC.)
|
978 |
+
return borrow_type(borrow_type::unsafe_borrow_t{}, from);
|
979 |
+
}
|
980 |
+
|
981 |
+
static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
|
982 |
+
lhs.unsafeReleaseTensorImpl();
|
983 |
+
// See above note: this can be implemented with public API
|
984 |
+
// similarly to createBorrow(), but that would hurt inlining.
|
985 |
+
lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
|
986 |
+
}
|
987 |
+
|
988 |
+
static void destroyBorrow(borrow_type& toDestroy) {
|
989 |
+
toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
|
990 |
+
}
|
991 |
+
|
992 |
+
static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
|
993 |
+
return borrow;
|
994 |
+
}
|
995 |
+
|
996 |
+
static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
|
997 |
+
return &borrow;
|
998 |
+
}
|
999 |
+
|
1000 |
+
static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
|
1001 |
+
return true;
|
1002 |
+
}
|
1003 |
+
};
|
1004 |
+
|
1005 |
+
template <>
|
1006 |
+
struct ExclusivelyOwnedTraits<at::TensorBase> : public c10::ExclusivelyOwnedTensorTraits<at::TensorBase> {};
|
1007 |
+
} // namespace c10
|
1008 |
+
|
1009 |
+
namespace at {
|
1010 |
+
|
1011 |
+
inline c10::MaybeOwned<TensorBase> borrow_from_optional_tensor(
|
1012 |
+
const c10::optional<TensorBase>& opt) {
|
1013 |
+
return opt.has_value()
|
1014 |
+
? c10::MaybeOwned<TensorBase>::borrowed(*opt)
|
1015 |
+
: c10::MaybeOwned<TensorBase>::owned(std::in_place);
|
1016 |
+
}
|
1017 |
+
|
1018 |
+
inline c10::MaybeOwned<TensorBase> TensorBase::expect_contiguous(MemoryFormat memory_format) const & {
|
1019 |
+
if (is_contiguous(memory_format)) {
|
1020 |
+
return c10::MaybeOwned<TensorBase>::borrowed(*this);
|
1021 |
+
} else {
|
1022 |
+
return c10::MaybeOwned<TensorBase>::owned(__dispatch_contiguous(memory_format));
|
1023 |
+
}
|
1024 |
+
}
|
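Sketch of the typical pattern in functions that take an optional tensor argument (the function is hypothetical):

void apply_bias(const c10::optional<at::TensorBase>& bias_opt) {
  // Borrows the tensor when present, otherwise owns a default-constructed one;
  // no reference-count traffic in the common "present" case.
  c10::MaybeOwned<at::TensorBase> bias = at::borrow_from_optional_tensor(bias_opt);
  if (bias->defined()) {
    // ... use *bias ...
  }
}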
1025 |
+
|
1026 |
+
namespace symint {
|
1027 |
+
|
1028 |
+
template <typename T>
|
1029 |
+
using enable_if_symint = std::enable_if_t<std::is_same<T, c10::SymInt>::value>;
|
1030 |
+
template <typename T>
|
1031 |
+
using enable_if_int = std::enable_if_t<std::is_same<T, int64_t>::value>;
|
1032 |
+
|
1033 |
+
template <typename T, typename = enable_if_symint<T>>
|
1034 |
+
c10::SymIntArrayRef sizes(const TensorBase& t) { return t.sym_sizes(); }
|
1035 |
+
template <typename T, typename = enable_if_int<T>>
|
1036 |
+
IntArrayRef sizes(const TensorBase& t) { return t.sizes(); }
|
1037 |
+
|
1038 |
+
template <typename T, typename = enable_if_symint<T>>
|
1039 |
+
c10::SymInt size(const TensorBase& t, int64_t dim) { return t.sym_size(dim); }
|
1040 |
+
template <typename T, typename = enable_if_int<T>>
|
1041 |
+
int64_t size(const TensorBase& t, int64_t dim) { return t.size(dim); }
|
1042 |
+
|
1043 |
+
template <typename T, typename = enable_if_symint<T>>
|
1044 |
+
c10::SymIntArrayRef strides(const TensorBase& t) { return t.sym_strides(); }
|
1045 |
+
template <typename T, typename = enable_if_int<T>>
|
1046 |
+
IntArrayRef strides(const TensorBase& t) { return t.strides(); }
|
1047 |
+
|
1048 |
+
template <typename T, typename = enable_if_symint<T>>
|
1049 |
+
c10::SymInt numel(const TensorBase& t) { return t.sym_numel(); }
|
1050 |
+
template <typename T, typename = enable_if_int<T>>
|
1051 |
+
int64_t numel(const TensorBase& t) { return t.numel(); }
|
1052 |
+
|
1053 |
+
} // namespace symint
|
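A short sketch of how code written against these shims stays generic over the index type (the function name is hypothetical):

template <typename T>   // T is either int64_t or c10::SymInt
T leading_dim(const at::TensorBase& t) {
  return at::symint::size<T>(t, /*dim=*/0);
}
// leading_dim<int64_t>(t) calls t.size(0); leading_dim<c10::SymInt>(t) calls t.sym_size(0)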
1054 |
+
|
1055 |
+
} // namespace at
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h
ADDED
@@ -0,0 +1,17 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/library.h>
|
4 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
5 |
+
#include <c10/util/ArrayRef.h>
|
6 |
+
#include <c10/util/Optional.h>
|
7 |
+
#include <c10/core/impl/TorchDispatchModeTLS.h>
|
8 |
+
|
9 |
+
namespace at {
|
10 |
+
namespace impl {
|
11 |
+
|
12 |
+
TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
|
13 |
+
TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
|
14 |
+
TORCH_API bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li);
|
15 |
+
using c10::impl::dispatch_mode_enabled;
|
16 |
+
|
17 |
+
}}
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h
ADDED
@@ -0,0 +1,21 @@
1 |
+
#pragma once
|
2 |
+
#include <ATen/core/Tensor.h>
|
3 |
+
|
4 |
+
namespace at {
|
5 |
+
|
6 |
+
inline Tensor unsafeTensorFromTH(void * th_pointer, bool retain) {
|
7 |
+
auto tensor_impl = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(static_cast<TensorImpl*>(th_pointer));
|
8 |
+
if (retain && tensor_impl.get() != UndefinedTensorImpl::singleton()) {
|
9 |
+
c10::raw::intrusive_ptr::incref(tensor_impl.get());
|
10 |
+
}
|
11 |
+
return Tensor(std::move(tensor_impl));
|
12 |
+
}
|
13 |
+
|
14 |
+
inline Storage unsafeStorageFromTH(void * th_pointer, bool retain) {
|
15 |
+
if (retain && th_pointer) {
|
16 |
+
c10::raw::intrusive_ptr::incref(static_cast<StorageImpl*>(th_pointer));
|
17 |
+
}
|
18 |
+
return Storage(c10::intrusive_ptr<StorageImpl>::reclaim(static_cast<StorageImpl*>(th_pointer)));
|
19 |
+
}
|
20 |
+
|
21 |
+
}
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h
ADDED
@@ -0,0 +1,95 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <cstdint>
|
4 |
+
#include <tuple>
|
5 |
+
#include <type_traits>
|
6 |
+
#include <utility>
|
7 |
+
|
8 |
+
#include <c10/util/ArrayRef.h>
|
9 |
+
#include <ATen/core/List.h>
|
10 |
+
|
11 |
+
namespace at {
|
12 |
+
|
13 |
+
// This class allows you to write variadic functions which
|
14 |
+
// call a (possibly overloaded) function on each argument,
|
15 |
+
// in order. This is most commonly used in autogenerated code,
|
16 |
+
// where it is convenient to have a function that can uniformly
|
17 |
+
// take arguments of different types. If your arguments
|
18 |
+
// are homogeneous, consider using a std::initializer_list instead.
|
19 |
+
//
|
20 |
+
// For examples of this in use, see torch/csrc/utils/variadic.h
|
21 |
+
template <typename F>
|
22 |
+
struct IterArgs {
|
23 |
+
template <typename... Args>
|
24 |
+
inline F& apply() {
|
25 |
+
return self();
|
26 |
+
}
|
27 |
+
|
28 |
+
// NB: Use perfect forwarding here, otherwise we'll make value
|
29 |
+
// copies of all arguments!
|
30 |
+
template <typename T, typename... Args>
|
31 |
+
inline F& apply(T&& arg, Args&&... args) {
|
32 |
+
self()(std::forward<T>(arg));
|
33 |
+
if (self().short_circuit()) {
|
34 |
+
return self();
|
35 |
+
} else {
|
36 |
+
return apply(std::forward<Args>(args)...);
|
37 |
+
}
|
38 |
+
}
|
39 |
+
|
40 |
+
// Here are some handy overloads which provide sensible
|
41 |
+
// defaults for container-like structures that one might
|
42 |
+
// be interested in recursing into. You can enable them
|
43 |
+
// by adding:
|
44 |
+
//
|
45 |
+
// using IterArgs<YourStructName>::operator()
|
46 |
+
//
|
47 |
+
// to your struct. These are not enabled by default because
|
48 |
+
// you may be able to process these structures more efficiently
|
49 |
+
// than handling them one-by-one.
|
50 |
+
|
51 |
+
template <typename T>
|
52 |
+
void operator()(c10::IListRef<T> args) {
|
53 |
+
for (const auto& arg : args) {
|
54 |
+
self()(arg);
|
55 |
+
if (self().short_circuit())
|
56 |
+
return;
|
57 |
+
}
|
58 |
+
}
|
59 |
+
|
60 |
+
template <typename T>
|
61 |
+
void operator()(at::ArrayRef<T> args) {
|
62 |
+
for (const auto& arg : args) {
|
63 |
+
self()(arg);
|
64 |
+
if (self().short_circuit())
|
65 |
+
return;
|
66 |
+
}
|
67 |
+
}
|
68 |
+
|
69 |
+
template <typename T>
|
70 |
+
void operator()(const torch::List<T>& args) {
|
71 |
+
for (const auto& arg : args) {
|
72 |
+
self()(arg);
|
73 |
+
if (self().short_circuit())
|
74 |
+
return;
|
75 |
+
}
|
76 |
+
}
|
77 |
+
|
78 |
+
// NB: we need to specify std::vector manually as C++ won't
|
79 |
+
// do an implicit conversion to make a template deduction go through.
|
80 |
+
template <typename T>
|
81 |
+
void operator()(const std::vector<T>& args) {
|
82 |
+
self()(at::ArrayRef<T>{args});
|
83 |
+
}
|
84 |
+
|
85 |
+
constexpr bool short_circuit() const {
|
86 |
+
return false;
|
87 |
+
}
|
88 |
+
|
89 |
+
private:
|
90 |
+
inline F& self() {
|
91 |
+
return *static_cast<F*>(this);
|
92 |
+
}
|
93 |
+
};
|
94 |
+
|
95 |
+
} // namespace at
|
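For illustration, a minimal CRTP subclass along the lines the comments above describe (the struct is hypothetical):

struct CountArgs : at::IterArgs<CountArgs> {
  size_t count = 0;
  template <typename T>
  void operator()(const T& /*arg*/) { ++count; }   // invoked once per argument
};
// CountArgs c; c.apply(1, 2.0, "three");  -> c.count == 3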
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h
ADDED
@@ -0,0 +1,151 @@
|
1 |
+
#pragma once
|
2 |
+
#include <unordered_set>
|
3 |
+
#include <vector>
|
4 |
+
#include <ATen/core/symbol.h>
|
5 |
+
#include <c10/util/Exception.h>
|
6 |
+
#include <c10/util/hash.h>
|
7 |
+
|
8 |
+
namespace c10 {
|
9 |
+
/**
|
10 |
+
* class AliasInfo
|
11 |
+
*
|
12 |
+
* Data structure to hold aliasing information for an `Argument`. They can be
|
13 |
+
* nested to represent aliasing information on contained types.
|
14 |
+
*
|
15 |
+
* There is a `beforeSet` which describes the aliasing information before the
|
16 |
+
* operator executes, and an `afterSet` that describes aliasing info
|
17 |
+
* after execution.
|
18 |
+
*/
|
19 |
+
class AliasInfo {
|
20 |
+
public:
|
21 |
+
// Symbol for the set that can alias anything
|
22 |
+
static Symbol wildcardSet() {
|
23 |
+
static const Symbol wc = Symbol::fromQualString("alias::*");
|
24 |
+
return wc;
|
25 |
+
}
|
26 |
+
|
27 |
+
void setIsWrite(bool isWrite) {
|
28 |
+
isWrite_ = isWrite;
|
29 |
+
}
|
30 |
+
|
31 |
+
bool isWrite() const {
|
32 |
+
return isWrite_;
|
33 |
+
}
|
34 |
+
|
35 |
+
void addBeforeSet(Symbol aliasSet) {
|
36 |
+
beforeSets_.insert(aliasSet);
|
37 |
+
}
|
38 |
+
|
39 |
+
void addAfterSet(Symbol aliasSet) {
|
40 |
+
afterSets_.insert(aliasSet);
|
41 |
+
}
|
42 |
+
|
43 |
+
const std::unordered_set<Symbol>& beforeSets() const {
|
44 |
+
return beforeSets_;
|
45 |
+
}
|
46 |
+
|
47 |
+
const std::unordered_set<Symbol>& afterSets() const {
|
48 |
+
return afterSets_;
|
49 |
+
}
|
50 |
+
|
51 |
+
Symbol beforeSet() const {
|
52 |
+
AT_ASSERT(beforeSets_.size() == 1);
|
53 |
+
return *beforeSets_.begin();
|
54 |
+
}
|
55 |
+
|
56 |
+
bool isWildcardBefore() const {
|
57 |
+
return beforeSets_.count(wildcardSet()) != 0;
|
58 |
+
}
|
59 |
+
|
60 |
+
bool isWildcardAfter() const {
|
61 |
+
return afterSets_.count(wildcardSet()) != 0;
|
62 |
+
}
|
63 |
+
|
64 |
+
// the alias info for the contained types of the type
|
65 |
+
// e.g. if this is an annotation on List[T], `sets` refers to
|
66 |
+
// the alias sets that the list may be in
|
67 |
+
// while containedTypes()[0] refers to the sets that members of the list
|
68 |
+
// may be in
|
69 |
+
void addContainedType(AliasInfo aliasInfo) {
|
70 |
+
containedTypes_.push_back(std::move(aliasInfo));
|
71 |
+
}
|
72 |
+
const std::vector<AliasInfo>& containedTypes() const {
|
73 |
+
return containedTypes_;
|
74 |
+
}
|
75 |
+
|
76 |
+
private:
|
77 |
+
std::unordered_set<Symbol> beforeSets_;
|
78 |
+
std::unordered_set<Symbol> afterSets_;
|
79 |
+
std::vector<AliasInfo> containedTypes_;
|
80 |
+
bool isWrite_ = false;
|
81 |
+
};
|
82 |
+
|
83 |
+
inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) {
|
84 |
+
return lhs.isWrite() == rhs.isWrite()
|
85 |
+
&& lhs.beforeSets() == rhs.beforeSets()
|
86 |
+
&& lhs.afterSets() == rhs.afterSets()
|
87 |
+
&& lhs.containedTypes() == rhs.containedTypes();
|
88 |
+
}
|
89 |
+
|
90 |
+
// this does match the way things are represented in the schema
|
91 |
+
inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) {
|
92 |
+
out << "(";
|
93 |
+
bool first = true;
|
94 |
+
for (const auto& set : aliasInfo.beforeSets()) {
|
95 |
+
if (first) {
|
96 |
+
first = false;
|
97 |
+
} else {
|
98 |
+
out << "|";
|
99 |
+
}
|
100 |
+
out << set.toUnqualString();
|
101 |
+
}
|
102 |
+
if (aliasInfo.isWrite()) {
|
103 |
+
out << "!";
|
104 |
+
}
|
105 |
+
if (aliasInfo.beforeSets() != aliasInfo.afterSets()) {
|
106 |
+
out << " -> ";
|
107 |
+
first = true;
|
108 |
+
for (const auto& set : aliasInfo.afterSets()) {
|
109 |
+
if (first) {
|
110 |
+
first = false;
|
111 |
+
} else {
|
112 |
+
out << "|";
|
113 |
+
}
|
114 |
+
out << set.toUnqualString();
|
115 |
+
}
|
116 |
+
}
|
117 |
+
out << ")";
|
118 |
+
return out;
|
119 |
+
}
|
120 |
+
} // namespace c10
|
121 |
+
|
122 |
+
namespace std {
|
123 |
+
template <>
|
124 |
+
struct hash<c10::AliasInfo> {
|
125 |
+
size_t operator()(const c10::AliasInfo& aliasInfo) const {
|
126 |
+
auto hash = std::hash<bool>()(aliasInfo.isWrite());
|
127 |
+
|
128 |
+
// NOTE: for unordered_set hashes, we couldn't use hash_combine
|
129 |
+
// because hash_combine is order dependent. Instead, we choose to
|
130 |
+
// use XOR as the combining function as XOR is commutative.
|
131 |
+
size_t before_set_hash_seed = 0;
|
132 |
+
for (auto &e: aliasInfo.beforeSets()) {
|
133 |
+
auto symbol_hash = std::hash<c10::Symbol>()(e);
|
134 |
+
before_set_hash_seed = before_set_hash_seed ^ symbol_hash;
|
135 |
+
}
|
136 |
+
size_t after_set_hash_seed = 0;
|
137 |
+
for (auto &e: aliasInfo.afterSets()) {
|
138 |
+
auto symbol_hash = std::hash<c10::Symbol>()(e);
|
139 |
+
after_set_hash_seed = after_set_hash_seed ^ symbol_hash;
|
140 |
+
}
|
141 |
+
|
142 |
+
hash = c10::hash_combine(hash, before_set_hash_seed);
|
143 |
+
hash = c10::hash_combine(hash, after_set_hash_seed);
|
144 |
+
for (auto &e: aliasInfo.containedTypes()) {
|
145 |
+
auto contained_type_hash = std::hash<c10::AliasInfo>()(e);
|
146 |
+
hash = c10::hash_combine(hash, contained_type_hash);
|
147 |
+
}
|
148 |
+
return hash;
|
149 |
+
}
|
150 |
+
};
|
151 |
+
}
|
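A small usage sketch (the set name "alias::a" is arbitrary, and <iostream> is assumed to be included):

c10::AliasInfo info;
info.setIsWrite(true);
info.addBeforeSet(c10::Symbol::fromQualString("alias::a"));
info.addAfterSet(c10::AliasInfo::wildcardSet());
std::cout << info << '\n';   // prints something like "(a! -> *)"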
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h
ADDED
@@ -0,0 +1,2213 @@
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// @generated by torchgen/gen.py from aten_interned_strings.h
|
4 |
+
|
5 |
+
#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if including <ATen/core/symbol.h> for \
|
9 |
+
the c10::Symbol class would be sufficient, or if your change would be \
|
10 |
+
better placed in another file.
|
11 |
+
#endif
|
12 |
+
|
13 |
+
// ATen symbols correspond exactly to operators defined in ATen. Every
|
14 |
+
// symbol here corresponds exactly to an ATen operation defined in
|
15 |
+
// native_functions.yaml; attributes are in one-to-one correspondence
|
16 |
+
// with their ATen name.
|
17 |
+
|
18 |
+
#define FORALL_ATEN_BASE_SYMBOLS(_) \
|
19 |
+
_(aten, __and__) \
|
20 |
+
_(aten, __iand__) \
|
21 |
+
_(aten, __ilshift__) \
|
22 |
+
_(aten, __ior__) \
|
23 |
+
_(aten, __irshift__) \
|
24 |
+
_(aten, __ixor__) \
|
25 |
+
_(aten, __lshift__) \
|
26 |
+
_(aten, __or__) \
|
27 |
+
_(aten, __rshift__) \
|
28 |
+
_(aten, __xor__) \
|
29 |
+
_(aten, _adaptive_avg_pool2d) \
|
30 |
+
_(aten, _adaptive_avg_pool2d_backward) \
|
31 |
+
_(aten, _adaptive_avg_pool3d) \
|
32 |
+
_(aten, _adaptive_avg_pool3d_backward) \
|
33 |
+
_(aten, _add_batch_dim) \
|
34 |
+
_(aten, _add_relu) \
|
35 |
+
_(aten, _add_relu_) \
|
36 |
+
_(aten, _addmm_activation) \
|
37 |
+
_(aten, _aminmax) \
|
38 |
+
_(aten, _amp_foreach_non_finite_check_and_unscale) \
|
39 |
+
_(aten, _amp_foreach_non_finite_check_and_unscale_) \
|
40 |
+
_(aten, _amp_update_scale) \
|
41 |
+
_(aten, _amp_update_scale_) \
|
42 |
+
_(aten, _assert_async) \
|
43 |
+
_(aten, _assert_scalar) \
|
44 |
+
_(aten, _assert_tensor_metadata) \
|
45 |
+
_(aten, _autocast_to_full_precision) \
|
46 |
+
_(aten, _autocast_to_reduced_precision) \
|
47 |
+
_(aten, _backward) \
|
48 |
+
_(aten, _batch_norm_impl_index) \
|
49 |
+
_(aten, _batch_norm_impl_index_backward) \
|
50 |
+
_(aten, _cast_Byte) \
|
51 |
+
_(aten, _cast_Char) \
|
52 |
+
_(aten, _cast_Double) \
|
53 |
+
_(aten, _cast_Float) \
|
54 |
+
_(aten, _cast_Half) \
|
55 |
+
_(aten, _cast_Int) \
|
56 |
+
_(aten, _cast_Long) \
|
57 |
+
_(aten, _cast_Short) \
|
58 |
+
_(aten, _cdist_backward) \
|
59 |
+
_(aten, _cdist_forward) \
|
60 |
+
_(aten, _cholesky_solve_helper) \
|
61 |
+
_(aten, _choose_qparams_per_tensor) \
|
62 |
+
_(aten, _chunk_cat) \
|
63 |
+
_(aten, _coalesce) \
|
64 |
+
_(aten, _coalesced) \
|
65 |
+
_(aten, _coalesced_) \
|
66 |
+
_(aten, _compute_linear_combination) \
|
67 |
+
_(aten, _conj) \
|
68 |
+
_(aten, _conj_copy) \
|
69 |
+
_(aten, _conj_physical) \
|
70 |
+
_(aten, _conv_depthwise2d) \
|
71 |
+
_(aten, _convert_indices_from_coo_to_csr) \
|
72 |
+
_(aten, _convert_indices_from_csr_to_coo) \
|
73 |
+
_(aten, _convert_weight_to_int4pack) \
|
74 |
+
_(aten, _convolution) \
|
75 |
+
_(aten, _convolution_double_backward) \
|
76 |
+
_(aten, _convolution_mode) \
|
77 |
+
_(aten, _copy_from) \
|
78 |
+
_(aten, _copy_from_and_resize) \
|
79 |
+
_(aten, _cslt_compress) \
|
80 |
+
_(aten, _cslt_sparse_mm) \
|
81 |
+
_(aten, _cslt_sparse_mm_search) \
|
82 |
+
_(aten, _ctc_loss) \
|
83 |
+
_(aten, _ctc_loss_backward) \
|
84 |
+
_(aten, _cudnn_ctc_loss) \
|
85 |
+
_(aten, _cudnn_init_dropout_state) \
|
86 |
+
_(aten, _cudnn_rnn) \
|
87 |
+
_(aten, _cudnn_rnn_backward) \
|
88 |
+
_(aten, _cudnn_rnn_flatten_weight) \
|
89 |
+
_(aten, _cufft_clear_plan_cache) \
|
90 |
+
_(aten, _cufft_get_plan_cache_max_size) \
|
91 |
+
_(aten, _cufft_get_plan_cache_size) \
|
92 |
+
_(aten, _cufft_set_plan_cache_max_size) \
|
93 |
+
_(aten, _cummax_helper) \
|
94 |
+
_(aten, _cummin_helper) \
|
95 |
+
_(aten, _debug_has_internal_overlap) \
|
96 |
+
_(aten, _dimI) \
|
97 |
+
_(aten, _dimV) \
|
98 |
+
_(aten, _dim_arange) \
|
99 |
+
_(aten, _dirichlet_grad) \
|
100 |
+
_(aten, _efficient_attention_backward) \
|
101 |
+
_(aten, _efficient_attention_forward) \
|
102 |
+
_(aten, _efficientzerotensor) \
|
103 |
+
_(aten, _embedding_bag) \
|
104 |
+
_(aten, _embedding_bag_backward) \
|
105 |
+
_(aten, _embedding_bag_dense_backward) \
|
106 |
+
_(aten, _embedding_bag_forward_only) \
|
107 |
+
_(aten, _embedding_bag_per_sample_weights_backward) \
|
108 |
+
_(aten, _embedding_bag_sparse_backward) \
|
109 |
+
_(aten, _empty_affine_quantized) \
|
110 |
+
_(aten, _empty_per_channel_affine_quantized) \
|
111 |
+
_(aten, _euclidean_dist) \
|
112 |
+
_(aten, _fake_quantize_learnable_per_channel_affine) \
|
113 |
+
_(aten, _fake_quantize_learnable_per_channel_affine_backward) \
|
114 |
+
_(aten, _fake_quantize_learnable_per_tensor_affine) \
|
115 |
+
_(aten, _fake_quantize_learnable_per_tensor_affine_backward) \
|
116 |
+
_(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) \
|
117 |
+
_(aten, _fft_c2c) \
|
118 |
+
_(aten, _fft_c2r) \
|
119 |
+
_(aten, _fft_r2c) \
|
120 |
+
_(aten, _fill_mem_eff_dropout_mask) \
|
121 |
+
_(aten, _fill_mem_eff_dropout_mask_) \
|
122 |
+
_(aten, _flash_attention_backward) \
|
123 |
+
_(aten, _flash_attention_forward) \
|
124 |
+
_(aten, _foobar) \
|
125 |
+
_(aten, _foreach_abs) \
|
126 |
+
_(aten, _foreach_abs_) \
|
127 |
+
_(aten, _foreach_acos) \
|
128 |
+
_(aten, _foreach_acos_) \
|
129 |
+
_(aten, _foreach_add) \
|
130 |
+
_(aten, _foreach_add_) \
|
131 |
+
_(aten, _foreach_addcdiv) \
|
132 |
+
_(aten, _foreach_addcdiv_) \
|
133 |
+
_(aten, _foreach_addcmul) \
|
134 |
+
_(aten, _foreach_addcmul_) \
|
135 |
+
_(aten, _foreach_asin) \
|
136 |
+
_(aten, _foreach_asin_) \
|
137 |
+
_(aten, _foreach_atan) \
|
138 |
+
_(aten, _foreach_atan_) \
|
139 |
+
_(aten, _foreach_ceil) \
|
140 |
+
_(aten, _foreach_ceil_) \
|
141 |
+
_(aten, _foreach_clamp_max) \
|
142 |
+
_(aten, _foreach_clamp_max_) \
|
143 |
+
_(aten, _foreach_clamp_min) \
|
144 |
+
_(aten, _foreach_clamp_min_) \
|
145 |
+
_(aten, _foreach_copy) \
|
146 |
+
_(aten, _foreach_copy_) \
|
147 |
+
_(aten, _foreach_cos) \
|
148 |
+
_(aten, _foreach_cos_) \
|
149 |
+
_(aten, _foreach_cosh) \
|
150 |
+
_(aten, _foreach_cosh_) \
|
151 |
+
_(aten, _foreach_div) \
|
152 |
+
_(aten, _foreach_div_) \
|
153 |
+
_(aten, _foreach_erf) \
|
154 |
+
_(aten, _foreach_erf_) \
|
155 |
+
_(aten, _foreach_erfc) \
|
156 |
+
_(aten, _foreach_erfc_) \
|
157 |
+
_(aten, _foreach_exp) \
|
158 |
+
_(aten, _foreach_exp_) \
|
159 |
+
_(aten, _foreach_expm1) \
|
160 |
+
_(aten, _foreach_expm1_) \
|
161 |
+
_(aten, _foreach_floor) \
|
162 |
+
_(aten, _foreach_floor_) \
|
163 |
+
_(aten, _foreach_frac) \
|
164 |
+
_(aten, _foreach_frac_) \
|
165 |
+
_(aten, _foreach_lerp) \
|
166 |
+
_(aten, _foreach_lerp_) \
|
167 |
+
_(aten, _foreach_lgamma) \
|
168 |
+
_(aten, _foreach_lgamma_) \
|
169 |
+
_(aten, _foreach_log) \
|
170 |
+
_(aten, _foreach_log10) \
|
171 |
+
_(aten, _foreach_log10_) \
|
172 |
+
_(aten, _foreach_log1p) \
|
173 |
+
_(aten, _foreach_log1p_) \
|
174 |
+
_(aten, _foreach_log2) \
|
175 |
+
_(aten, _foreach_log2_) \
|
176 |
+
_(aten, _foreach_log_) \
|
177 |
+
_(aten, _foreach_maximum) \
|
178 |
+
_(aten, _foreach_maximum_) \
|
179 |
+
_(aten, _foreach_minimum) \
|
180 |
+
_(aten, _foreach_minimum_) \
|
181 |
+
_(aten, _foreach_mul) \
|
182 |
+
_(aten, _foreach_mul_) \
|
183 |
+
_(aten, _foreach_neg) \
|
184 |
+
_(aten, _foreach_neg_) \
|
185 |
+
_(aten, _foreach_norm) \
|
186 |
+
_(aten, _foreach_pow) \
|
187 |
+
_(aten, _foreach_pow_) \
|
188 |
+
_(aten, _foreach_reciprocal) \
|
189 |
+
_(aten, _foreach_reciprocal_) \
|
190 |
+
_(aten, _foreach_round) \
|
191 |
+
_(aten, _foreach_round_) \
|
192 |
+
_(aten, _foreach_sigmoid) \
|
193 |
+
_(aten, _foreach_sigmoid_) \
|
194 |
+
_(aten, _foreach_sign) \
|
195 |
+
_(aten, _foreach_sign_) \
|
196 |
+
_(aten, _foreach_sin) \
|
197 |
+
_(aten, _foreach_sin_) \
|
198 |
+
_(aten, _foreach_sinh) \
|
199 |
+
_(aten, _foreach_sinh_) \
|
200 |
+
_(aten, _foreach_sqrt) \
|
201 |
+
_(aten, _foreach_sqrt_) \
|
202 |
+
_(aten, _foreach_sub) \
|
203 |
+
_(aten, _foreach_sub_) \
|
204 |
+
_(aten, _foreach_tan) \
|
205 |
+
_(aten, _foreach_tan_) \
|
206 |
+
_(aten, _foreach_tanh) \
|
207 |
+
_(aten, _foreach_tanh_) \
|
208 |
+
_(aten, _foreach_trunc) \
|
209 |
+
_(aten, _foreach_trunc_) \
|
210 |
+
_(aten, _foreach_zero) \
|
211 |
+
_(aten, _foreach_zero_) \
|
212 |
+
_(aten, _functional_assert_async) \
|
213 |
+
_(aten, _functional_assert_scalar) \
|
214 |
+
_(aten, _functional_sym_constrain_range) \
|
215 |
+
_(aten, _functional_sym_constrain_range_for_size) \
|
216 |
+
_(aten, _fused_adam) \
|
217 |
+
_(aten, _fused_adam_) \
|
218 |
+
_(aten, _fused_adamw) \
|
219 |
+
_(aten, _fused_adamw_) \
|
220 |
+
_(aten, _fused_dropout) \
|
221 |
+
_(aten, _fused_moving_avg_obs_fq_helper) \
|
222 |
+
_(aten, _fused_moving_avg_obs_fq_helper_functional) \
|
223 |
+
_(aten, _fused_sdp_choice) \
|
224 |
+
_(aten, _fused_sgd) \
|
225 |
+
_(aten, _fused_sgd_) \
|
226 |
+
_(aten, _fw_primal) \
|
227 |
+
_(aten, _fw_primal_copy) \
|
228 |
+
_(aten, _gather_sparse_backward) \
|
229 |
+
_(aten, _grid_sampler_2d_cpu_fallback) \
|
230 |
+
_(aten, _grid_sampler_2d_cpu_fallback_backward) \
|
231 |
+
_(aten, _has_compatible_shallow_copy_type) \
|
232 |
+
_(aten, _has_same_storage_numel) \
|
233 |
+
_(aten, _histogramdd_bin_edges) \
|
234 |
+
_(aten, _histogramdd_from_bin_cts) \
|
235 |
+
_(aten, _histogramdd_from_bin_tensors) \
|
236 |
+
_(aten, _index_put_impl) \
|
237 |
+
_(aten, _index_put_impl_) \
|
238 |
+
_(aten, _indices) \
|
239 |
+
_(aten, _indices_copy) \
|
240 |
+
_(aten, _int_mm) \
|
241 |
+
_(aten, _is_all_true) \
|
242 |
+
_(aten, _is_any_true) \
|
243 |
+
_(aten, _is_zerotensor) \
|
244 |
+
_(aten, _lazy_clone) \
|
245 |
+
_(aten, _linalg_check_errors) \
|
246 |
+
_(aten, _linalg_det) \
|
247 |
+
_(aten, _linalg_eigh) \
|
248 |
+
_(aten, _linalg_eigvals) \
|
249 |
+
_(aten, _linalg_slogdet) \
|
250 |
+
_(aten, _linalg_solve_ex) \
|
251 |
+
_(aten, _linalg_svd) \
|
252 |
+
_(aten, _local_scalar_dense) \
|
253 |
+
_(aten, _log_softmax) \
|
254 |
+
_(aten, _log_softmax_backward_data) \
|
255 |
+
_(aten, _logcumsumexp) \
|
256 |
+
_(aten, _lstm_mps) \
|
257 |
+
_(aten, _lu_with_info) \
|
258 |
+
_(aten, _make_dep_token) \
|
259 |
+
_(aten, _make_dual) \
|
260 |
+
_(aten, _make_dual_copy) \
|
261 |
+
_(aten, _make_per_channel_quantized_tensor) \
|
262 |
+
_(aten, _make_per_tensor_quantized_tensor) \
|
263 |
+
_(aten, _masked_scale) \
|
264 |
+
_(aten, _masked_softmax) \
|
265 |
+
_(aten, _masked_softmax_backward) \
|
266 |
+
_(aten, _mixed_dtypes_linear) \
|
267 |
+
_(aten, _mkldnn_reshape) \
|
268 |
+
_(aten, _mkldnn_transpose) \
|
269 |
+
_(aten, _mkldnn_transpose_) \
|
270 |
+
_(aten, _mps_convolution) \
|
271 |
+
_(aten, _mps_convolution_transpose) \
|
272 |
+
_(aten, _native_batch_norm_legit) \
|
273 |
+
_(aten, _native_batch_norm_legit_functional) \
|
274 |
+
_(aten, _native_batch_norm_legit_no_training) \
|
275 |
+
_(aten, _native_multi_head_attention) \
|
276 |
+
_(aten, _neg_view) \
|
277 |
+
_(aten, _neg_view_copy) \
|
278 |
+
_(aten, _nested_from_padded) \
|
279 |
+
_(aten, _nested_from_padded_and_nested_example) \
|
280 |
+
_(aten, _nested_get_jagged_dummy) \
|
281 |
+
_(aten, _nested_get_lengths) \
|
282 |
+
_(aten, _nested_get_offsets) \
|
283 |
+
_(aten, _nested_get_ragged_idx) \
|
284 |
+
_(aten, _nested_get_values) \
|
285 |
+
_(aten, _nested_get_values_copy) \
|
286 |
+
_(aten, _nested_select_backward) \
|
287 |
+
_(aten, _nested_sum_backward) \
|
288 |
+
_(aten, _nested_tensor_from_mask) \
|
289 |
+
_(aten, _nested_tensor_from_mask_left_aligned) \
|
290 |
+
_(aten, _nested_tensor_from_tensor_list) \
|
291 |
+
_(aten, _nested_tensor_size) \
|
292 |
+
_(aten, _nested_tensor_softmax_with_shape) \
|
293 |
+
_(aten, _nested_tensor_storage_offsets) \
|
294 |
+
_(aten, _nested_tensor_strides) \
|
295 |
+
_(aten, _nested_view_from_buffer) \
|
296 |
+
_(aten, _nested_view_from_buffer_copy) \
|
297 |
+
_(aten, _nested_view_from_jagged) \
|
298 |
+
_(aten, _nested_view_from_jagged_copy) \
|
299 |
+
_(aten, _new_zeros_with_same_feature_meta) \
|
300 |
+
_(aten, _nnpack_available) \
|
301 |
+
_(aten, _nnpack_spatial_convolution) \
|
302 |
+
_(aten, _nnz) \
|
303 |
+
_(aten, _pack_padded_sequence) \
|
304 |
+
_(aten, _pack_padded_sequence_backward) \
|
305 |
+
_(aten, _pad_circular) \
|
306 |
+
_(aten, _pad_enum) \
|
307 |
+
_(aten, _pad_packed_sequence) \
|
308 |
+
_(aten, _pdist_backward) \
|
309 |
+
_(aten, _pdist_forward) \
|
310 |
+
_(aten, _pin_memory) \
|
311 |
+
_(aten, _prelu_kernel) \
|
312 |
+
_(aten, _prelu_kernel_backward) \
|
313 |
+
_(aten, _print) \
|
314 |
+
_(aten, _propagate_xla_data) \
|
315 |
+
_(aten, _remove_batch_dim) \
|
316 |
+
_(aten, _reshape_alias) \
|
317 |
+
_(aten, _reshape_alias_copy) \
|
318 |
+
_(aten, _reshape_copy) \
|
319 |
+
_(aten, _reshape_from_tensor) \
|
320 |
+
_(aten, _resize_output) \
|
321 |
+
_(aten, _resize_output_) \
|
322 |
+
_(aten, _rowwise_prune) \
|
323 |
+
_(aten, _sample_dirichlet) \
|
324 |
+
_(aten, _saturate_weight_to_fp16) \
|
325 |
+
_(aten, _scaled_dot_product_attention_math) \
|
326 |
+
_(aten, _scaled_dot_product_cudnn_attention) \
|
327 |
+
_(aten, _scaled_dot_product_efficient_attention) \
|
328 |
+
_(aten, _scaled_dot_product_efficient_attention_backward) \
|
329 |
+
_(aten, _scaled_dot_product_flash_attention) \
|
330 |
+
_(aten, _scaled_dot_product_flash_attention_backward) \
|
331 |
+
_(aten, _scaled_dot_product_flash_attention_for_cpu) \
|
332 |
+
_(aten, _scaled_dot_product_flash_attention_for_cpu_backward) \
|
333 |
+
_(aten, _scaled_mm) \
|
334 |
+
_(aten, _segment_reduce_backward) \
|
335 |
+
_(aten, _shape_as_tensor) \
|
336 |
+
_(aten, _slow_conv2d_backward) \
|
337 |
+
_(aten, _slow_conv2d_forward) \
|
338 |
+
_(aten, _sobol_engine_draw) \
|
339 |
+
_(aten, _sobol_engine_ff) \
|
340 |
+
_(aten, _sobol_engine_ff_) \
|
341 |
+
_(aten, _sobol_engine_initialize_state) \
|
342 |
+
_(aten, _sobol_engine_initialize_state_) \
|
343 |
+
_(aten, _sobol_engine_scramble) \
|
344 |
+
_(aten, _sobol_engine_scramble_) \
|
345 |
+
_(aten, _softmax) \
|
346 |
+
_(aten, _softmax_backward_data) \
|
347 |
+
_(aten, _sparse_addmm) \
|
348 |
+
_(aten, _sparse_broadcast_to) \
|
349 |
+
_(aten, _sparse_broadcast_to_copy) \
|
350 |
+
_(aten, _sparse_bsc_tensor_unsafe) \
|
351 |
+
_(aten, _sparse_bsr_tensor_unsafe) \
|
352 |
+
_(aten, _sparse_compressed_tensor_unsafe) \
|
353 |
+
_(aten, _sparse_coo_tensor_unsafe) \
|
354 |
+
_(aten, _sparse_coo_tensor_with_dims) \
|
355 |
+
_(aten, _sparse_coo_tensor_with_dims_and_tensors) \
|
356 |
+
_(aten, _sparse_csc_tensor_unsafe) \
|
357 |
+
_(aten, _sparse_csr_prod) \
|
358 |
+
_(aten, _sparse_csr_sum) \
|
359 |
+
_(aten, _sparse_csr_tensor_unsafe) \
|
360 |
+
_(aten, _sparse_log_softmax) \
|
361 |
+
_(aten, _sparse_log_softmax_backward_data) \
|
362 |
+
_(aten, _sparse_mask_projection) \
|
363 |
+
_(aten, _sparse_mm) \
|
364 |
+
_(aten, _sparse_mm_reduce_impl) \
|
365 |
+
_(aten, _sparse_mm_reduce_impl_backward) \
|
366 |
+
_(aten, _sparse_semi_structured_linear) \
|
367 |
+
_(aten, _sparse_softmax) \
|
368 |
+
_(aten, _sparse_softmax_backward_data) \
|
369 |
+
_(aten, _sparse_sparse_matmul) \
|
370 |
+
_(aten, _sparse_sum) \
|
371 |
+
_(aten, _sparse_sum_backward) \
|
372 |
+
_(aten, _spdiags) \
|
373 |
+
_(aten, _stack) \
|
374 |
+
_(aten, _standard_gamma) \
|
375 |
+
_(aten, _standard_gamma_grad) \
|
376 |
+
_(aten, _test_ambiguous_defaults) \
|
377 |
+
_(aten, _test_autograd_multiple_dispatch) \
|
378 |
+
_(aten, _test_autograd_multiple_dispatch_view) \
|
379 |
+
_(aten, _test_autograd_multiple_dispatch_view_copy) \
|
380 |
+
_(aten, _test_check_tensor) \
|
381 |
+
_(aten, _test_functorch_fallback) \
|
382 |
+
_(aten, _test_optional_filled_intlist) \
|
383 |
+
_(aten, _test_optional_floatlist) \
|
384 |
+
_(aten, _test_optional_intlist) \
|
385 |
+
_(aten, _test_parallel_materialize) \
|
386 |
+
_(aten, _test_serialization_subcmul) \
|
387 |
+
_(aten, _test_string_default) \
|
388 |
+
_(aten, _test_warn_in_autograd) \
|
389 |
+
_(aten, _thnn_differentiable_gru_cell_backward) \
|
390 |
+
_(aten, _thnn_differentiable_lstm_cell_backward) \
|
391 |
+
_(aten, _thnn_fused_gru_cell) \
|
392 |
+
_(aten, _thnn_fused_gru_cell_backward) \
|
393 |
+
_(aten, _thnn_fused_lstm_cell) \
|
394 |
+
_(aten, _thnn_fused_lstm_cell_backward) \
|
395 |
+
_(aten, _thnn_fused_lstm_cell_backward_impl) \
|
396 |
+
_(aten, _to_copy) \
|
397 |
+
_(aten, _to_cpu) \
|
398 |
+
_(aten, _to_dense) \
|
399 |
+
_(aten, _to_sparse) \
|
400 |
+
_(aten, _to_sparse_bsc) \
|
401 |
+
_(aten, _to_sparse_bsr) \
|
402 |
+
_(aten, _to_sparse_csc) \
|
403 |
+
_(aten, _to_sparse_csr) \
|
404 |
+
_(aten, _to_sparse_semi_structured) \
|
405 |
+
_(aten, _transform_bias_rescale_qkv) \
|
406 |
+
_(aten, _transformer_encoder_layer_fwd) \
|
407 |
+
_(aten, _trilinear) \
|
408 |
+
_(aten, _triton_multi_head_attention) \
|
409 |
+
_(aten, _triton_scaled_dot_attention) \
|
410 |
+
_(aten, _unique) \
|
411 |
+
_(aten, _unique2) \
|
412 |
+
_(aten, _unpack_dual) \
|
413 |
+
_(aten, _unsafe_index) \
|
414 |
+
_(aten, _unsafe_index_put) \
|
415 |
+
_(aten, _unsafe_view) \
|
416 |
+
_(aten, _upsample_bicubic2d_aa) \
|
417 |
+
_(aten, _upsample_bicubic2d_aa_backward) \
|
418 |
+
_(aten, _upsample_bilinear2d_aa) \
|
419 |
+
_(aten, _upsample_bilinear2d_aa_backward) \
|
420 |
+
_(aten, _upsample_nearest_exact1d) \
|
421 |
+
_(aten, _upsample_nearest_exact1d_backward) \
|
422 |
+
_(aten, _upsample_nearest_exact2d) \
|
423 |
+
_(aten, _upsample_nearest_exact2d_backward) \
|
424 |
+
_(aten, _upsample_nearest_exact3d) \
|
425 |
+
_(aten, _upsample_nearest_exact3d_backward) \
|
426 |
+
_(aten, _use_cudnn_ctc_loss) \
|
427 |
+
_(aten, _use_cudnn_rnn_flatten_weight) \
|
428 |
+
_(aten, _validate_compressed_sparse_indices) \
|
429 |
+
_(aten, _validate_sparse_bsc_tensor_args) \
|
430 |
+
_(aten, _validate_sparse_bsr_tensor_args) \
|
431 |
+
_(aten, _validate_sparse_compressed_tensor_args) \
|
432 |
+
_(aten, _validate_sparse_coo_tensor_args) \
|
433 |
+
_(aten, _validate_sparse_csc_tensor_args) \
|
434 |
+
_(aten, _validate_sparse_csr_tensor_args) \
|
435 |
+
_(aten, _values) \
|
436 |
+
_(aten, _values_copy) \
|
437 |
+
_(aten, _version) \
|
438 |
+
_(aten, _weight_int4pack_mm) \
|
439 |
+
_(aten, _weight_int8pack_mm) \
|
440 |
+
_(aten, _weight_norm) \
|
441 |
+
_(aten, _weight_norm_differentiable_backward) \
|
442 |
+
_(aten, _weight_norm_interface) \
|
443 |
+
_(aten, _weight_norm_interface_backward) \
|
444 |
+
_(aten, abs) \
|
445 |
+
_(aten, abs_) \
|
446 |
+
_(aten, absolute) \
|
447 |
+
_(aten, absolute_) \
|
448 |
+
_(aten, acos) \
|
449 |
+
_(aten, acos_) \
|
450 |
+
_(aten, acosh) \
|
451 |
+
_(aten, acosh_) \
|
452 |
+
_(aten, adaptive_avg_pool1d) \
|
453 |
+
_(aten, adaptive_avg_pool2d) \
|
454 |
+
_(aten, adaptive_avg_pool3d) \
|
455 |
+
_(aten, adaptive_avg_pool3d_backward) \
|
456 |
+
_(aten, adaptive_max_pool1d) \
|
457 |
+
_(aten, adaptive_max_pool2d) \
|
458 |
+
_(aten, adaptive_max_pool2d_backward) \
|
459 |
+
_(aten, adaptive_max_pool3d) \
|
460 |
+
_(aten, adaptive_max_pool3d_backward) \
|
461 |
+
_(aten, add) \
|
462 |
+
_(aten, add_) \
|
463 |
+
_(aten, addbmm) \
|
464 |
+
_(aten, addbmm_) \
|
465 |
+
_(aten, addcdiv) \
|
466 |
+
_(aten, addcdiv_) \
|
467 |
+
_(aten, addcmul) \
|
468 |
+
_(aten, addcmul_) \
|
469 |
+
_(aten, addmm) \
|
470 |
+
_(aten, addmm_) \
|
471 |
+
_(aten, addmv) \
|
472 |
+
_(aten, addmv_) \
|
473 |
+
_(aten, addr) \
|
474 |
+
_(aten, addr_) \
|
475 |
+
_(aten, adjoint) \
|
476 |
+
_(aten, affine_grid_generator) \
|
477 |
+
_(aten, affine_grid_generator_backward) \
|
478 |
+
_(aten, alias) \
|
479 |
+
_(aten, alias_copy) \
|
480 |
+
_(aten, align_as) \
|
481 |
+
_(aten, align_tensors) \
|
482 |
+
_(aten, align_to) \
|
483 |
+
_(aten, all) \
|
484 |
+
_(aten, allclose) \
|
485 |
+
_(aten, alpha_dropout) \
|
486 |
+
_(aten, alpha_dropout_) \
|
487 |
+
_(aten, amax) \
|
488 |
+
_(aten, amin) \
|
489 |
+
_(aten, aminmax) \
|
490 |
+
_(aten, angle) \
|
491 |
+
_(aten, any) \
|
492 |
+
_(aten, arange) \
|
493 |
+
_(aten, arccos) \
|
494 |
+
_(aten, arccos_) \
|
495 |
+
_(aten, arccosh) \
|
496 |
+
_(aten, arccosh_) \
|
497 |
+
_(aten, arcsin) \
|
498 |
+
_(aten, arcsin_) \
|
499 |
+
_(aten, arcsinh) \
|
500 |
+
_(aten, arcsinh_) \
|
501 |
+
_(aten, arctan) \
|
502 |
+
_(aten, arctan2) \
|
503 |
+
_(aten, arctan2_) \
|
504 |
+
_(aten, arctan_) \
|
505 |
+
_(aten, arctanh) \
|
506 |
+
_(aten, arctanh_) \
|
507 |
+
_(aten, argmax) \
|
508 |
+
_(aten, argmin) \
|
509 |
+
_(aten, argsort) \
|
510 |
+
_(aten, argwhere) \
|
511 |
+
_(aten, as_strided) \
|
512 |
+
_(aten, as_strided_) \
|
513 |
+
_(aten, as_strided_copy) \
|
514 |
+
_(aten, as_strided_scatter) \
|
515 |
+
_(aten, asin) \
|
516 |
+
_(aten, asin_) \
|
517 |
+
_(aten, asinh) \
|
518 |
+
_(aten, asinh_) \
|
519 |
+
_(aten, atan) \
|
520 |
+
_(aten, atan2) \
|
521 |
+
_(aten, atan2_) \
|
522 |
+
_(aten, atan_) \
|
523 |
+
_(aten, atanh) \
|
524 |
+
_(aten, atanh_) \
|
525 |
+
_(aten, atleast_1d) \
|
526 |
+
_(aten, atleast_2d) \
|
527 |
+
_(aten, atleast_3d) \
|
528 |
+
_(aten, avg_pool1d) \
|
529 |
+
_(aten, avg_pool2d) \
|
530 |
+
_(aten, avg_pool2d_backward) \
|
531 |
+
_(aten, avg_pool3d) \
|
532 |
+
_(aten, avg_pool3d_backward) \
|
533 |
+
_(aten, baddbmm) \
|
534 |
+
_(aten, baddbmm_) \
|
535 |
+
_(aten, bartlett_window) \
|
536 |
+
_(aten, batch_norm) \
|
537 |
+
_(aten, batch_norm_backward_elemt) \
|
538 |
+
_(aten, batch_norm_backward_reduce) \
|
539 |
+
_(aten, batch_norm_elemt) \
|
540 |
+
_(aten, batch_norm_gather_stats) \
|
541 |
+
_(aten, batch_norm_gather_stats_with_counts) \
|
542 |
+
_(aten, batch_norm_stats) \
|
543 |
+
_(aten, batch_norm_update_stats) \
|
544 |
+
_(aten, bernoulli) \
|
545 |
+
_(aten, bernoulli_) \
|
546 |
+
_(aten, bilinear) \
|
547 |
+
_(aten, binary_cross_entropy) \
|
548 |
+
_(aten, binary_cross_entropy_backward) \
|
549 |
+
_(aten, binary_cross_entropy_with_logits) \
|
550 |
+
_(aten, bincount) \
|
551 |
+
_(aten, binomial) \
|
552 |
+
_(aten, bitwise_and) \
|
553 |
+
_(aten, bitwise_and_) \
|
554 |
+
_(aten, bitwise_left_shift) \
|
555 |
+
_(aten, bitwise_left_shift_) \
|
556 |
+
_(aten, bitwise_not) \
|
557 |
+
_(aten, bitwise_not_) \
|
558 |
+
_(aten, bitwise_or) \
|
559 |
+
_(aten, bitwise_or_) \
|
560 |
+
_(aten, bitwise_right_shift) \
|
561 |
+
_(aten, bitwise_right_shift_) \
|
562 |
+
_(aten, bitwise_xor) \
|
563 |
+
_(aten, bitwise_xor_) \
|
564 |
+
_(aten, blackman_window) \
|
565 |
+
_(aten, block_diag) \
|
566 |
+
_(aten, bmm) \
|
567 |
+
_(aten, broadcast_tensors) \
|
568 |
+
_(aten, broadcast_to) \
|
569 |
+
_(aten, bucketize) \
|
570 |
+
_(aten, can_cast) \
|
571 |
+
_(aten, cartesian_prod) \
|
572 |
+
_(aten, cat) \
|
573 |
+
_(aten, cauchy) \
|
574 |
+
_(aten, cauchy_) \
|
575 |
+
_(aten, ccol_indices) \
|
576 |
+
_(aten, ccol_indices_copy) \
|
577 |
+
_(aten, cdist) \
|
578 |
+
_(aten, ceil) \
|
579 |
+
_(aten, ceil_) \
|
580 |
+
_(aten, celu) \
|
581 |
+
_(aten, celu_) \
|
582 |
+
_(aten, chain_matmul) \
|
583 |
+
_(aten, chalf) \
|
584 |
+
_(aten, channel_shuffle) \
|
585 |
+
_(aten, cholesky) \
|
586 |
+
_(aten, cholesky_inverse) \
|
587 |
+
_(aten, cholesky_solve) \
|
588 |
+
_(aten, choose_qparams_optimized) \
|
589 |
+
_(aten, chunk) \
|
590 |
+
_(aten, clamp) \
|
591 |
+
_(aten, clamp_) \
|
592 |
+
_(aten, clamp_max) \
|
593 |
+
_(aten, clamp_max_) \
|
594 |
+
_(aten, clamp_min) \
|
595 |
+
_(aten, clamp_min_) \
|
596 |
+
_(aten, clip) \
|
597 |
+
_(aten, clip_) \
|
598 |
+
_(aten, clone) \
|
599 |
+
_(aten, coalesce) \
|
600 |
+
_(aten, col2im) \
|
601 |
+
_(aten, col_indices) \
|
602 |
+
_(aten, col_indices_copy) \
|
603 |
+
_(aten, column_stack) \
|
604 |
+
_(aten, combinations) \
|
605 |
+
_(aten, complex) \
|
606 |
+
_(aten, concat) \
|
607 |
+
_(aten, concatenate) \
|
608 |
+
_(aten, conj) \
|
609 |
+
_(aten, conj_physical) \
|
610 |
+
_(aten, conj_physical_) \
|
611 |
+
_(aten, constant_pad_nd) \
|
612 |
+
_(aten, contiguous) \
|
613 |
+
_(aten, conv1d) \
|
614 |
+
_(aten, conv2d) \
|
615 |
+
_(aten, conv3d) \
|
616 |
+
_(aten, conv_depthwise3d) \
|
617 |
+
_(aten, conv_tbc) \
|
618 |
+
_(aten, conv_tbc_backward) \
|
619 |
+
_(aten, conv_transpose1d) \
|
620 |
+
_(aten, conv_transpose2d) \
|
621 |
+
_(aten, conv_transpose3d) \
|
622 |
+
_(aten, convolution) \
|
623 |
+
_(aten, convolution_backward) \
|
624 |
+
_(aten, convolution_backward_overrideable) \
|
625 |
+
_(aten, convolution_overrideable) \
|
626 |
+
_(aten, copy) \
|
627 |
+
_(aten, copy_) \
|
628 |
+
_(aten, copy_sparse_to_sparse) \
|
629 |
+
_(aten, copy_sparse_to_sparse_) \
|
630 |
+
_(aten, copysign) \
|
631 |
+
_(aten, copysign_) \
|
632 |
+
_(aten, corrcoef) \
|
633 |
+
_(aten, cos) \
|
634 |
+
_(aten, cos_) \
|
635 |
+
_(aten, cosh) \
|
636 |
+
_(aten, cosh_) \
|
637 |
+
_(aten, cosine_embedding_loss) \
|
638 |
+
_(aten, cosine_similarity) \
|
639 |
+
_(aten, count_nonzero) \
|
640 |
+
_(aten, cov) \
|
641 |
+
_(aten, cross) \
|
642 |
+
_(aten, cross_entropy_loss) \
|
643 |
+
_(aten, crow_indices) \
|
644 |
+
_(aten, crow_indices_copy) \
|
645 |
+
_(aten, ctc_loss) \
|
646 |
+
_(aten, cudnn_affine_grid_generator) \
|
647 |
+
_(aten, cudnn_affine_grid_generator_backward) \
|
648 |
+
_(aten, cudnn_batch_norm) \
|
649 |
+
_(aten, cudnn_batch_norm_backward) \
|
650 |
+
_(aten, cudnn_convolution) \
|
651 |
+
_(aten, cudnn_convolution_add_relu) \
|
652 |
+
_(aten, cudnn_convolution_relu) \
|
653 |
+
_(aten, cudnn_convolution_transpose) \
|
654 |
+
_(aten, cudnn_grid_sampler) \
|
655 |
+
_(aten, cudnn_grid_sampler_backward) \
|
656 |
+
_(aten, cudnn_is_acceptable) \
|
657 |
+
_(aten, cummax) \
|
658 |
+
_(aten, cummaxmin_backward) \
|
659 |
+
_(aten, cummin) \
|
660 |
+
_(aten, cumprod) \
|
661 |
+
_(aten, cumprod_) \
|
662 |
+
_(aten, cumprod_backward) \
|
663 |
+
_(aten, cumsum) \
|
664 |
+
_(aten, cumsum_) \
|
665 |
+
_(aten, cumulative_trapezoid) \
|
666 |
+
_(aten, data) \
|
667 |
+
_(aten, deg2rad) \
|
668 |
+
_(aten, deg2rad_) \
|
669 |
+
_(aten, dense_dim) \
|
670 |
+
_(aten, dequantize) \
|
671 |
+
_(aten, det) \
|
672 |
+
_(aten, detach) \
|
673 |
+
_(aten, detach_) \
|
674 |
+
_(aten, detach_copy) \
|
675 |
+
_(aten, diag) \
|
676 |
+
_(aten, diag_embed) \
|
677 |
+
_(aten, diagflat) \
|
678 |
+
_(aten, diagonal) \
|
679 |
+
_(aten, diagonal_backward) \
|
680 |
+
_(aten, diagonal_copy) \
|
681 |
+
_(aten, diagonal_scatter) \
|
682 |
+
_(aten, diff) \
|
683 |
+
_(aten, digamma) \
|
684 |
+
_(aten, digamma_) \
|
685 |
+
_(aten, dist) \
|
686 |
+
_(aten, div) \
|
687 |
+
_(aten, div_) \
|
688 |
+
_(aten, divide) \
|
689 |
+
_(aten, divide_) \
|
690 |
+
_(aten, dot) \
|
691 |
+
_(aten, dropout) \
|
692 |
+
_(aten, dropout_) \
|
693 |
+
_(aten, dsplit) \
|
694 |
+
_(aten, dstack) \
|
695 |
+
_(aten, einsum) \
|
696 |
+
_(aten, elu) \
|
697 |
+
_(aten, elu_) \
|
698 |
+
_(aten, elu_backward) \
|
699 |
+
_(aten, embedding) \
|
700 |
+
_(aten, embedding_backward) \
|
701 |
+
_(aten, embedding_bag) \
|
702 |
+
_(aten, embedding_dense_backward) \
|
703 |
+
_(aten, embedding_renorm) \
|
704 |
+
_(aten, embedding_renorm_) \
|
705 |
+
_(aten, embedding_sparse_backward) \
|
706 |
+
_(aten, empty) \
|
707 |
+
_(aten, empty_like) \
|
708 |
+
_(aten, empty_permuted) \
|
709 |
+
_(aten, empty_quantized) \
|
710 |
+
_(aten, empty_strided) \
|
711 |
+
_(aten, eq) \
|
712 |
+
_(aten, eq_) \
|
713 |
+
_(aten, equal) \
|
714 |
+
_(aten, erf) \
|
715 |
+
_(aten, erf_) \
|
716 |
+
_(aten, erfc) \
|
717 |
+
_(aten, erfc_) \
|
718 |
+
_(aten, erfinv) \
|
719 |
+
_(aten, erfinv_) \
|
720 |
+
_(aten, exp) \
|
721 |
+
_(aten, exp2) \
|
722 |
+
_(aten, exp2_) \
|
723 |
+
_(aten, exp_) \
|
724 |
+
_(aten, expand) \
|
725 |
+
_(aten, expand_as) \
|
726 |
+
_(aten, expand_copy) \
|
727 |
+
_(aten, expm1) \
|
728 |
+
_(aten, expm1_) \
|
729 |
+
_(aten, exponential) \
|
730 |
+
_(aten, exponential_) \
|
731 |
+
_(aten, eye) \
|
732 |
+
_(aten, fake_quantize_per_channel_affine) \
|
733 |
+
_(aten, fake_quantize_per_channel_affine_cachemask) \
|
734 |
+
_(aten, fake_quantize_per_channel_affine_cachemask_backward) \
|
735 |
+
_(aten, fake_quantize_per_tensor_affine) \
|
736 |
+
_(aten, fake_quantize_per_tensor_affine_cachemask) \
|
737 |
+
_(aten, fake_quantize_per_tensor_affine_cachemask_backward) \
|
738 |
+
_(aten, fbgemm_linear_fp16_weight) \
|
739 |
+
_(aten, fbgemm_linear_fp16_weight_fp32_activation) \
|
740 |
+
_(aten, fbgemm_linear_int8_weight) \
|
741 |
+
_(aten, fbgemm_linear_int8_weight_fp32_activation) \
|
742 |
+
_(aten, fbgemm_linear_quantize_weight) \
|
743 |
+
_(aten, fbgemm_pack_gemm_matrix_fp16) \
|
744 |
+
_(aten, fbgemm_pack_quantized_matrix) \
|
745 |
+
_(aten, feature_alpha_dropout) \
|
746 |
+
_(aten, feature_alpha_dropout_) \
|
747 |
+
_(aten, feature_dropout) \
|
748 |
+
_(aten, feature_dropout_) \
|
749 |
+
_(aten, fft_fft) \
|
750 |
+
_(aten, fft_fft2) \
|
751 |
+
_(aten, fft_fftfreq) \
|
752 |
+
_(aten, fft_fftn) \
|
753 |
+
_(aten, fft_fftshift) \
|
754 |
+
_(aten, fft_hfft) \
|
755 |
+
_(aten, fft_hfft2) \
|
756 |
+
_(aten, fft_hfftn) \
|
757 |
+
_(aten, fft_ifft) \
|
758 |
+
_(aten, fft_ifft2) \
|
759 |
+
_(aten, fft_ifftn) \
|
760 |
+
_(aten, fft_ifftshift) \
|
761 |
+
_(aten, fft_ihfft) \
|
762 |
+
_(aten, fft_ihfft2) \
|
763 |
+
_(aten, fft_ihfftn) \
|
764 |
+
_(aten, fft_irfft) \
|
765 |
+
_(aten, fft_irfft2) \
|
766 |
+
_(aten, fft_irfftn) \
|
767 |
+
_(aten, fft_rfft) \
|
768 |
+
_(aten, fft_rfft2) \
|
769 |
+
_(aten, fft_rfftfreq) \
|
770 |
+
_(aten, fft_rfftn) \
|
771 |
+
_(aten, fill) \
|
772 |
+
_(aten, fill_) \
|
773 |
+
_(aten, fill_diagonal) \
|
774 |
+
_(aten, fill_diagonal_) \
|
775 |
+
_(aten, fix) \
|
776 |
+
_(aten, fix_) \
|
777 |
+
_(aten, flatten) \
|
778 |
+
_(aten, flatten_dense_tensors) \
|
779 |
+
_(aten, flip) \
|
780 |
+
_(aten, fliplr) \
|
781 |
+
_(aten, flipud) \
|
782 |
+
_(aten, float_power) \
|
783 |
+
_(aten, float_power_) \
|
784 |
+
_(aten, floor) \
|
785 |
+
_(aten, floor_) \
|
786 |
+
_(aten, floor_divide) \
|
787 |
+
_(aten, floor_divide_) \
|
788 |
+
_(aten, fmax) \
|
789 |
+
_(aten, fmin) \
|
790 |
+
_(aten, fmod) \
|
791 |
+
_(aten, fmod_) \
|
792 |
+
_(aten, frac) \
|
793 |
+
_(aten, frac_) \
|
794 |
+
_(aten, fractional_max_pool2d) \
|
795 |
+
_(aten, fractional_max_pool2d_backward) \
|
796 |
+
_(aten, fractional_max_pool3d) \
|
797 |
+
_(aten, fractional_max_pool3d_backward) \
|
798 |
+
_(aten, frexp) \
|
799 |
+
_(aten, frobenius_norm) \
|
800 |
+
_(aten, from_file) \
|
801 |
+
_(aten, full) \
|
802 |
+
_(aten, full_like) \
|
803 |
+
_(aten, fused_moving_avg_obs_fake_quant) \
|
804 |
+
_(aten, gather) \
|
805 |
+
_(aten, gather_backward) \
|
806 |
+
_(aten, gcd) \
|
807 |
+
_(aten, gcd_) \
|
808 |
+
_(aten, ge) \
|
809 |
+
_(aten, ge_) \
|
810 |
+
_(aten, gelu) \
|
811 |
+
_(aten, gelu_) \
|
812 |
+
_(aten, gelu_backward) \
|
813 |
+
_(aten, geometric) \
|
814 |
+
_(aten, geometric_) \
|
815 |
+
_(aten, geqrf) \
|
816 |
+
_(aten, ger) \
|
817 |
+
_(aten, glu) \
|
818 |
+
_(aten, glu_backward) \
|
819 |
+
_(aten, glu_backward_jvp) \
|
820 |
+
_(aten, glu_jvp) \
|
821 |
+
_(aten, gradient) \
|
822 |
+
_(aten, greater) \
|
823 |
+
_(aten, greater_) \
|
824 |
+
_(aten, greater_equal) \
|
825 |
+
_(aten, greater_equal_) \
|
826 |
+
_(aten, grid_sampler) \
|
827 |
+
_(aten, grid_sampler_2d) \
|
828 |
+
_(aten, grid_sampler_2d_backward) \
|
829 |
+
_(aten, grid_sampler_3d) \
|
830 |
+
_(aten, grid_sampler_3d_backward) \
|
831 |
+
_(aten, group_norm) \
|
832 |
+
_(aten, gru) \
|
833 |
+
_(aten, gru_cell) \
|
834 |
+
_(aten, gt) \
|
835 |
+
_(aten, gt_) \
|
836 |
+
_(aten, hamming_window) \
|
837 |
+
_(aten, hann_window) \
|
838 |
+
_(aten, hardshrink) \
|
839 |
+
_(aten, hardshrink_backward) \
|
840 |
+
_(aten, hardsigmoid) \
|
841 |
+
_(aten, hardsigmoid_) \
|
842 |
+
_(aten, hardsigmoid_backward) \
|
843 |
+
_(aten, hardswish) \
|
844 |
+
_(aten, hardswish_) \
|
845 |
+
_(aten, hardswish_backward) \
|
846 |
+
_(aten, hardtanh) \
|
847 |
+
_(aten, hardtanh_) \
|
848 |
+
_(aten, hardtanh_backward) \
|
849 |
+
_(aten, heaviside) \
|
850 |
+
_(aten, heaviside_) \
|
851 |
+
_(aten, hinge_embedding_loss) \
|
852 |
+
_(aten, histc) \
|
853 |
+
_(aten, histogram) \
|
854 |
+
_(aten, histogramdd) \
|
855 |
+
_(aten, hsplit) \
|
856 |
+
_(aten, hspmm) \
|
857 |
+
_(aten, hstack) \
|
858 |
+
_(aten, huber_loss) \
|
859 |
+
_(aten, huber_loss_backward) \
|
860 |
+
_(aten, hypot) \
|
861 |
+
_(aten, hypot_) \
|
862 |
+
_(aten, i0) \
|
863 |
+
_(aten, i0_) \
|
864 |
+
_(aten, igamma) \
|
865 |
+
_(aten, igamma_) \
|
866 |
+
_(aten, igammac) \
|
867 |
+
_(aten, igammac_) \
|
868 |
+
_(aten, im2col) \
|
869 |
+
_(aten, imag) \
|
870 |
+
_(aten, index) \
|
871 |
+
_(aten, index_add) \
|
872 |
+
_(aten, index_add_) \
|
873 |
+
_(aten, index_copy) \
|
874 |
+
_(aten, index_copy_) \
|
875 |
+
_(aten, index_fill) \
|
876 |
+
_(aten, index_fill_) \
|
877 |
+
_(aten, index_put) \
|
878 |
+
_(aten, index_put_) \
|
879 |
+
_(aten, index_reduce) \
|
880 |
+
_(aten, index_reduce_) \
|
881 |
+
_(aten, index_select) \
|
882 |
+
_(aten, index_select_backward) \
|
883 |
+
_(aten, indices) \
|
884 |
+
_(aten, indices_copy) \
|
885 |
+
_(aten, infinitely_differentiable_gelu_backward) \
|
886 |
+
_(aten, inner) \
|
887 |
+
_(aten, instance_norm) \
|
888 |
+
_(aten, int_repr) \
|
889 |
+
_(aten, inverse) \
|
890 |
+
_(aten, is_coalesced) \
|
891 |
+
_(aten, is_complex) \
|
892 |
+
_(aten, is_conj) \
|
893 |
+
_(aten, is_distributed) \
|
894 |
+
_(aten, is_floating_point) \
|
895 |
+
_(aten, is_inference) \
|
896 |
+
_(aten, is_leaf) \
|
897 |
+
_(aten, is_neg) \
|
898 |
+
_(aten, is_nonzero) \
|
899 |
+
_(aten, is_pinned) \
|
900 |
+
_(aten, is_same_size) \
|
901 |
+
_(aten, is_set_to) \
|
902 |
+
_(aten, is_signed) \
|
903 |
+
_(aten, is_vulkan_available) \
|
904 |
+
_(aten, isclose) \
|
905 |
+
_(aten, isfinite) \
|
906 |
+
_(aten, isin) \
|
907 |
+
_(aten, isinf) \
|
908 |
+
_(aten, isnan) \
|
909 |
+
_(aten, isneginf) \
|
910 |
+
_(aten, isposinf) \
|
911 |
+
_(aten, isreal) \
|
912 |
+
_(aten, istft) \
|
913 |
+
_(aten, item) \
|
914 |
+
_(aten, kaiser_window) \
|
915 |
+
_(aten, kl_div) \
|
916 |
+
_(aten, kron) \
|
917 |
+
_(aten, kthvalue) \
|
918 |
+
_(aten, l1_loss) \
|
919 |
+
_(aten, layer_norm) \
|
920 |
+
_(aten, lcm) \
|
921 |
+
_(aten, lcm_) \
|
922 |
+
_(aten, ldexp) \
|
923 |
+
_(aten, ldexp_) \
|
924 |
+
_(aten, le) \
|
925 |
+
_(aten, le_) \
|
926 |
+
_(aten, leaky_relu) \
|
927 |
+
_(aten, leaky_relu_) \
|
928 |
+
_(aten, leaky_relu_backward) \
|
929 |
+
_(aten, lerp) \
|
930 |
+
_(aten, lerp_) \
|
931 |
+
_(aten, less) \
|
932 |
+
_(aten, less_) \
|
933 |
+
_(aten, less_equal) \
|
934 |
+
_(aten, less_equal_) \
|
935 |
+
_(aten, lgamma) \
|
936 |
+
_(aten, lgamma_) \
|
937 |
+
_(aten, lift) \
|
938 |
+
_(aten, lift_fresh) \
|
939 |
+
_(aten, lift_fresh_copy) \
|
940 |
+
_(aten, linalg_cholesky) \
|
941 |
+
_(aten, linalg_cholesky_ex) \
|
942 |
+
_(aten, linalg_cond) \
|
943 |
+
_(aten, linalg_cross) \
|
944 |
+
_(aten, linalg_det) \
|
945 |
+
_(aten, linalg_diagonal) \
|
946 |
+
_(aten, linalg_eig) \
|
947 |
+
_(aten, linalg_eigh) \
|
948 |
+
_(aten, linalg_eigvals) \
|
949 |
+
_(aten, linalg_eigvalsh) \
|
950 |
+
_(aten, linalg_householder_product) \
|
951 |
+
_(aten, linalg_inv) \
|
952 |
+
_(aten, linalg_inv_ex) \
|
953 |
+
_(aten, linalg_ldl_factor) \
|
954 |
+
_(aten, linalg_ldl_factor_ex) \
|
955 |
+
_(aten, linalg_ldl_solve) \
|
956 |
+
_(aten, linalg_lstsq) \
|
957 |
+
_(aten, linalg_lu) \
|
958 |
+
_(aten, linalg_lu_factor) \
|
959 |
+
_(aten, linalg_lu_factor_ex) \
|
960 |
+
_(aten, linalg_lu_solve) \
|
961 |
+
_(aten, linalg_matmul) \
|
962 |
+
_(aten, linalg_matrix_exp) \
|
963 |
+
_(aten, linalg_matrix_norm) \
|
964 |
+
_(aten, linalg_matrix_power) \
|
965 |
+
_(aten, linalg_matrix_rank) \
|
966 |
+
_(aten, linalg_multi_dot) \
|
967 |
+
_(aten, linalg_norm) \
|
968 |
+
_(aten, linalg_pinv) \
|
969 |
+
_(aten, linalg_qr) \
|
970 |
+
_(aten, linalg_slogdet) \
|
971 |
+
_(aten, linalg_solve) \
|
972 |
+
_(aten, linalg_solve_ex) \
|
973 |
+
_(aten, linalg_solve_triangular) \
|
974 |
+
_(aten, linalg_svd) \
|
975 |
+
_(aten, linalg_svdvals) \
|
976 |
+
_(aten, linalg_tensorinv) \
|
977 |
+
_(aten, linalg_tensorsolve) \
|
978 |
+
_(aten, linalg_vander) \
|
979 |
+
_(aten, linalg_vecdot) \
|
980 |
+
_(aten, linalg_vector_norm) \
|
981 |
+
_(aten, linear) \
|
982 |
+
_(aten, linear_backward) \
|
983 |
+
_(aten, linspace) \
|
984 |
+
_(aten, log) \
|
985 |
+
_(aten, log10) \
|
986 |
+
_(aten, log10_) \
|
987 |
+
_(aten, log1p) \
|
988 |
+
_(aten, log1p_) \
|
989 |
+
_(aten, log2) \
|
990 |
+
_(aten, log2_) \
|
991 |
+
_(aten, log_) \
|
992 |
+
_(aten, log_normal) \
|
993 |
+
_(aten, log_normal_) \
|
994 |
+
_(aten, log_sigmoid) \
|
995 |
+
_(aten, log_sigmoid_backward) \
|
996 |
+
_(aten, log_sigmoid_forward) \
|
997 |
+
_(aten, log_softmax) \
|
998 |
+
_(aten, logaddexp) \
|
999 |
+
_(aten, logaddexp2) \
|
1000 |
+
_(aten, logcumsumexp) \
|
1001 |
+
_(aten, logdet) \
|
1002 |
+
_(aten, logical_and) \
|
1003 |
+
_(aten, logical_and_) \
|
1004 |
+
_(aten, logical_not) \
|
1005 |
+
_(aten, logical_not_) \
|
1006 |
+
_(aten, logical_or) \
|
1007 |
+
_(aten, logical_or_) \
|
1008 |
+
_(aten, logical_xor) \
|
1009 |
+
_(aten, logical_xor_) \
|
1010 |
+
_(aten, logit) \
|
1011 |
+
_(aten, logit_) \
|
1012 |
+
_(aten, logit_backward) \
|
1013 |
+
_(aten, logspace) \
|
1014 |
+
_(aten, logsumexp) \
|
1015 |
+
_(aten, lshift) \
|
1016 |
+
_(aten, lstm) \
|
1017 |
+
_(aten, lstm_cell) \
|
1018 |
+
_(aten, lstm_mps_backward) \
|
1019 |
+
_(aten, lt) \
|
1020 |
+
_(aten, lt_) \
|
1021 |
+
_(aten, lu_solve) \
|
1022 |
+
_(aten, lu_unpack) \
|
1023 |
+
_(aten, mH) \
|
1024 |
+
_(aten, mT) \
|
1025 |
+
_(aten, margin_ranking_loss) \
|
1026 |
+
_(aten, masked_fill) \
|
1027 |
+
_(aten, masked_fill_) \
|
1028 |
+
_(aten, masked_scatter) \
|
1029 |
+
_(aten, masked_scatter_) \
|
1030 |
+
_(aten, masked_scatter_backward) \
|
1031 |
+
_(aten, masked_select) \
|
1032 |
+
_(aten, masked_select_backward) \
|
1033 |
+
_(aten, matmul) \
|
1034 |
+
_(aten, matmul_backward) \
|
1035 |
+
_(aten, matrix_H) \
|
1036 |
+
_(aten, matrix_exp) \
|
1037 |
+
_(aten, matrix_exp_backward) \
|
1038 |
+
_(aten, matrix_power) \
|
1039 |
+
_(aten, max) \
|
1040 |
+
_(aten, max_pool1d) \
|
1041 |
+
_(aten, max_pool1d_with_indices) \
|
1042 |
+
_(aten, max_pool2d) \
|
1043 |
+
_(aten, max_pool2d_backward) \
|
1044 |
+
_(aten, max_pool2d_with_indices) \
|
1045 |
+
_(aten, max_pool2d_with_indices_backward) \
|
1046 |
+
_(aten, max_pool3d) \
|
1047 |
+
_(aten, max_pool3d_with_indices) \
|
1048 |
+
_(aten, max_pool3d_with_indices_backward) \
|
1049 |
+
_(aten, max_unpool2d) \
|
1050 |
+
_(aten, max_unpool3d) \
|
1051 |
+
_(aten, maximum) \
|
1052 |
+
_(aten, mean) \
|
1053 |
+
_(aten, median) \
|
1054 |
+
_(aten, meshgrid) \
|
1055 |
+
_(aten, min) \
|
1056 |
+
_(aten, minimum) \
|
1057 |
+
_(aten, miopen_batch_norm) \
|
1058 |
+
_(aten, miopen_batch_norm_backward) \
|
1059 |
+
_(aten, miopen_convolution) \
|
1060 |
+
_(aten, miopen_convolution_add_relu) \
|
1061 |
+
_(aten, miopen_convolution_relu) \
|
1062 |
+
_(aten, miopen_convolution_transpose) \
|
1063 |
+
_(aten, miopen_depthwise_convolution) \
|
1064 |
+
_(aten, miopen_rnn) \
|
1065 |
+
_(aten, miopen_rnn_backward) \
|
1066 |
+
_(aten, mish) \
|
1067 |
+
_(aten, mish_) \
|
1068 |
+
_(aten, mish_backward) \
|
1069 |
+
_(aten, mkldnn_adaptive_avg_pool2d) \
|
1070 |
+
_(aten, mkldnn_adaptive_avg_pool2d_backward) \
|
1071 |
+
_(aten, mkldnn_convolution) \
|
1072 |
+
_(aten, mkldnn_linear) \
|
1073 |
+
_(aten, mkldnn_linear_backward) \
|
1074 |
+
_(aten, mkldnn_linear_backward_input) \
|
1075 |
+
_(aten, mkldnn_linear_backward_weights) \
|
1076 |
+
_(aten, mkldnn_max_pool2d) \
|
1077 |
+
_(aten, mkldnn_max_pool2d_backward) \
|
1078 |
+
_(aten, mkldnn_max_pool3d) \
|
1079 |
+
_(aten, mkldnn_max_pool3d_backward) \
|
1080 |
+
_(aten, mkldnn_reorder_conv2d_weight) \
|
1081 |
+
_(aten, mkldnn_reorder_conv3d_weight) \
|
1082 |
+
_(aten, mkldnn_rnn_layer) \
|
1083 |
+
_(aten, mkldnn_rnn_layer_backward) \
|
1084 |
+
_(aten, mm) \
|
1085 |
+
_(aten, mode) \
|
1086 |
+
_(aten, moveaxis) \
|
1087 |
+
_(aten, movedim) \
|
1088 |
+
_(aten, mps_convolution_backward) \
|
1089 |
+
_(aten, mps_convolution_transpose_backward) \
|
1090 |
+
_(aten, mse_loss) \
|
1091 |
+
_(aten, mse_loss_backward) \
|
1092 |
+
_(aten, msort) \
|
1093 |
+
_(aten, mul) \
|
1094 |
+
_(aten, mul_) \
|
1095 |
+
_(aten, multi_margin_loss) \
|
1096 |
+
_(aten, multi_margin_loss_backward) \
|
1097 |
+
_(aten, multilabel_margin_loss) \
|
1098 |
+
_(aten, multilabel_margin_loss_backward) \
|
1099 |
+
_(aten, multilabel_margin_loss_forward) \
|
1100 |
+
_(aten, multinomial) \
|
1101 |
+
_(aten, multiply) \
|
1102 |
+
_(aten, multiply_) \
|
1103 |
+
_(aten, mv) \
|
1104 |
+
_(aten, mvlgamma) \
|
1105 |
+
_(aten, mvlgamma_) \
|
1106 |
+
_(aten, nan_to_num) \
|
1107 |
+
_(aten, nan_to_num_) \
|
1108 |
+
_(aten, nanmean) \
|
1109 |
+
_(aten, nanmedian) \
|
1110 |
+
_(aten, nanquantile) \
|
1111 |
+
_(aten, nansum) \
|
1112 |
+
_(aten, narrow) \
|
1113 |
+
_(aten, narrow_copy) \
|
1114 |
+
_(aten, native_batch_norm) \
|
1115 |
+
_(aten, native_batch_norm_backward) \
|
1116 |
+
_(aten, native_channel_shuffle) \
|
1117 |
+
_(aten, native_dropout) \
|
1118 |
+
_(aten, native_dropout_backward) \
|
1119 |
+
_(aten, native_group_norm) \
|
1120 |
+
_(aten, native_group_norm_backward) \
|
1121 |
+
_(aten, native_layer_norm) \
|
1122 |
+
_(aten, native_layer_norm_backward) \
|
1123 |
+
_(aten, native_norm) \
|
1124 |
+
_(aten, ne) \
|
1125 |
+
_(aten, ne_) \
|
1126 |
+
_(aten, neg) \
|
1127 |
+
_(aten, neg_) \
|
1128 |
+
_(aten, negative) \
|
1129 |
+
_(aten, negative_) \
|
1130 |
+
_(aten, nested_to_padded_tensor) \
|
1131 |
+
_(aten, new_empty) \
|
1132 |
+
_(aten, new_empty_strided) \
|
1133 |
+
_(aten, new_full) \
|
1134 |
+
_(aten, new_ones) \
|
1135 |
+
_(aten, new_zeros) \
|
1136 |
+
_(aten, nextafter) \
|
1137 |
+
_(aten, nextafter_) \
|
1138 |
+
_(aten, nll_loss) \
|
1139 |
+
_(aten, nll_loss2d) \
|
1140 |
+
_(aten, nll_loss2d_backward) \
|
1141 |
+
_(aten, nll_loss2d_forward) \
|
1142 |
+
_(aten, nll_loss_backward) \
|
1143 |
+
_(aten, nll_loss_forward) \
|
1144 |
+
_(aten, nll_loss_nd) \
|
1145 |
+
_(aten, nonzero) \
|
1146 |
+
_(aten, nonzero_numpy) \
|
1147 |
+
_(aten, nonzero_static) \
|
1148 |
+
_(aten, norm) \
|
1149 |
+
_(aten, norm_except_dim) \
|
1150 |
+
_(aten, normal) \
|
1151 |
+
_(aten, normal_) \
|
1152 |
+
_(aten, normal_functional) \
|
1153 |
+
_(aten, not_equal) \
|
1154 |
+
_(aten, not_equal_) \
|
1155 |
+
_(aten, nuclear_norm) \
|
1156 |
+
_(aten, numpy_T) \
|
1157 |
+
_(aten, one_hot) \
|
1158 |
+
_(aten, ones) \
|
1159 |
+
_(aten, ones_like) \
|
1160 |
+
_(aten, orgqr) \
|
1161 |
+
_(aten, ormqr) \
|
1162 |
+
_(aten, outer) \
|
1163 |
+
_(aten, output_nr) \
|
1164 |
+
_(aten, pad) \
|
1165 |
+
_(aten, pad_sequence) \
|
1166 |
+
_(aten, pairwise_distance) \
|
1167 |
+
_(aten, pdist) \
|
1168 |
+
_(aten, permute) \
|
1169 |
+
_(aten, permute_copy) \
|
1170 |
+
_(aten, pin_memory) \
|
1171 |
+
_(aten, pinverse) \
|
1172 |
+
_(aten, pixel_shuffle) \
|
1173 |
+
_(aten, pixel_unshuffle) \
|
1174 |
+
_(aten, poisson) \
|
1175 |
+
_(aten, poisson_nll_loss) \
|
1176 |
+
_(aten, polar) \
|
1177 |
+
_(aten, polygamma) \
|
1178 |
+
_(aten, polygamma_) \
|
1179 |
+
_(aten, positive) \
|
1180 |
+
_(aten, pow) \
|
1181 |
+
_(aten, pow_) \
|
1182 |
+
_(aten, prelu) \
|
1183 |
+
_(aten, prod) \
|
1184 |
+
_(aten, promote_types) \
|
1185 |
+
_(aten, put) \
|
1186 |
+
_(aten, put_) \
|
1187 |
+
_(aten, q_per_channel_axis) \
|
1188 |
+
_(aten, q_per_channel_scales) \
|
1189 |
+
_(aten, q_per_channel_zero_points) \
|
1190 |
+
_(aten, q_scale) \
|
1191 |
+
_(aten, q_zero_point) \
|
1192 |
+
_(aten, qr) \
|
1193 |
+
_(aten, qscheme) \
|
1194 |
+
_(aten, quantile) \
|
1195 |
+
_(aten, quantize_per_channel) \
|
1196 |
+
_(aten, quantize_per_tensor) \
|
1197 |
+
_(aten, quantize_per_tensor_dynamic) \
|
1198 |
+
_(aten, quantized_batch_norm) \
|
1199 |
+
_(aten, quantized_gru_cell) \
|
1200 |
+
_(aten, quantized_lstm_cell) \
|
1201 |
+
_(aten, quantized_max_pool1d) \
|
1202 |
+
_(aten, quantized_max_pool2d) \
|
1203 |
+
_(aten, quantized_max_pool3d) \
|
1204 |
+
_(aten, quantized_rnn_relu_cell) \
|
1205 |
+
_(aten, quantized_rnn_tanh_cell) \
|
1206 |
+
_(aten, rad2deg) \
|
1207 |
+
_(aten, rad2deg_) \
|
1208 |
+
_(aten, rand) \
|
1209 |
+
_(aten, rand_like) \
|
1210 |
+
_(aten, randint) \
|
1211 |
+
_(aten, randint_like) \
|
1212 |
+
_(aten, randn) \
|
1213 |
+
_(aten, randn_like) \
|
1214 |
+
_(aten, random) \
|
1215 |
+
_(aten, random_) \
|
1216 |
+
_(aten, randperm) \
|
1217 |
+
_(aten, range) \
|
1218 |
+
_(aten, ravel) \
|
1219 |
+
_(aten, real) \
|
1220 |
+
_(aten, reciprocal) \
|
1221 |
+
_(aten, reciprocal_) \
|
1222 |
+
_(aten, record_stream) \
|
1223 |
+
_(aten, refine_names) \
|
1224 |
+
_(aten, reflection_pad1d) \
|
1225 |
+
_(aten, reflection_pad1d_backward) \
|
1226 |
+
_(aten, reflection_pad2d) \
|
1227 |
+
_(aten, reflection_pad2d_backward) \
|
1228 |
+
_(aten, reflection_pad3d) \
|
1229 |
+
_(aten, reflection_pad3d_backward) \
|
1230 |
+
_(aten, relu) \
|
1231 |
+
_(aten, relu6) \
|
1232 |
+
_(aten, relu6_) \
|
1233 |
+
_(aten, relu_) \
|
1234 |
+
_(aten, remainder) \
|
1235 |
+
_(aten, remainder_) \
|
1236 |
+
_(aten, rename) \
|
1237 |
+
_(aten, rename_) \
|
1238 |
+
_(aten, renorm) \
|
1239 |
+
_(aten, renorm_) \
|
1240 |
+
_(aten, repeat) \
|
1241 |
+
_(aten, repeat_interleave) \
|
1242 |
+
_(aten, replication_pad1d) \
|
1243 |
+
_(aten, replication_pad1d_backward) \
|
1244 |
+
_(aten, replication_pad2d) \
|
1245 |
+
_(aten, replication_pad2d_backward) \
|
1246 |
+
_(aten, replication_pad3d) \
|
1247 |
+
_(aten, replication_pad3d_backward) \
|
1248 |
+
_(aten, requires_grad) \
|
1249 |
+
_(aten, requires_grad_) \
|
1250 |
+
_(aten, reshape) \
|
1251 |
+
_(aten, reshape_as) \
|
1252 |
+
_(aten, resize) \
|
1253 |
+
_(aten, resize_) \
|
1254 |
+
_(aten, resize_as) \
|
1255 |
+
_(aten, resize_as_) \
|
1256 |
+
_(aten, resize_as_sparse) \
|
1257 |
+
_(aten, resize_as_sparse_) \
|
1258 |
+
_(aten, resolve_conj) \
|
1259 |
+
_(aten, resolve_neg) \
|
1260 |
+
_(aten, result_type) \
|
1261 |
+
_(aten, retain_grad) \
|
1262 |
+
_(aten, retains_grad) \
|
1263 |
+
_(aten, rnn_relu) \
|
1264 |
+
_(aten, rnn_relu_cell) \
|
1265 |
+
_(aten, rnn_tanh) \
|
1266 |
+
_(aten, rnn_tanh_cell) \
|
1267 |
+
_(aten, roll) \
|
1268 |
+
_(aten, rot90) \
|
1269 |
+
_(aten, round) \
|
1270 |
+
_(aten, round_) \
|
1271 |
+
_(aten, row_indices) \
|
1272 |
+
_(aten, row_indices_copy) \
|
1273 |
+
_(aten, row_stack) \
|
1274 |
+
_(aten, rrelu) \
|
1275 |
+
_(aten, rrelu_) \
|
1276 |
+
_(aten, rrelu_with_noise) \
|
1277 |
+
_(aten, rrelu_with_noise_) \
|
1278 |
+
_(aten, rrelu_with_noise_backward) \
|
1279 |
+
_(aten, rshift) \
|
1280 |
+
_(aten, rsqrt) \
|
1281 |
+
_(aten, rsqrt_) \
|
1282 |
+
_(aten, rsub) \
|
1283 |
+
_(aten, scalar_tensor) \
|
1284 |
+
_(aten, scaled_dot_product_attention) \
|
1285 |
+
_(aten, scatter) \
|
1286 |
+
_(aten, scatter_) \
|
1287 |
+
_(aten, scatter_add) \
|
1288 |
+
_(aten, scatter_add_) \
|
1289 |
+
_(aten, scatter_reduce) \
|
1290 |
+
_(aten, scatter_reduce_) \
|
1291 |
+
_(aten, searchsorted) \
|
1292 |
+
_(aten, segment_reduce) \
|
1293 |
+
_(aten, select) \
|
1294 |
+
_(aten, select_backward) \
|
1295 |
+
_(aten, select_copy) \
|
1296 |
+
_(aten, select_scatter) \
|
1297 |
+
_(aten, selu) \
|
1298 |
+
_(aten, selu_) \
|
1299 |
+
_(aten, set) \
|
1300 |
+
_(aten, set_) \
|
1301 |
+
_(aten, set_data) \
|
1302 |
+
_(aten, sgn) \
|
1303 |
+
_(aten, sgn_) \
|
1304 |
+
_(aten, sigmoid) \
|
1305 |
+
_(aten, sigmoid_) \
|
1306 |
+
_(aten, sigmoid_backward) \
|
1307 |
+
_(aten, sign) \
|
1308 |
+
_(aten, sign_) \
|
1309 |
+
_(aten, signbit) \
|
1310 |
+
_(aten, silu) \
|
1311 |
+
_(aten, silu_) \
|
1312 |
+
_(aten, silu_backward) \
|
1313 |
+
_(aten, sin) \
|
1314 |
+
_(aten, sin_) \
|
1315 |
+
_(aten, sinc) \
|
1316 |
+
_(aten, sinc_) \
|
1317 |
+
_(aten, sinh) \
|
1318 |
+
_(aten, sinh_) \
|
1319 |
+
_(aten, size) \
|
1320 |
+
_(aten, slice) \
|
1321 |
+
_(aten, slice_backward) \
|
1322 |
+
_(aten, slice_copy) \
|
1323 |
+
_(aten, slice_inverse) \
|
1324 |
+
_(aten, slice_scatter) \
|
1325 |
+
_(aten, slogdet) \
|
1326 |
+
_(aten, slow_conv3d) \
|
1327 |
+
_(aten, slow_conv3d_forward) \
|
1328 |
+
_(aten, slow_conv_dilated2d) \
|
1329 |
+
_(aten, slow_conv_dilated3d) \
|
1330 |
+
_(aten, slow_conv_transpose2d) \
|
1331 |
+
_(aten, slow_conv_transpose3d) \
|
1332 |
+
_(aten, smm) \
|
1333 |
+
_(aten, smooth_l1_loss) \
|
1334 |
+
_(aten, smooth_l1_loss_backward) \
|
1335 |
+
_(aten, soft_margin_loss) \
|
1336 |
+
_(aten, soft_margin_loss_backward) \
|
1337 |
+
_(aten, softmax) \
|
1338 |
+
_(aten, softplus) \
|
1339 |
+
_(aten, softplus_backward) \
|
1340 |
+
_(aten, softshrink) \
|
1341 |
+
_(aten, softshrink_backward) \
|
1342 |
+
_(aten, sort) \
|
1343 |
+
_(aten, sparse_bsc_tensor) \
|
1344 |
+
_(aten, sparse_bsr_tensor) \
|
1345 |
+
_(aten, sparse_compressed_tensor) \
|
1346 |
+
_(aten, sparse_coo_tensor) \
|
1347 |
+
_(aten, sparse_csc_tensor) \
|
1348 |
+
_(aten, sparse_csr_tensor) \
|
1349 |
+
_(aten, sparse_dim) \
|
1350 |
+
_(aten, sparse_mask) \
|
1351 |
+
_(aten, sparse_resize) \
|
1352 |
+
_(aten, sparse_resize_) \
|
1353 |
+
_(aten, sparse_resize_and_clear) \
|
1354 |
+
_(aten, sparse_resize_and_clear_) \
|
1355 |
+
_(aten, sparse_sampled_addmm) \
|
1356 |
+
_(aten, special_airy_ai) \
|
1357 |
+
_(aten, special_bessel_j0) \
|
1358 |
+
_(aten, special_bessel_j1) \
|
1359 |
+
_(aten, special_bessel_y0) \
|
1360 |
+
_(aten, special_bessel_y1) \
|
1361 |
+
_(aten, special_chebyshev_polynomial_t) \
|
1362 |
+
_(aten, special_chebyshev_polynomial_u) \
|
1363 |
+
_(aten, special_chebyshev_polynomial_v) \
|
1364 |
+
_(aten, special_chebyshev_polynomial_w) \
|
1365 |
+
_(aten, special_digamma) \
|
1366 |
+
_(aten, special_entr) \
|
1367 |
+
_(aten, special_erf) \
|
1368 |
+
_(aten, special_erfc) \
|
1369 |
+
_(aten, special_erfcx) \
|
1370 |
+
_(aten, special_erfinv) \
|
1371 |
+
_(aten, special_exp2) \
|
1372 |
+
_(aten, special_expit) \
|
1373 |
+
_(aten, special_expm1) \
|
1374 |
+
_(aten, special_gammainc) \
|
1375 |
+
_(aten, special_gammaincc) \
|
1376 |
+
_(aten, special_gammaln) \
|
1377 |
+
_(aten, special_hermite_polynomial_h) \
|
1378 |
+
_(aten, special_hermite_polynomial_he) \
|
1379 |
+
_(aten, special_i0) \
|
1380 |
+
_(aten, special_i0e) \
|
1381 |
+
_(aten, special_i1) \
|
1382 |
+
_(aten, special_i1e) \
|
1383 |
+
_(aten, special_laguerre_polynomial_l) \
|
1384 |
+
_(aten, special_legendre_polynomial_p) \
|
1385 |
+
_(aten, special_log1p) \
|
1386 |
+
_(aten, special_log_ndtr) \
|
1387 |
+
_(aten, special_log_softmax) \
|
1388 |
+
_(aten, special_logit) \
|
1389 |
+
_(aten, special_logsumexp) \
|
1390 |
+
_(aten, special_modified_bessel_i0) \
|
1391 |
+
_(aten, special_modified_bessel_i1) \
|
1392 |
+
_(aten, special_modified_bessel_k0) \
|
1393 |
+
_(aten, special_modified_bessel_k1) \
|
1394 |
+
_(aten, special_multigammaln) \
|
1395 |
+
_(aten, special_ndtr) \
|
1396 |
+
_(aten, special_ndtri) \
|
1397 |
+
_(aten, special_polygamma) \
|
1398 |
+
_(aten, special_psi) \
|
1399 |
+
_(aten, special_round) \
|
1400 |
+
_(aten, special_scaled_modified_bessel_k0) \
|
1401 |
+
_(aten, special_scaled_modified_bessel_k1) \
|
1402 |
+
_(aten, special_shifted_chebyshev_polynomial_t) \
|
1403 |
+
_(aten, special_shifted_chebyshev_polynomial_u) \
|
1404 |
+
_(aten, special_shifted_chebyshev_polynomial_v) \
|
1405 |
+
_(aten, special_shifted_chebyshev_polynomial_w) \
|
1406 |
+
_(aten, special_sinc) \
|
1407 |
+
_(aten, special_softmax) \
|
1408 |
+
_(aten, special_spherical_bessel_j0) \
|
1409 |
+
_(aten, special_xlog1py) \
|
1410 |
+
_(aten, special_xlogy) \
|
1411 |
+
_(aten, special_zeta) \
|
1412 |
+
_(aten, split) \
|
1413 |
+
_(aten, split_copy) \
|
1414 |
+
_(aten, split_with_sizes) \
|
1415 |
+
_(aten, split_with_sizes_copy) \
|
1416 |
+
_(aten, sqrt) \
|
1417 |
+
_(aten, sqrt_) \
|
1418 |
+
_(aten, square) \
|
1419 |
+
_(aten, square_) \
|
1420 |
+
_(aten, squeeze) \
|
1421 |
+
_(aten, squeeze_) \
|
1422 |
+
_(aten, squeeze_copy) \
|
1423 |
+
_(aten, sspaddmm) \
|
1424 |
+
_(aten, stack) \
|
1425 |
+
_(aten, std) \
|
1426 |
+
_(aten, std_mean) \
|
1427 |
+
_(aten, stft) \
|
1428 |
+
_(aten, stride) \
|
1429 |
+
_(aten, sub) \
|
1430 |
+
_(aten, sub_) \
|
1431 |
+
_(aten, subtract) \
|
1432 |
+
_(aten, subtract_) \
|
1433 |
+
_(aten, sum) \
|
1434 |
+
_(aten, sum_to_size) \
|
1435 |
+
_(aten, svd) \
|
1436 |
+
_(aten, swapaxes) \
|
1437 |
+
_(aten, swapaxes_) \
|
1438 |
+
_(aten, swapdims) \
|
1439 |
+
_(aten, swapdims_) \
|
1440 |
+
_(aten, sym_constrain_range) \
|
1441 |
+
_(aten, sym_constrain_range_for_size) \
|
1442 |
+
_(aten, sym_numel) \
|
1443 |
+
_(aten, sym_size) \
|
1444 |
+
_(aten, sym_storage_offset) \
|
1445 |
+
_(aten, sym_stride) \
|
1446 |
+
_(aten, t) \
|
1447 |
+
_(aten, t_) \
|
1448 |
+
_(aten, t_copy) \
|
1449 |
+
_(aten, take) \
|
1450 |
+
_(aten, take_along_dim) \
|
1451 |
+
_(aten, tan) \
|
1452 |
+
_(aten, tan_) \
|
1453 |
+
_(aten, tanh) \
|
1454 |
+
_(aten, tanh_) \
|
1455 |
+
_(aten, tanh_backward) \
|
1456 |
+
_(aten, tensor_split) \
|
1457 |
+
_(aten, tensordot) \
|
1458 |
+
_(aten, thnn_conv2d) \
|
1459 |
+
_(aten, threshold) \
|
1460 |
+
_(aten, threshold_) \
|
1461 |
+
_(aten, threshold_backward) \
|
1462 |
+
_(aten, tile) \
|
1463 |
+
_(aten, to) \
|
1464 |
+
_(aten, to_dense) \
|
1465 |
+
_(aten, to_dense_backward) \
|
1466 |
+
_(aten, to_mkldnn) \
|
1467 |
+
_(aten, to_mkldnn_backward) \
|
1468 |
+
_(aten, to_padded_tensor) \
|
1469 |
+
_(aten, to_sparse) \
|
1470 |
+
_(aten, to_sparse_bsc) \
|
1471 |
+
_(aten, to_sparse_bsr) \
|
1472 |
+
_(aten, to_sparse_csc) \
|
1473 |
+
_(aten, to_sparse_csr) \
|
1474 |
+
_(aten, topk) \
|
1475 |
+
_(aten, trace) \
|
1476 |
+
_(aten, trace_backward) \
|
1477 |
+
_(aten, transpose) \
|
1478 |
+
_(aten, transpose_) \
|
1479 |
+
_(aten, transpose_copy) \
|
1480 |
+
_(aten, trapezoid) \
|
1481 |
+
_(aten, trapz) \
|
1482 |
+
_(aten, triangular_solve) \
|
1483 |
+
_(aten, tril) \
|
1484 |
+
_(aten, tril_) \
|
1485 |
+
_(aten, tril_indices) \
|
1486 |
+
_(aten, triplet_margin_loss) \
|
1487 |
+
_(aten, triu) \
|
1488 |
+
_(aten, triu_) \
|
1489 |
+
_(aten, triu_indices) \
|
1490 |
+
_(aten, true_divide) \
|
1491 |
+
_(aten, true_divide_) \
|
1492 |
+
_(aten, trunc) \
|
1493 |
+
_(aten, trunc_) \
|
1494 |
+
_(aten, type_as) \
|
1495 |
+
_(aten, unbind) \
|
1496 |
+
_(aten, unbind_copy) \
|
1497 |
+
_(aten, unflatten) \
|
1498 |
+
_(aten, unflatten_dense_tensors) \
|
1499 |
+
_(aten, unfold) \
|
1500 |
+
_(aten, unfold_backward) \
|
1501 |
+
_(aten, unfold_copy) \
|
1502 |
+
_(aten, uniform) \
|
1503 |
+
_(aten, uniform_) \
|
1504 |
+
_(aten, unique_consecutive) \
|
1505 |
+
_(aten, unique_dim) \
|
1506 |
+
_(aten, unique_dim_consecutive) \
|
1507 |
+
_(aten, unsafe_chunk) \
|
1508 |
+
_(aten, unsafe_split) \
|
1509 |
+
_(aten, unsafe_split_with_sizes) \
|
1510 |
+
_(aten, unsqueeze) \
|
1511 |
+
_(aten, unsqueeze_) \
|
1512 |
+
_(aten, unsqueeze_copy) \
|
1513 |
+
_(aten, upsample_bicubic2d) \
|
1514 |
+
_(aten, upsample_bicubic2d_backward) \
|
1515 |
+
_(aten, upsample_bilinear2d) \
|
1516 |
+
_(aten, upsample_bilinear2d_backward) \
|
1517 |
+
_(aten, upsample_linear1d) \
|
1518 |
+
_(aten, upsample_linear1d_backward) \
|
1519 |
+
_(aten, upsample_nearest1d) \
|
1520 |
+
_(aten, upsample_nearest1d_backward) \
|
1521 |
+
_(aten, upsample_nearest2d) \
|
1522 |
+
_(aten, upsample_nearest2d_backward) \
|
1523 |
+
_(aten, upsample_nearest3d) \
|
1524 |
+
_(aten, upsample_nearest3d_backward) \
|
1525 |
+
_(aten, upsample_trilinear3d) \
|
1526 |
+
_(aten, upsample_trilinear3d_backward) \
|
1527 |
+
_(aten, value_selecting_reduction_backward) \
|
1528 |
+
_(aten, values) \
|
1529 |
+
_(aten, values_copy) \
|
1530 |
+
_(aten, vander) \
|
1531 |
+
_(aten, var) \
|
1532 |
+
_(aten, var_mean) \
|
1533 |
+
_(aten, vdot) \
|
1534 |
+
_(aten, view) \
|
1535 |
+
_(aten, view_as) \
|
1536 |
+
_(aten, view_as_complex) \
|
1537 |
+
_(aten, view_as_complex_copy) \
|
1538 |
+
_(aten, view_as_real) \
|
1539 |
+
_(aten, view_as_real_copy) \
|
1540 |
+
_(aten, view_copy) \
|
1541 |
+
_(aten, vsplit) \
|
1542 |
+
_(aten, vstack) \
|
1543 |
+
_(aten, where) \
|
1544 |
+
_(aten, xlogy) \
|
1545 |
+
_(aten, xlogy_) \
|
1546 |
+
_(aten, zero) \
|
1547 |
+
_(aten, zero_) \
|
1548 |
+
_(aten, zeros) \
|
1549 |
+
_(aten, zeros_like)
|
1550 |
+
|
1551 |
+
#define FORALL_ATTR_BASE_SYMBOLS(_) \
|
1552 |
+
_(attr, A) \
|
1553 |
+
_(attr, B) \
|
1554 |
+
_(attr, C) \
|
1555 |
+
_(attr, H) \
|
1556 |
+
_(attr, HxW) \
|
1557 |
+
_(attr, K) \
|
1558 |
+
_(attr, L) \
|
1559 |
+
_(attr, LD) \
|
1560 |
+
_(attr, LU) \
|
1561 |
+
_(attr, LU_data) \
|
1562 |
+
_(attr, LU_pivots) \
|
1563 |
+
_(attr, M) \
|
1564 |
+
_(attr, N) \
|
1565 |
+
_(attr, P) \
|
1566 |
+
_(attr, Q) \
|
1567 |
+
_(attr, R) \
|
1568 |
+
_(attr, S) \
|
1569 |
+
_(attr, U) \
|
1570 |
+
_(attr, UPLO) \
|
1571 |
+
_(attr, V) \
|
1572 |
+
_(attr, Vh) \
|
1573 |
+
_(attr, W) \
|
1574 |
+
_(attr, X) \
|
1575 |
+
_(attr, a) \
|
1576 |
+
_(attr, abs) \
|
1577 |
+
_(attr, accumulate) \
|
1578 |
+
_(attr, accumulate_matches) \
|
1579 |
+
_(attr, activation) \
|
1580 |
+
_(attr, addends) \
|
1581 |
+
_(attr, adjoint) \
|
1582 |
+
_(attr, alg_id) \
|
1583 |
+
_(attr, align_corners) \
|
1584 |
+
_(attr, allow_tf32) \
|
1585 |
+
_(attr, alpha) \
|
1586 |
+
_(attr, amsgrad) \
|
1587 |
+
_(attr, anchor) \
|
1588 |
+
_(attr, angle) \
|
1589 |
+
_(attr, any) \
|
1590 |
+
_(attr, api_name) \
|
1591 |
+
_(attr, append) \
|
1592 |
+
_(attr, approximate) \
|
1593 |
+
_(attr, arg1) \
|
1594 |
+
_(attr, arg2) \
|
1595 |
+
_(attr, arg3) \
|
1596 |
+
_(attr, arg_out) \
|
1597 |
+
_(attr, assert_msg) \
|
1598 |
+
_(attr, assume_unique) \
|
1599 |
+
_(attr, atol) \
|
1600 |
+
_(attr, attn_bias) \
|
1601 |
+
_(attr, attn_mask) \
|
1602 |
+
_(attr, average_attn_weights) \
|
1603 |
+
_(attr, averaging_const) \
|
1604 |
+
_(attr, aweights) \
|
1605 |
+
_(attr, axis) \
|
1606 |
+
_(attr, axis0) \
|
1607 |
+
_(attr, axis1) \
|
1608 |
+
_(attr, b) \
|
1609 |
+
_(attr, b_hh) \
|
1610 |
+
_(attr, b_ih) \
|
1611 |
+
_(attr, bag_size) \
|
1612 |
+
_(attr, base) \
|
1613 |
+
_(attr, batch1) \
|
1614 |
+
_(attr, batch2) \
|
1615 |
+
_(attr, batch_dim) \
|
1616 |
+
_(attr, batch_first) \
|
1617 |
+
_(attr, batch_size) \
|
1618 |
+
_(attr, batch_sizes) \
|
1619 |
+
_(attr, benchmark) \
|
1620 |
+
_(attr, beta) \
|
1621 |
+
_(attr, beta1) \
|
1622 |
+
_(attr, beta2) \
|
1623 |
+
_(attr, bias) \
|
1624 |
+
_(attr, bias_defined) \
|
1625 |
+
_(attr, bias_g) \
|
1626 |
+
_(attr, bias_requires_grad) \
|
1627 |
+
_(attr, bias_sizes) \
|
1628 |
+
_(attr, bidirectional) \
|
1629 |
+
_(attr, bin_edges) \
|
1630 |
+
_(attr, bins) \
|
1631 |
+
_(attr, bit_width) \
|
1632 |
+
_(attr, blank) \
|
1633 |
+
_(attr, blocksize) \
|
1634 |
+
_(attr, boundaries) \
|
1635 |
+
_(attr, buffer) \
|
1636 |
+
_(attr, causal_diagonal) \
|
1637 |
+
_(attr, ccol_indices) \
|
1638 |
+
_(attr, cdim) \
|
1639 |
+
_(attr, cdist) \
|
1640 |
+
_(attr, ceil_mode) \
|
1641 |
+
_(attr, cell_state_fwd) \
|
1642 |
+
_(attr, center) \
|
1643 |
+
_(attr, ch_axis) \
|
1644 |
+
_(attr, check_errors) \
|
1645 |
+
_(attr, chunks) \
|
1646 |
+
_(attr, coalesced) \
|
1647 |
+
_(attr, coefficients) \
|
1648 |
+
_(attr, col) \
|
1649 |
+
_(attr, col_indices) \
|
1650 |
+
_(attr, col_offsets) \
|
1651 |
+
_(attr, col_offsets_hh) \
|
1652 |
+
_(attr, col_offsets_ih) \
|
1653 |
+
_(attr, compressed_A) \
|
1654 |
+
_(attr, compressed_idx) \
|
1655 |
+
_(attr, compressed_indices) \
|
1656 |
+
_(attr, compressed_indices_dtype) \
|
1657 |
+
_(attr, compute_log_sumexp) \
|
1658 |
+
_(attr, compute_mode) \
|
1659 |
+
_(attr, compute_uv) \
|
1660 |
+
_(attr, compute_v) \
|
1661 |
+
_(attr, condition) \
|
1662 |
+
_(attr, copy) \
|
1663 |
+
_(attr, correction) \
|
1664 |
+
_(attr, count) \
|
1665 |
+
_(attr, count_include_pad) \
|
1666 |
+
_(attr, counts) \
|
1667 |
+
_(attr, cpu_dtype) \
|
1668 |
+
_(attr, cpu_enabled) \
|
1669 |
+
_(attr, cpu_nested_shape_example) \
|
1670 |
+
_(attr, create_graph) \
|
1671 |
+
_(attr, crow_indices) \
|
1672 |
+
_(attr, cu_seqlens_k) \
|
1673 |
+
_(attr, cu_seqlens_q) \
|
1674 |
+
_(attr, cuda_dtype) \
|
1675 |
+
_(attr, cuda_enabled) \
|
1676 |
+
_(attr, cudnn_enable) \
|
1677 |
+
_(attr, cudnn_enabled) \
|
1678 |
+
_(attr, cum_seq_k) \
|
1679 |
+
_(attr, cum_seq_q) \
|
1680 |
+
_(attr, custom_mask_type) \
|
1681 |
+
_(attr, cx) \
|
1682 |
+
_(attr, cx_) \
|
1683 |
+
_(attr, cx_tmp) \
|
1684 |
+
_(attr, cy) \
|
1685 |
+
_(attr, cy_) \
|
1686 |
+
_(attr, d) \
|
1687 |
+
_(attr, dampening) \
|
1688 |
+
_(attr, data) \
|
1689 |
+
_(attr, decimals) \
|
1690 |
+
_(attr, delta) \
|
1691 |
+
_(attr, dense) \
|
1692 |
+
_(attr, dense_B) \
|
1693 |
+
_(attr, dense_dim) \
|
1694 |
+
_(attr, density) \
|
1695 |
+
_(attr, dep_token) \
|
1696 |
+
_(attr, descending) \
|
1697 |
+
_(attr, destination) \
|
1698 |
+
_(attr, deterministic) \
|
1699 |
+
_(attr, device) \
|
1700 |
+
_(attr, device_index) \
|
1701 |
+
_(attr, dgrad_glu) \
|
1702 |
+
_(attr, diagonal) \
|
1703 |
+
_(attr, diagonals) \
|
1704 |
+
_(attr, dilation) \
|
1705 |
+
_(attr, dim) \
|
1706 |
+
_(attr, dim0) \
|
1707 |
+
_(attr, dim1) \
|
1708 |
+
_(attr, dim2) \
|
1709 |
+
_(attr, dimension) \
|
1710 |
+
_(attr, dims) \
|
1711 |
+
_(attr, dims_other) \
|
1712 |
+
_(attr, dims_self) \
|
1713 |
+
_(attr, divisor_override) \
|
1714 |
+
_(attr, downscale_factor) \
|
1715 |
+
_(attr, driver) \
|
1716 |
+
_(attr, dropout) \
|
1717 |
+
_(attr, dropout_mask) \
|
1718 |
+
_(attr, dropout_p) \
|
1719 |
+
_(attr, dropout_seed) \
|
1720 |
+
_(attr, dropout_state) \
|
1721 |
+
_(attr, dst) \
|
1722 |
+
_(attr, dtype) \
|
1723 |
+
_(attr, dual) \
|
1724 |
+
_(attr, dummy) \
|
1725 |
+
_(attr, dx) \
|
1726 |
+
_(attr, edge_order) \
|
1727 |
+
_(attr, eigenvalues) \
|
1728 |
+
_(attr, eigenvectors) \
|
1729 |
+
_(attr, eigvals) \
|
1730 |
+
_(attr, eigvecs) \
|
1731 |
+
_(attr, element) \
|
1732 |
+
_(attr, elements) \
|
1733 |
+
_(attr, ellipsis_idx) \
|
1734 |
+
_(attr, embed_dim) \
|
1735 |
+
_(attr, end) \
|
1736 |
+
_(attr, end_dim) \
|
1737 |
+
_(attr, eps) \
|
1738 |
+
_(attr, epsilon) \
|
1739 |
+
_(attr, equal_nan) \
|
1740 |
+
_(attr, equation) \
|
1741 |
+
_(attr, exp_avg_sqs) \
|
1742 |
+
_(attr, exp_avgs) \
|
1743 |
+
_(attr, expand1) \
|
1744 |
+
_(attr, expand2) \
|
1745 |
+
_(attr, expand3) \
|
1746 |
+
_(attr, exponent) \
|
1747 |
+
_(attr, exponential_average_factor) \
|
1748 |
+
_(attr, fake_quant_enabled) \
|
1749 |
+
_(attr, fake_quant_on) \
|
1750 |
+
_(attr, ffn_bias_1) \
|
1751 |
+
_(attr, ffn_bias_2) \
|
1752 |
+
_(attr, ffn_weight_1) \
|
1753 |
+
_(attr, ffn_weight_2) \
|
1754 |
+
_(attr, filename) \
|
1755 |
+
_(attr, fill_value) \
|
1756 |
+
_(attr, flat) \
|
1757 |
+
_(attr, forward) \
|
1758 |
+
_(attr, found_inf) \
|
1759 |
+
_(attr, from) \
|
1760 |
+
_(attr, full) \
|
1761 |
+
_(attr, full_matrices) \
|
1762 |
+
_(attr, fuse_transform_0213) \
|
1763 |
+
_(attr, fweights) \
|
1764 |
+
_(attr, g) \
|
1765 |
+
_(attr, gO) \
|
1766 |
+
_(attr, generator) \
|
1767 |
+
_(attr, ggI) \
|
1768 |
+
_(attr, ggW) \
|
1769 |
+
_(attr, ggb) \
|
1770 |
+
_(attr, glu) \
|
1771 |
+
_(attr, grad) \
|
1772 |
+
_(attr, grad_bias) \
|
1773 |
+
_(attr, grad_cy) \
|
1774 |
+
_(attr, grad_factor) \
|
1775 |
+
_(attr, grad_glu) \
|
1776 |
+
_(attr, grad_hy) \
|
1777 |
+
_(attr, grad_in) \
|
1778 |
+
_(attr, grad_input) \
|
1779 |
+
_(attr, grad_input_mask) \
|
1780 |
+
_(attr, grad_out) \
|
1781 |
+
_(attr, grad_out_) \
|
1782 |
+
_(attr, grad_output) \
|
1783 |
+
_(attr, grad_scale) \
|
1784 |
+
_(attr, grad_w) \
|
1785 |
+
_(attr, grad_weight) \
|
1786 |
+
_(attr, grad_x) \
|
1787 |
+
_(attr, grad_y) \
|
1788 |
+
_(attr, gradient) \
|
1789 |
+
_(attr, grads) \
|
1790 |
+
_(attr, grid) \
|
1791 |
+
_(attr, group) \
|
1792 |
+
_(attr, groups) \
|
1793 |
+
_(attr, growth_interval) \
|
1794 |
+
_(attr, growth_tracker) \
|
1795 |
+
_(attr, half_to_float) \
|
1796 |
+
_(attr, has_bias) \
|
1797 |
+
_(attr, has_biases) \
|
1798 |
+
_(attr, hermitian) \
|
1799 |
+
_(attr, hidden_bias) \
|
1800 |
+
_(attr, hidden_gates) \
|
1801 |
+
_(attr, hidden_size) \
|
1802 |
+
_(attr, high) \
|
1803 |
+
_(attr, hist) \
|
1804 |
+
_(attr, hop_length) \
|
1805 |
+
_(attr, hx) \
|
1806 |
+
_(attr, hx_) \
|
1807 |
+
_(attr, hy_) \
|
1808 |
+
_(attr, i1) \
|
1809 |
+
_(attr, i2) \
|
1810 |
+
_(attr, i3) \
|
1811 |
+
_(attr, ignore_index) \
|
1812 |
+
_(attr, imag) \
|
1813 |
+
_(attr, impl_index) \
|
1814 |
+
_(attr, implicit) \
|
1815 |
+
_(attr, include_last_offset) \
|
1816 |
+
_(attr, include_self) \
|
1817 |
+
_(attr, increasing) \
|
1818 |
+
_(attr, ind) \
|
1819 |
+
_(attr, index) \
|
1820 |
+
_(attr, indexing) \
|
1821 |
+
_(attr, indices) \
|
1822 |
+
_(attr, info) \
|
1823 |
+
_(attr, initial) \
|
1824 |
+
_(attr, innerKTiles) \
|
1825 |
+
_(attr, input) \
|
1826 |
+
_(attr, input1) \
|
1827 |
+
_(attr, input2) \
|
1828 |
+
_(attr, input3) \
|
1829 |
+
_(attr, input_bias) \
|
1830 |
+
_(attr, input_dtype) \
|
1831 |
+
_(attr, input_g) \
|
1832 |
+
_(attr, input_gates) \
|
1833 |
+
_(attr, input_lengths) \
|
1834 |
+
_(attr, input_scale) \
|
1835 |
+
_(attr, input_size) \
|
1836 |
+
_(attr, input_sizes) \
|
1837 |
+
_(attr, inputs) \
|
1838 |
+
_(attr, interpolation) \
|
1839 |
+
_(attr, interpolation_mode) \
|
1840 |
+
_(attr, inv_scale) \
|
1841 |
+
_(attr, inverse) \
|
1842 |
+
_(attr, invert) \
|
1843 |
+
_(attr, invstd) \
|
1844 |
+
_(attr, is_causal) \
|
1845 |
+
_(attr, is_coalesced) \
|
1846 |
+
_(attr, is_crow) \
|
1847 |
+
_(attr, is_first_step) \
|
1848 |
+
_(attr, is_matrix) \
|
1849 |
+
_(attr, is_result) \
|
1850 |
+
_(attr, is_target) \
|
1851 |
+
_(attr, k) \
|
1852 |
+
_(attr, keepdim) \
|
1853 |
+
_(attr, kernel_size) \
|
1854 |
+
_(attr, key) \
|
1855 |
+
_(attr, label_smoothing) \
|
1856 |
+
_(attr, lambd) \
|
1857 |
+
_(attr, largest) \
|
1858 |
+
_(attr, last_dim_size) \
|
1859 |
+
_(attr, layersOutputs) \
|
1860 |
+
_(attr, layout) \
|
1861 |
+
_(attr, left) \
|
1862 |
+
_(attr, length) \
|
1863 |
+
_(attr, lengths) \
|
1864 |
+
_(attr, level) \
|
1865 |
+
_(attr, like) \
|
1866 |
+
_(attr, list) \
|
1867 |
+
_(attr, log_alpha) \
|
1868 |
+
_(attr, log_input) \
|
1869 |
+
_(attr, log_probs) \
|
1870 |
+
_(attr, log_target) \
|
1871 |
+
_(attr, logabsdet) \
|
1872 |
+
_(attr, logsumexp) \
|
1873 |
+
_(attr, low) \
|
1874 |
+
_(attr, lower) \
|
1875 |
+
_(attr, lr) \
|
1876 |
+
_(attr, ltm) \
|
1877 |
+
_(attr, m) \
|
1878 |
+
_(attr, mantissa) \
|
1879 |
+
_(attr, margin) \
|
1880 |
+
_(attr, mask) \
|
1881 |
+
_(attr, mask_check) \
|
1882 |
+
_(attr, mask_type) \
|
1883 |
+
_(attr, masked_grad) \
|
1884 |
+
_(attr, mat) \
|
1885 |
+
_(attr, mat1) \
|
1886 |
+
_(attr, mat2) \
|
1887 |
+
_(attr, matrices) \
|
1888 |
+
_(attr, max) \
|
1889 |
+
_(attr, max_exp_avg_sqs) \
|
1890 |
+
_(attr, max_k) \
|
1891 |
+
_(attr, max_norm) \
|
1892 |
+
_(attr, max_q) \
|
1893 |
+
_(attr, max_seqlen_k) \
|
1894 |
+
_(attr, max_seqlen_q) \
|
1895 |
+
_(attr, max_size) \
|
1896 |
+
_(attr, max_val) \
|
1897 |
+
_(attr, max_values) \
|
1898 |
+
_(attr, maximize) \
|
1899 |
+
_(attr, maximum_indices) \
|
1900 |
+
_(attr, maxnorm) \
|
1901 |
+
_(attr, mean) \
|
1902 |
+
_(attr, median) \
|
1903 |
+
_(attr, memory_format) \
|
1904 |
+
_(attr, meta) \
|
1905 |
+
_(attr, min) \
|
1906 |
+
_(attr, min_indices) \
|
1907 |
+
_(attr, min_val) \
|
1908 |
+
_(attr, minlength) \
|
1909 |
+
_(attr, mode) \
|
1910 |
+
_(attr, momentum) \
|
1911 |
+
_(attr, momentum_buffer_list) \
|
1912 |
+
_(attr, n) \
|
1913 |
+
_(attr, n_bins) \
|
1914 |
+
_(attr, n_fft) \
|
1915 |
+
_(attr, names) \
|
1916 |
+
_(attr, nan) \
|
1917 |
+
_(attr, need_weights) \
|
1918 |
+
_(attr, neg_log_likelihood) \
|
1919 |
+
_(attr, negative) \
|
1920 |
+
_(attr, negative_slope) \
|
1921 |
+
_(attr, neginf) \
|
1922 |
+
_(attr, nested_size) \
|
1923 |
+
_(attr, nested_strides) \
|
1924 |
+
_(attr, nesterov) \
|
1925 |
+
_(attr, new_data) \
|
1926 |
+
_(attr, nnz) \
|
1927 |
+
_(attr, noise) \
|
1928 |
+
_(attr, non_blocking) \
|
1929 |
+
_(attr, norm) \
|
1930 |
+
_(attr, norm_bias_1) \
|
1931 |
+
_(attr, norm_bias_2) \
|
1932 |
+
_(attr, norm_first) \
|
1933 |
+
_(attr, norm_type) \
|
1934 |
+
_(attr, norm_weight_1) \
|
1935 |
+
_(attr, norm_weight_2) \
|
1936 |
+
_(attr, normalization) \
|
1937 |
+
_(attr, normalized) \
|
1938 |
+
_(attr, normalized_shape) \
|
1939 |
+
_(attr, nt_example) \
|
1940 |
+
_(attr, num_chunks) \
|
1941 |
+
_(attr, num_classes) \
|
1942 |
+
_(attr, num_generated) \
|
1943 |
+
_(attr, num_groups) \
|
1944 |
+
_(attr, num_head) \
|
1945 |
+
_(attr, num_heads) \
|
1946 |
+
_(attr, num_layers) \
|
1947 |
+
_(attr, num_parallel) \
|
1948 |
+
_(attr, num_samples) \
|
1949 |
+
_(attr, num_splits_key) \
|
1950 |
+
_(attr, num_weights) \
|
1951 |
+
_(attr, numel) \
|
1952 |
+
_(attr, observer_on) \
|
1953 |
+
_(attr, offset) \
|
1954 |
+
_(attr, offset2bag) \
|
1955 |
+
_(attr, offsets) \
|
1956 |
+
_(attr, onesided) \
|
1957 |
+
_(attr, ord) \
|
1958 |
+
_(attr, order) \
|
1959 |
+
_(attr, other) \
|
1960 |
+
_(attr, out) \
|
1961 |
+
_(attr, out0) \
|
1962 |
+
_(attr, out1) \
|
1963 |
+
_(attr, out2) \
|
1964 |
+
_(attr, out3) \
|
1965 |
+
_(attr, out4) \
|
1966 |
+
_(attr, out5) \
|
1967 |
+
_(attr, out6) \
|
1968 |
+
_(attr, out_amax) \
|
1969 |
+
_(attr, out_dim) \
|
1970 |
+
_(attr, out_dtype) \
|
1971 |
+
_(attr, out_int32) \
|
1972 |
+
_(attr, outdim) \
|
1973 |
+
_(attr, output) \
|
1974 |
+
_(attr, output_mask) \
|
1975 |
+
_(attr, output_padding) \
|
1976 |
+
_(attr, output_scale) \
|
1977 |
+
_(attr, output_size) \
|
1978 |
+
_(attr, output_zero_point) \
|
1979 |
+
_(attr, p) \
|
1980 |
+
_(attr, packed) \
|
1981 |
+
_(attr, packed_hh) \
|
1982 |
+
_(attr, packed_ih) \
|
1983 |
+
_(attr, packed_weight) \
|
1984 |
+
_(attr, pad) \
|
1985 |
+
_(attr, pad_mode) \
|
1986 |
+
_(attr, padded) \
|
1987 |
+
_(attr, padding) \
|
1988 |
+
_(attr, padding_idx) \
|
1989 |
+
_(attr, padding_mode) \
|
1990 |
+
_(attr, padding_value) \
|
1991 |
+
_(attr, params) \
|
1992 |
+
_(attr, path) \
|
1993 |
+
_(attr, pdist) \
|
1994 |
+
_(attr, per_row_fake_quant) \
|
1995 |
+
_(attr, per_sample_weights) \
|
1996 |
+
_(attr, periodic) \
|
1997 |
+
_(attr, philox_offset) \
|
1998 |
+
_(attr, philox_seed) \
|
1999 |
+
_(attr, physical_layout) \
|
2000 |
+
_(attr, pin_memory) \
|
2001 |
+
_(attr, pivot) \
|
2002 |
+
_(attr, pivots) \
|
2003 |
+
_(attr, plain_idx) \
|
2004 |
+
_(attr, plain_indices) \
|
2005 |
+
_(attr, pos_weight) \
|
2006 |
+
_(attr, posinf) \
|
2007 |
+
_(attr, positive) \
|
2008 |
+
_(attr, pow) \
|
2009 |
+
_(attr, prepend) \
|
2010 |
+
_(attr, primal) \
|
2011 |
+
_(attr, prob) \
|
2012 |
+
_(attr, proj_bias) \
|
2013 |
+
_(attr, proj_size) \
|
2014 |
+
_(attr, proj_weight) \
|
2015 |
+
_(attr, q) \
|
2016 |
+
_(attr, qGroupSize) \
|
2017 |
+
_(attr, qScaleAndZeros) \
|
2018 |
+
_(attr, qkv) \
|
2019 |
+
_(attr, qkv_bias) \
|
2020 |
+
_(attr, qkv_weight) \
|
2021 |
+
_(attr, qtensor) \
|
2022 |
+
_(attr, quant_max) \
|
2023 |
+
_(attr, quant_min) \
|
2024 |
+
_(attr, quasi) \
|
2025 |
+
_(attr, query) \
|
2026 |
+
_(attr, r) \
|
2027 |
+
_(attr, ragged_idx) \
|
2028 |
+
_(attr, random_samples) \
|
2029 |
+
_(attr, range) \
|
2030 |
+
_(attr, rank) \
|
2031 |
+
_(attr, ratio) \
|
2032 |
+
_(attr, rcond) \
|
2033 |
+
_(attr, real) \
|
2034 |
+
_(attr, reduce) \
|
2035 |
+
_(attr, reduce_range) \
|
2036 |
+
_(attr, reduction) \
|
2037 |
+
_(attr, repeats) \
|
2038 |
+
_(attr, replacement) \
|
2039 |
+
_(attr, requires_grad) \
|
2040 |
+
_(attr, reserve) \
|
2041 |
+
_(attr, reserveSpace) \
|
2042 |
+
_(attr, reservedSpace) \
|
2043 |
+
_(attr, residuals) \
|
2044 |
+
_(attr, result) \
|
2045 |
+
_(attr, retain_graph) \
|
2046 |
+
_(attr, return_complex) \
|
2047 |
+
_(attr, return_counts) \
|
2048 |
+
_(attr, return_debug_mask) \
|
2049 |
+
_(attr, return_inverse) \
|
2050 |
+
_(attr, reverse) \
|
2051 |
+
_(attr, right) \
|
2052 |
+
_(attr, rounding_mode) \
|
2053 |
+
_(attr, row) \
|
2054 |
+
_(attr, row_indices) \
|
2055 |
+
_(attr, rstd) \
|
2056 |
+
_(attr, rtol) \
|
2057 |
+
_(attr, running_max) \
|
2058 |
+
_(attr, running_mean) \
|
2059 |
+
_(attr, running_min) \
|
2060 |
+
_(attr, running_var) \
|
2061 |
+
_(attr, s) \
|
2062 |
+
_(attr, save_invstd) \
|
2063 |
+
_(attr, save_mean) \
|
2064 |
+
_(attr, save_var) \
|
2065 |
+
_(attr, save_var_transform) \
|
2066 |
+
_(attr, saved_g) \
|
2067 |
+
_(attr, saved_norms) \
|
2068 |
+
_(attr, saved_v) \
|
2069 |
+
_(attr, scalar) \
|
2070 |
+
_(attr, scalar1) \
|
2071 |
+
_(attr, scalar2) \
|
2072 |
+
_(attr, scalars) \
|
2073 |
+
_(attr, scale) \
|
2074 |
+
_(attr, scale_a) \
|
2075 |
+
_(attr, scale_b) \
|
2076 |
+
_(attr, scale_backoff_factor) \
|
2077 |
+
_(attr, scale_factors) \
|
2078 |
+
_(attr, scale_grad_by_freq) \
|
2079 |
+
_(attr, scale_growth_factor) \
|
2080 |
+
_(attr, scale_hh) \
|
2081 |
+
_(attr, scale_ih) \
|
2082 |
+
_(attr, scale_result) \
|
2083 |
+
_(attr, scales) \
|
2084 |
+
_(attr, scales_d) \
|
2085 |
+
_(attr, scales_h) \
|
2086 |
+
_(attr, scales_w) \
|
2087 |
+
_(attr, sections) \
|
2088 |
+
_(attr, seed) \
|
2089 |
+
_(attr, self) \
|
2090 |
+
_(attr, self_is_result) \
|
2091 |
+
_(attr, self_num_batch_dims) \
|
2092 |
+
_(attr, self_or_result) \
|
2093 |
+
_(attr, self_sizes) \
|
2094 |
+
_(attr, seqlen_k) \
|
2095 |
+
_(attr, sequences) \
|
2096 |
+
_(attr, shape) \
|
2097 |
+
_(attr, shared) \
|
2098 |
+
_(attr, shifts) \
|
2099 |
+
_(attr, side) \
|
2100 |
+
_(attr, sigma) \
|
2101 |
+
_(attr, sign) \
|
2102 |
+
_(attr, singular_values) \
|
2103 |
+
_(attr, size) \
|
2104 |
+
_(attr, sizes) \
|
2105 |
+
_(attr, skip_first) \
|
2106 |
+
_(attr, sobolstate) \
|
2107 |
+
_(attr, solution) \
|
2108 |
+
_(attr, some) \
|
2109 |
+
_(attr, sorted) \
|
2110 |
+
_(attr, sorted_sequence) \
|
2111 |
+
_(attr, sorter) \
|
2112 |
+
_(attr, source) \
|
2113 |
+
_(attr, spacing) \
|
2114 |
+
_(attr, sparse) \
|
2115 |
+
_(attr, sparse_dim) \
|
2116 |
+
_(attr, sparse_grad) \
|
2117 |
+
_(attr, split_size) \
|
2118 |
+
_(attr, split_sizes) \
|
2119 |
+
_(attr, src) \
|
2120 |
+
_(attr, stable) \
|
2121 |
+
_(attr, start) \
|
2122 |
+
_(attr, start_dim) \
|
2123 |
+
_(attr, state_steps) \
|
2124 |
+
_(attr, std) \
|
2125 |
+
_(attr, step) \
|
2126 |
+
_(attr, steps) \
|
2127 |
+
_(attr, storage_offset) \
|
2128 |
+
_(attr, stride) \
|
2129 |
+
_(attr, sum_dy) \
|
2130 |
+
_(attr, sum_dy_xmu) \
|
2131 |
+
_(attr, sumdim) \
|
2132 |
+
_(attr, swap) \
|
2133 |
+
_(attr, symmetric_quant) \
|
2134 |
+
_(attr, t) \
|
2135 |
+
_(attr, tangent) \
|
2136 |
+
_(attr, target) \
|
2137 |
+
_(attr, target_lengths) \
|
2138 |
+
_(attr, targets) \
|
2139 |
+
_(attr, tau) \
|
2140 |
+
_(attr, tensor) \
|
2141 |
+
_(attr, tensor1) \
|
2142 |
+
_(attr, tensor2) \
|
2143 |
+
_(attr, tensor_indices_or_sections) \
|
2144 |
+
_(attr, tensors) \
|
2145 |
+
_(attr, tensors1) \
|
2146 |
+
_(attr, test_element) \
|
2147 |
+
_(attr, test_elements) \
|
2148 |
+
_(attr, the_template) \
|
2149 |
+
_(attr, theta) \
|
2150 |
+
_(attr, threshold) \
|
2151 |
+
_(attr, to) \
|
2152 |
+
_(attr, tol) \
|
2153 |
+
_(attr, total) \
|
2154 |
+
_(attr, total_length) \
|
2155 |
+
_(attr, total_weight) \
|
2156 |
+
_(attr, train) \
|
2157 |
+
_(attr, training) \
|
2158 |
+
_(attr, transpose) \
|
2159 |
+
_(attr, transpose_result) \
|
2160 |
+
_(attr, transposed) \
|
2161 |
+
_(attr, type1) \
|
2162 |
+
_(attr, type2) \
|
2163 |
+
_(attr, unbiased) \
|
2164 |
+
_(attr, unitriangular) \
|
2165 |
+
_(attr, unpack_data) \
|
2166 |
+
_(attr, unpack_pivots) \
|
2167 |
+
_(attr, unroll_dim) \
|
2168 |
+
_(attr, unsafe) \
|
2169 |
+
_(attr, upper) \
|
2170 |
+
_(attr, upscale_factor) \
|
2171 |
+
_(attr, use_fast_accum) \
|
2172 |
+
_(attr, use_gelu) \
|
2173 |
+
_(attr, use_input_stats) \
|
2174 |
+
_(attr, v) \
|
2175 |
+
_(attr, value) \
|
2176 |
+
_(attr, values) \
|
2177 |
+
_(attr, var) \
|
2178 |
+
_(attr, vec) \
|
2179 |
+
_(attr, vec1) \
|
2180 |
+
_(attr, vec2) \
|
2181 |
+
_(attr, w_hh) \
|
2182 |
+
_(attr, w_ih) \
|
2183 |
+
_(attr, weight) \
|
2184 |
+
_(attr, weight0) \
|
2185 |
+
_(attr, weight1) \
|
2186 |
+
_(attr, weight2) \
|
2187 |
+
_(attr, weight3) \
|
2188 |
+
_(attr, weight4) \
|
2189 |
+
_(attr, weight_arr) \
|
2190 |
+
_(attr, weight_buf) \
|
2191 |
+
_(attr, weight_decay) \
|
2192 |
+
_(attr, weight_g) \
|
2193 |
+
_(attr, weight_scale) \
|
2194 |
+
_(attr, weight_stride0) \
|
2195 |
+
_(attr, weight_zero_point) \
|
2196 |
+
_(attr, weights) \
|
2197 |
+
_(attr, win_length) \
|
2198 |
+
_(attr, window) \
|
2199 |
+
_(attr, window_length) \
|
2200 |
+
_(attr, with_replacement) \
|
2201 |
+
_(attr, workspace) \
|
2202 |
+
_(attr, wrap) \
|
2203 |
+
_(attr, x) \
|
2204 |
+
_(attr, x1) \
|
2205 |
+
_(attr, x2) \
|
2206 |
+
_(attr, y) \
|
2207 |
+
_(attr, z) \
|
2208 |
+
_(attr, z_state) \
|
2209 |
+
_(attr, zero_infinity) \
|
2210 |
+
_(attr, zero_point) \
|
2211 |
+
_(attr, zero_point_hh) \
|
2212 |
+
_(attr, zero_point_ih) \
|
2213 |
+
_(attr, zero_points)
|
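The FORALL_*_SYMBOLS lists above are X-macros: the header never expands them itself; a consumer supplies its own `_` macro and gets one expansion per symbol. A minimal sketch of how such a list is typically consumed follows; the enum name and the trailing counter are illustrative, not names defined in this header.

// Illustrative consumer of the X-macro list above; `ExampleAttrKey` and
// `num_attr_symbols` are made-up names, not part of the header.
#define DEFINE_SYMBOL(ns, s) ns##_##s,
enum class ExampleAttrKey {
  FORALL_ATTR_BASE_SYMBOLS(DEFINE_SYMBOL)
  num_attr_symbols // trailing count works because every entry ends in a comma
};
#undef DEFINE_SYMBOL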
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h
ADDED
@@ -0,0 +1,88 @@
#pragma once

#include <ATen/core/function.h>
#include <ATen/core/ivalue.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <functional>
#include <utility>

namespace torch {
namespace jit {

struct BuiltinOpFunction : public Function {
  BuiltinOpFunction(
      c10::QualifiedName qualname,
      c10::FunctionSchema schema,
      std::function<void(Stack&)> callable,
      std::string doc_string = "")
      : name_(std::move(qualname)),
        callable_(std::move(callable)),
        schema_(std::move(schema)),
        doc_string_(std::move(doc_string)) {
    TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1);
  }

  c10::string_view doc_string() const override {
    return doc_string_;
  }

  void run(Stack& stack) override {
    callable_(stack);
  }

  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& stack,
      TaskLauncher /* not used */) override {
    run(stack);
    auto res = c10::make_intrusive<c10::ivalue::Future>(stack.front().type());
    res->markCompleted(std::move(stack.front()));
    return res;
  }

  const c10::QualifiedName& qualname() const override {
    return name_;
  }

  // if this isn't yet defined, run its method_creator function
  void ensure_defined() override {
    // nop
  }

  const c10::FunctionSchema& getSchema() const override {
    return schema_;
  }

  size_t num_inputs() const override {
    return schema_.arguments().size();
  }

  Function& setSchema(c10::FunctionSchema schema) override {
    schema_ = std::move(schema);
    return *this;
  }

  bool call(Stack& stack, c10::optional<size_t>, c10::function_ref<void(const Code&)>) override {
    run(stack);
    return false;
  }

  bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)>) override {
    run(stack);
    return false;
  }

  ~BuiltinOpFunction() override = default;

 private:
  c10::QualifiedName name_;

  std::function<void(Stack&)> callable_;

  c10::FunctionSchema schema_;

  std::string doc_string_;
};

} // namespace jit
} // namespace torch
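BuiltinOpFunction adapts a plain stack-transforming callable plus a schema into the torch::jit::Function interface, so built-in operators can sit alongside compiled functions. A minimal sketch of wrapping and running one, assuming the usual libtorch headers are available; the operator name, schema, and identity behaviour below are made up for illustration.

// Sketch only: wrap a stack-based lambda as a jit::Function and run it.
#include <ATen/ATen.h>
#include <ATen/core/builtin_function.h>
#include <ATen/core/stack.h>

int main() {
  // One Tensor argument and exactly one return, as the constructor asserts.
  c10::FunctionSchema schema(
      "my_identity", /*overload_name=*/"",
      /*arguments=*/{c10::Argument("self")},   // Argument defaults to Tensor type
      /*returns=*/{c10::Argument()});

  torch::jit::BuiltinOpFunction fn(
      c10::QualifiedName("example.my_identity"),  // hypothetical qualified name
      std::move(schema),
      [](torch::jit::Stack& stack) {
        // identity: leave the single input IValue on the stack as the result
      },
      "identity built-in (illustrative)");

  torch::jit::Stack stack;
  stack.emplace_back(at::ones({2, 2}));
  fn.run(stack);                                  // invokes the wrapped callable
  return stack.back().toTensor().numel() == 4 ? 0 : 1;
}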
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h
ADDED
@@ -0,0 +1,441 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <memory>
|
4 |
+
|
5 |
+
#include <ATen/core/ivalue.h>
|
6 |
+
#include <ATen/core/jit_type_base.h>
|
7 |
+
#include <c10/util/Optional.h>
|
8 |
+
|
9 |
+
namespace torch {
|
10 |
+
namespace jit {
|
11 |
+
struct CompilationUnit;
|
12 |
+
struct Function;
|
13 |
+
} // namespace jit
|
14 |
+
} // namespace torch
|
15 |
+
|
16 |
+
namespace c10 {
|
17 |
+
|
18 |
+
struct FunctionSchema;
|
19 |
+
|
20 |
+
// This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither.
|
21 |
+
// This state is mutually exclusive. Buffers and Parameters can only appear on modules.
|
22 |
+
enum class AttributeKind {
|
23 |
+
BUFFER,
|
24 |
+
PARAMETER,
|
25 |
+
REGULAR_ATTRIBUTE
|
26 |
+
};
|
27 |
+
|
28 |
+
// This structure represents all notional booking entities in a class attribute: name, kind (see: AttributeKind), and type (see: TypePtr).
|
29 |
+
// Note: This structure does not represent the value of the attribute.
|
30 |
+
struct TORCH_API ClassAttribute {
|
31 |
+
public:
|
32 |
+
ClassAttribute(AttributeKind kind,
|
33 |
+
TypePtr attributeType,
|
34 |
+
std::string attributeName) :
|
35 |
+
kind_(kind),
|
36 |
+
attributeType_(std::move(attributeType)),
|
37 |
+
attributeName_(std::move(attributeName)) {}
|
38 |
+
|
39 |
+
AttributeKind getKind() const {
|
40 |
+
return kind_;
|
41 |
+
}
|
42 |
+
|
43 |
+
const TypePtr& getType() const {
|
44 |
+
return attributeType_;
|
45 |
+
}
|
46 |
+
|
47 |
+
const std::string& getName() const {
|
48 |
+
return attributeName_;
|
49 |
+
}
|
50 |
+
|
51 |
+
private:
|
52 |
+
AttributeKind kind_;
|
53 |
+
TypePtr attributeType_;
|
54 |
+
std::string attributeName_;
|
55 |
+
};
|
56 |
+
|
57 |
+
/**
|
58 |
+
* User Defined Types
|
59 |
+
*/
|
60 |
+
|
61 |
+
struct ClassType;
|
62 |
+
using ClassTypePtr = std::shared_ptr<ClassType>;
|
63 |
+
using ::torch::jit::CompilationUnit;
|
64 |
+
|
65 |
+
// This represents a class in TorchScript.
|
66 |
+
struct TORCH_API ClassType : public NamedType {
|
67 |
+
// This represents an attribute of a class; a name associated with an attribute, and a
|
68 |
+
// getter and (optional) setter for that attribute.
|
69 |
+
struct Property {
|
70 |
+
std::string name;
|
71 |
+
torch::jit::Function* getter;
|
72 |
+
torch::jit::Function* setter;
|
73 |
+
};
|
74 |
+
|
75 |
+
// Create a class type with name `name` and its methods stored in `cu`.
|
76 |
+
static ClassTypePtr create(
|
77 |
+
c10::optional<QualifiedName> qualifiedName,
|
78 |
+
std::weak_ptr<CompilationUnit> cu,
|
79 |
+
bool is_module = false,
|
80 |
+
std::string doc_string = "",
|
81 |
+
std::vector<std::string> unresolved_class_attributes = {});
|
82 |
+
|
83 |
+
bool equals(const Type& rhs) const override {
|
84 |
+
if (this == &rhs) {
|
85 |
+
return true;
|
86 |
+
}
|
87 |
+
if (auto user_rhs = rhs.castRaw<ClassType>()) {
|
88 |
+
const auto& lhs_name = name().value();
|
89 |
+
const auto& rhs_name = user_rhs->name().value();
|
90 |
+
|
91 |
+
return lhs_name == rhs_name &&
|
92 |
+
this->compilation_unit() == user_rhs->compilation_unit();
|
93 |
+
}
|
94 |
+
return false;
|
95 |
+
}
|
96 |
+
|
97 |
+
std::string str() const override {
|
98 |
+
return annotation_str();
|
99 |
+
}
|
100 |
+
|
101 |
+
std::string repr_str() const override {
|
102 |
+
std::stringstream ss;
|
103 |
+
ss << str()
|
104 |
+
<< " (of Python compilation unit at: " << compilation_unit().get() << ")";
|
105 |
+
return ss.str();
|
106 |
+
}
|
107 |
+
|
108 |
+
const std::vector<torch::jit::Function*>& methods() const;
|
109 |
+
|
110 |
+
TypePtr findAttribute(const std::string& name) const {
|
111 |
+
size_t pos = 0;
|
112 |
+
for (const auto& attr : attributes_) {
|
113 |
+
if (name == attr.getName()) {
|
114 |
+
break;
|
115 |
+
}
|
116 |
+
++pos;
|
117 |
+
}
|
118 |
+
|
119 |
+
if (pos >= attributes_.size()) {
|
120 |
+
return nullptr;
|
121 |
+
}
|
122 |
+
return attributes_[pos].getType();
|
123 |
+
}
|
124 |
+
|
125 |
+
const TypePtr& getAttribute(const std::string& name) const {
|
126 |
+
auto slot = findAttributeSlot(name);
|
127 |
+
TORCH_CHECK(
|
128 |
+
slot,
|
129 |
+
repr_str(),
|
130 |
+
" does not have an attribute with name '",
|
131 |
+
name,
|
132 |
+
"'");
|
133 |
+
return attributes_[*slot].getType();
|
134 |
+
}
|
135 |
+
|
136 |
+
size_t numAttributes() const {
|
137 |
+
return attributes_.size();
|
138 |
+
}
|
139 |
+
|
140 |
+
const TypePtr& getAttribute(size_t slot) const {
|
141 |
+
AT_ASSERT(slot < attributes_.size());
|
142 |
+
return attributes_.at(slot).getType();
|
143 |
+
}
|
144 |
+
|
145 |
+
const std::string getAttributeName(size_t slot) const {
|
146 |
+
AT_ASSERT(slot < attributes_.size());
|
147 |
+
return attributes_[slot].getName();
|
148 |
+
}
|
149 |
+
|
150 |
+
void checkNotExist(const std::string& name, const std::string& what) const;
|
151 |
+
|
152 |
+
// Attributes are stored in a specific slot at runtime for efficiency.
|
153 |
+
// When emitting instructions we specify the slot so that attribute access is
|
154 |
+
// a constant lookup
|
155 |
+
c10::optional<size_t> findAttributeSlot(const std::string& name) const {
|
156 |
+
size_t slot = 0;
|
157 |
+
for (const auto& attr : attributes_) {
|
158 |
+
if (name == attr.getName()) {
|
159 |
+
return slot;
|
160 |
+
}
|
161 |
+
slot++;
|
162 |
+
}
|
163 |
+
return c10::nullopt;
|
164 |
+
}
|
165 |
+
size_t getAttributeSlot(const std::string& name) const {
|
166 |
+
if (auto r = findAttributeSlot(name)) {
|
167 |
+
return *r;
|
168 |
+
}
|
169 |
+
TORCH_CHECK(
|
170 |
+
false,
|
171 |
+
repr_str(),
|
172 |
+
" does not have an attribute with name '",
|
173 |
+
name,
|
174 |
+
"'");
|
175 |
+
}
|
176 |
+
|
177 |
+
bool hasAttribute(const std::string& name) const {
|
178 |
+
return std::find_if(
|
179 |
+
attributes_.cbegin(),
|
180 |
+
attributes_.cend(),
|
181 |
+
[&](const ClassAttribute& attr) { return attr.getName() == name; }) !=
|
182 |
+
attributes_.cend();
|
183 |
+
}
|
184 |
+
|
185 |
+
bool isUnresolvedClassAttribute(const std::string& name) const;
|
186 |
+
|
187 |
+
at::ArrayRef<TypePtr> containedTypes() const override {
|
188 |
+
return attributeTypes_;
|
189 |
+
}
|
190 |
+
|
191 |
+
size_t addAttribute(
|
192 |
+
const std::string& name,
|
193 |
+
TypePtr type,
|
194 |
+
bool is_parameter = false,
|
195 |
+
bool is_buffer = false);
|
196 |
+
|
197 |
+
// [Internal Only] Remove attribute from the ClassType,
|
198 |
+
// caller is responsible to make sure the modification is safe:
|
199 |
+
// it is unsafe to have existing allocations
|
200 |
+
// of this object around anymore, and any code that works on
|
201 |
+
// the attribute is now invalid. Only newly created code is
|
202 |
+
// valid again.
|
203 |
+
void unsafeRemoveAttribute(const std::string& name);
|
204 |
+
|
205 |
+
// [Internal Only] Change the type of an attribute of the ClassType,
|
206 |
+
// The caller is responsible to make sure the modification is safe:
|
207 |
+
// it is unsafe to maintain uses of the old type of the attribute,
|
208 |
+
// and any code that works on the attribute is now invalid.
|
209 |
+
// Only newly created code is valid again.
|
210 |
+
void unsafeChangeAttributeType(const std::string& name, const TypePtr& new_ty);
|
211 |
+
|
212 |
+
// Add attribute \p NAME if it doesn't exist or verify that it has a
|
213 |
+
// compatible type otherwise.
|
214 |
+
size_t addOrCheckAttribute(
|
215 |
+
const std::string& name,
|
216 |
+
TypePtr ty,
|
217 |
+
bool is_parameter = false,
|
218 |
+
bool is_buffer = false) {
|
219 |
+
auto slot_idx = findAttributeSlot(name);
|
220 |
+
if (!slot_idx) {
|
221 |
+
return addAttribute(name, std::move(ty), is_parameter, is_buffer);
|
222 |
+
}
|
223 |
+
|
224 |
+
TORCH_CHECK(
|
225 |
+
is_parameter == this->is_parameter(*slot_idx),
|
226 |
+
"Parameter field mismatch for the field '",
|
227 |
+
name,
|
228 |
+
"'");
|
229 |
+
const TypePtr& atype = getAttribute(*slot_idx);
|
230 |
+
TORCH_CHECK(
|
231 |
+
ty->isSubtypeOf(*atype),
|
232 |
+
ty->repr_str(),
|
233 |
+
" is not compatible with the type ",
|
234 |
+
atype->repr_str(),
|
235 |
+
" for the field '",
|
236 |
+
name,
|
237 |
+
"'");
|
238 |
+
return *slot_idx;
|
239 |
+
}
|
240 |
+
|
241 |
+
// Get the property with the given \p name, if it exists on the class.
|
242 |
+
c10::optional<ClassType::Property> getProperty(const std::string& name);
|
243 |
+
// Add a property named \p name with \p getter and \p setter as its getter and setter.
|
244 |
+
void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter);
|
245 |
+
// Get a list of all properties.
|
246 |
+
const std::vector<Property>& properties() const {
|
247 |
+
return properties_;
|
248 |
+
}
|
249 |
+
|
250 |
+
bool hasConstant(const std::string& name) const {
|
251 |
+
return std::find_if(
|
252 |
+
constantNames_.cbegin(),
|
253 |
+
constantNames_.cend(),
|
254 |
+
[&](const std::string& constant) { return constant == name; }) !=
|
255 |
+
constantNames_.cend();
|
256 |
+
}
|
257 |
+
|
258 |
+
size_t addConstant(const std::string& name, const IValue& value);
|
259 |
+
|
260 |
+
c10::optional<size_t> findConstantSlot(const std::string& name) const;
|
261 |
+
|
262 |
+
size_t getConstantSlot(const std::string& name) const {
|
263 |
+
if (auto r = findConstantSlot(name)) {
|
264 |
+
return *r;
|
265 |
+
}
|
266 |
+
TORCH_CHECK(
|
267 |
+
false,
|
268 |
+
repr_str(),
|
269 |
+
" does not have constant field with the name '",
|
270 |
+
name,
|
271 |
+
"'");
|
272 |
+
}
|
273 |
+
|
274 |
+
const std::string& getConstantName(size_t slot) const;
|
275 |
+
|
276 |
+
const std::string& doc_string() const {
|
277 |
+
return doc_string_;
|
278 |
+
}
|
279 |
+
|
280 |
+
IValue getConstant(const std::string& name) const;
|
281 |
+
|
282 |
+
IValue getConstant(size_t slot) const;
|
283 |
+
|
284 |
+
c10::optional<IValue> findConstant(const std::string& name) const;
|
285 |
+
|
286 |
+
size_t numConstants() const;
|
287 |
+
|
288 |
+
at::ArrayRef<std::string> constantNames() const {
|
289 |
+
return constantNames_;
|
290 |
+
}
|
291 |
+
|
292 |
+
at::ArrayRef<IValue> constantValues() const;
|
293 |
+
|
294 |
+
// [Internal Only] Remove constant from the ClassType
|
295 |
+
// caller is responsible to make sure the modification is safe:
|
296 |
+
// it is unsafe to have existing allocations
|
297 |
+
// of this object around anymore, and any code that works on
|
298 |
+
// the attribute is now invalid. Only newly created code is
|
299 |
+
// valid again.
|
300 |
+
void unsafeRemoveConstant(const std::string& name);
|
301 |
+
|
302 |
+
TypePtr createWithContained(std::vector<TypePtr> contained_types) const override {
|
303 |
+
auto ptr = ClassType::create(name(), compilation_unit_, is_module());
|
304 |
+
AT_ASSERT(numAttributes() == contained_types.size());
|
305 |
+
for(size_t i = 0; i < attributes_.size(); ++i) {
|
306 |
+
AT_ASSERT(attributes_[i].getType()->isSubtypeOf(*contained_types[i]));
|
307 |
+
ptr->addAttribute(attributes_[i].getName(), std::move(contained_types[i]));
|
308 |
+
}
|
309 |
+
// Copy methods over
|
310 |
+
for (const auto& method : methods()) {
|
311 |
+
ptr->addMethod(method);
|
312 |
+
}
|
313 |
+
return ptr;
|
314 |
+
}
|
315 |
+
|
316 |
+
bool is_module() const override {
|
317 |
+
return isModule_;
|
318 |
+
}
|
319 |
+
|
320 |
+
const std::vector<ClassAttribute>& getAttributes() const {
|
321 |
+
return attributes_;
|
322 |
+
}
|
323 |
+
|
324 |
+
bool is_parameter(size_t slot) const {
|
325 |
+
TORCH_INTERNAL_ASSERT(
|
326 |
+
is_module(), "asking for parameterSlots of non-Module");
|
327 |
+
return attributes_.at(slot).getKind() == AttributeKind::PARAMETER;
|
328 |
+
}
|
329 |
+
|
330 |
+
bool is_buffer(size_t slot) const {
|
331 |
+
TORCH_INTERNAL_ASSERT(
|
332 |
+
is_module(), "asking for bufferWrittenSlots of non-Module");
|
333 |
+
return attributes_.at(slot).getKind() == AttributeKind::BUFFER;
|
334 |
+
}
|
335 |
+
|
336 |
+
void addForwardPreHook(torch::jit::Function* pre_hook_ptr);
|
337 |
+
void addForwardHook(torch::jit::Function* hook_ptr);
|
338 |
+
torch::jit::Function* findForwardPreHook(const std::string& name) const;
|
339 |
+
torch::jit::Function* findForwardHook(const std::string& name) const;
|
340 |
+
const std::vector<torch::jit::Function*>& getForwardHooks() const;
|
341 |
+
const std::vector<torch::jit::Function*>& getForwardPreHooks() const;
|
342 |
+
|
343 |
+
void checkForwardPreHookSchema(
|
344 |
+
int pre_hook_idx,
|
345 |
+
const FunctionSchema& pre_hook_schema) const;
|
346 |
+
void checkForwardHookSchema(
|
347 |
+
int hook_idx,
|
348 |
+
const FunctionSchema& hook_schema) const;
|
349 |
+
|
350 |
+
void addMethod(torch::jit::Function* method);
|
351 |
+
torch::jit::Function* findMethod(const std::string& name) const;
|
352 |
+
torch::jit::Function& getMethod(const std::string& name) const;
|
353 |
+
torch::jit::Function* findHook(const std::string& name) const;
|
354 |
+
torch::jit::Function& getHook(const std::string& name) const;
|
355 |
+
bool hasMethod(const std::string& name) const;
|
356 |
+
|
357 |
+
torch::jit::Function* findStaticMethod(const std::string& name) const;
|
358 |
+
void addStaticMethod(torch::jit::Function* method);
|
359 |
+
|
360 |
+
// [Internal Only] Remove method from the ClassType
|
361 |
+
// caller is responsible to make sure the modification is safe:
|
362 |
+
// it is unsafe to have existing allocations
|
363 |
+
// of this object around anymore, and any code that works on
|
364 |
+
// the attribute is now invalid. Only newly created code is
|
365 |
+
// valid again.
|
366 |
+
// Note this method is intended for freezing only.
|
367 |
+
void unsafeRemoveMethod(const std::string& name);
|
368 |
+
|
369 |
+
std::shared_ptr<CompilationUnit> compilation_unit();
|
370 |
+
|
371 |
+
std::shared_ptr<const CompilationUnit> compilation_unit() const;
|
372 |
+
|
373 |
+
// generate a refined version of this class.
|
374 |
+
// It has the same name but the slot Types are subtypes of
|
375 |
+
// the original slots. It is only valid to refine a class type in a context
|
376 |
+
// where it is known that there are no assignments to the object's slots
|
377 |
+
// that would invalidate the refinement.
|
378 |
+
// These variants are not registered in the global class table.
|
379 |
+
ClassTypePtr refine(at::ArrayRef<TypePtr> refined_slots) const;
|
380 |
+
|
381 |
+
bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
|
382 |
+
|
383 |
+
static const TypeKind Kind = TypeKind::ClassType;
|
384 |
+
|
385 |
+
private:
|
386 |
+
ClassType(
|
387 |
+
c10::optional<QualifiedName> name,
|
388 |
+
std::weak_ptr<CompilationUnit> cu,
|
389 |
+
bool is_module = false,
|
390 |
+
std::string doc_string = "",
|
391 |
+
std::vector<std::string> unresolved_class_attributes = {});
|
392 |
+
|
393 |
+
std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
|
394 |
+
const auto& n = name().value();
|
395 |
+
return n.qualifiedName();
|
396 |
+
}
|
397 |
+
|
398 |
+
void addAttribute(ClassAttribute classAttribute);
|
399 |
+
std::string getForwardPreHookErrorMessage(int pre_hook_idx) const;
|
400 |
+
std::string getForwardHookErrorMessage(int hook_idx) const;
|
401 |
+
|
402 |
+
// Mapping of attribute names -> their type.
|
403 |
+
// NOTE: this does not contain methods, which are stored in the module
|
404 |
+
// TODO: once modules support arbitrary ivalue attributes, we don't need this
|
405 |
+
// anymore.
|
406 |
+
// TODO: This is better represented as an OrderedDict, but alas it is not yet
|
407 |
+
// available from c10
|
408 |
+
|
409 |
+
// Mapping of constant names -> their value.
|
410 |
+
std::vector<std::string> constantNames_;
|
411 |
+
std::vector<IValue> constantValues_;
|
412 |
+
// Holds method attributes
|
413 |
+
std::weak_ptr<CompilationUnit> compilation_unit_;
|
414 |
+
|
415 |
+
// Holds all attributes; attribute details are found on ClassAttribute
|
416 |
+
std::vector<ClassAttribute> attributes_;
|
417 |
+
// Construct mirroring attributes_, only around due to the fact that `containedTypes()` method returns an ArrayRef.
|
418 |
+
// Never fill this without using the appropriate provideNewClassAttribute method
|
419 |
+
std::vector<TypePtr> attributeTypes_;
|
420 |
+
|
421 |
+
// List of methods associated with this class.
|
422 |
+
std::vector<torch::jit::Function*> methods_;
|
423 |
+
std::vector<torch::jit::Function*> staticmethods_;
|
424 |
+
|
425 |
+
// List of hooks to be run before/after forward.
|
426 |
+
std::vector<torch::jit::Function*> forward_hooks_;
|
427 |
+
std::vector<torch::jit::Function*> forward_pre_hooks_;
|
428 |
+
|
429 |
+
// List of properties exposed by this class.
|
430 |
+
std::vector<Property> properties_;
|
431 |
+
|
432 |
+
bool isModule_ = false;
|
433 |
+
|
434 |
+
// Doc string of class.
|
435 |
+
std::string doc_string_ = "";
|
436 |
+
|
437 |
+
// For error reporting accesses to class level attributes.
|
438 |
+
std::vector<std::string> unresolved_class_attributes_;
|
439 |
+
};
|
440 |
+
|
441 |
+
}
|
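ClassType stores attributes positionally, so the slot-lookup helpers above let emitted code turn attribute access into a constant index. A minimal sketch of building a class type and resolving a slot; the class name, attribute names, and the CompilationUnit header path are assumptions made for illustration.

// Sketch only: create a TorchScript class type and query an attribute slot.
#include <ATen/core/class_type.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/api/compilation_unit.h>  // assumed location of CompilationUnit

void class_type_sketch() {
  auto cu = std::make_shared<torch::jit::CompilationUnit>();
  auto point = c10::ClassType::create(
      c10::QualifiedName("__torch__.ExamplePoint"), cu, /*is_module=*/false);

  point->addAttribute("x", c10::FloatType::get());
  point->addAttribute("y", c10::FloatType::get());

  // Attribute access is a constant slot lookup at runtime.
  size_t slot = point->getAttributeSlot("y");            // 1 in this sketch
  const c10::TypePtr& attr_type = point->getAttribute(slot);
  (void)attr_type;
}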
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h
ADDED
@@ -0,0 +1,687 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/util/StringUtil.h>
|
4 |
+
#include <c10/util/string_view.h>
|
5 |
+
#include <c10/util/irange.h>
|
6 |
+
#include <ATen/core/jit_type.h>
|
7 |
+
#include <ATen/core/symbol.h>
|
8 |
+
#include <ATen/core/ivalue.h>
|
9 |
+
#include <ATen/core/alias_info.h>
|
10 |
+
#include <ATen/core/operator_name.h>
|
11 |
+
#include <ATen/core/dispatch/OperatorOptions.h>
|
12 |
+
#include <unordered_map>
|
13 |
+
#include <utility>
|
14 |
+
|
15 |
+
namespace c10 {
|
16 |
+
|
17 |
+
// schema as used in the compiler for resolving function calls and reporting
|
18 |
+
// errors. These objects should be constructed from C10 schema once those
|
19 |
+
// are available.
|
20 |
+
|
21 |
+
struct Argument;
|
22 |
+
struct FunctionSchema;
|
23 |
+
|
24 |
+
using AliasTypeSet = std::vector<TypePtr>;
|
25 |
+
|
26 |
+
bool operator==(const Argument& lhs, const Argument& rhs);
|
27 |
+
|
28 |
+
struct Argument {
|
29 |
+
Argument(
|
30 |
+
std::string name = "",
|
31 |
+
const TypePtr& type = nullptr,
|
32 |
+
c10::optional<int32_t> N = c10::nullopt,
|
33 |
+
c10::optional<IValue> default_value = c10::nullopt,
|
34 |
+
bool kwarg_only = false,
|
35 |
+
c10::optional<AliasInfo> alias_info = c10::nullopt)
|
36 |
+
: Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
|
37 |
+
|
38 |
+
Argument(
|
39 |
+
std::string name,
|
40 |
+
TypePtr fake_type,
|
41 |
+
TypePtr real_type,
|
42 |
+
c10::optional<int32_t> N = c10::nullopt,
|
43 |
+
c10::optional<IValue> default_value = c10::nullopt,
|
44 |
+
bool kwarg_only = false,
|
45 |
+
c10::optional<AliasInfo> alias_info = c10::nullopt)
|
46 |
+
: name_(std::move(name)),
|
47 |
+
type_(fake_type ? std::move(fake_type) : TensorType::get()),
|
48 |
+
real_type_(real_type ? std::move(real_type) : type_),
|
49 |
+
N_(N),
|
50 |
+
default_value_(std::move(default_value)),
|
51 |
+
alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
|
52 |
+
kwarg_only_(kwarg_only) {
|
53 |
+
// this is a softly-enforced invariant for out arguments.
|
54 |
+
bool is_alias = alias_info_ != nullptr && alias_info_->isWrite();
|
55 |
+
is_out_ = kwarg_only_ && is_alias;
|
56 |
+
}
|
57 |
+
|
58 |
+
Argument(Argument&& rhs) noexcept = default;
|
59 |
+
|
60 |
+
Argument(const Argument& rhs)
|
61 |
+
: name_(rhs.name_),
|
62 |
+
type_(rhs.type_),
|
63 |
+
real_type_(rhs.real_type_),
|
64 |
+
N_(rhs.N_),
|
65 |
+
default_value_(rhs.default_value_),
|
66 |
+
alias_info_(rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr),
|
67 |
+
kwarg_only_(rhs.kwarg_only_),
|
68 |
+
is_out_(rhs.is_out_) {}
|
69 |
+
|
70 |
+
Argument& operator=(Argument&& rhs) = default;
|
71 |
+
|
72 |
+
Argument& operator=(const Argument& rhs) {
|
73 |
+
if (this != &rhs) {
|
74 |
+
name_ = rhs.name_;
|
75 |
+
type_ = rhs.type_;
|
76 |
+
real_type_ = rhs.real_type_;
|
77 |
+
N_ = rhs.N_;
|
78 |
+
default_value_ = rhs.default_value_;
|
79 |
+
alias_info_ = rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr;
|
80 |
+
kwarg_only_ = rhs.kwarg_only_;
|
81 |
+
is_out_ = rhs.is_out_;
|
82 |
+
}
|
83 |
+
return *this;
|
84 |
+
}
|
85 |
+
|
86 |
+
const std::string& name() const {
|
87 |
+
return name_;
|
88 |
+
}
|
89 |
+
const TypePtr& type() const {
|
90 |
+
return type_;
|
91 |
+
}
|
92 |
+
// if type() is non-null, this is guaranteed to be non-null (if no real
|
93 |
+
// type was provided, this takes on type()'s value)
|
94 |
+
const TypePtr& real_type() const {
|
95 |
+
return real_type_;
|
96 |
+
}
|
97 |
+
c10::optional<int32_t> N() const {
|
98 |
+
return N_;
|
99 |
+
}
|
100 |
+
const c10::optional<IValue>& default_value() const {
|
101 |
+
return default_value_;
|
102 |
+
}
|
103 |
+
bool kwarg_only() const {
|
104 |
+
return kwarg_only_;
|
105 |
+
}
|
106 |
+
|
107 |
+
bool is_out() const {
|
108 |
+
return is_out_;
|
109 |
+
}
|
110 |
+
|
111 |
+
C10_NODISCARD const AliasInfo* alias_info() const {
|
112 |
+
return alias_info_.get();
|
113 |
+
}
|
114 |
+
|
115 |
+
bool is_inferred_type() const {
|
116 |
+
bool is_inferred_type = false;
|
117 |
+
TORCH_INTERNAL_ASSERT(type_);
|
118 |
+
if (auto pt = type_->cast<TensorType>()) {
|
119 |
+
if (pt->isInferredType()) {
|
120 |
+
is_inferred_type = true;
|
121 |
+
}
|
122 |
+
}
|
123 |
+
return is_inferred_type;
|
124 |
+
}
|
125 |
+
|
126 |
+
std::string formatTypeMismatchMsg(const std::string& actual_type) const {
|
127 |
+
std::string inferred_type_hint;
|
128 |
+
if (is_inferred_type()) {
|
129 |
+
inferred_type_hint = c10::str(
|
130 |
+
"Inferred '",
|
131 |
+
name(),
|
132 |
+
"' to be of type 'Tensor' ",
|
133 |
+
"because it was not annotated with an explicit type.\n");
|
134 |
+
}
|
135 |
+
return c10::str(
|
136 |
+
"Expected a value of type '",
|
137 |
+
type()->repr_str(),
|
138 |
+
"' for argument '",
|
139 |
+
name(),
|
140 |
+
"' but instead found type '",
|
141 |
+
actual_type,
|
142 |
+
"'.\n",
|
143 |
+
inferred_type_hint);
|
144 |
+
}
|
145 |
+
|
146 |
+
Argument cloneWithType(TypePtr new_type) const {
|
147 |
+
return Argument(
|
148 |
+
name_,
|
149 |
+
std::move(new_type),
|
150 |
+
N_,
|
151 |
+
default_value_,
|
152 |
+
kwarg_only_,
|
153 |
+
alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt);
|
154 |
+
}
|
155 |
+
|
156 |
+
// this function checks whether this Argument is backward compatible with
|
157 |
+
// the old one. we consider the following cases are backward compatible:
|
158 |
+
// 1) two arguments are equal
|
159 |
+
// 2) this arg's type should be subtype of old
|
160 |
+
// 3) this arg must provide the same default value if old arg has one,
|
161 |
+
bool isBackwardCompatibleWith(
|
162 |
+
const Argument& old,
|
163 |
+
std::ostream* why_not=nullptr) const;
|
164 |
+
|
165 |
+
// this function checks whether this Argument is forward compatible with
|
166 |
+
// the old one. we consider the following cases are forward compatible:
|
167 |
+
// 1) two arguments are equal
|
168 |
+
// 2) this arg's type should be subtype of old
|
169 |
+
// 3) this arg must provide the same default value if old arg has one,
|
170 |
+
bool isForwardCompatibleWith(
|
171 |
+
const Argument& old,
|
172 |
+
std::ostream* why_not = nullptr) const;
|
173 |
+
|
174 |
+
private:
|
175 |
+
std::string name_;
|
176 |
+
TypePtr type_;
|
177 |
+
TypePtr real_type_; // this is ScalarType, not int, e.g.
|
178 |
+
// for list types, an optional statically known length for the list
|
179 |
+
// e.g. for int[3]: type = ListType::ofInts(), N = 3
|
180 |
+
// If present, this will allow scalars to be broadcast to this length to
|
181 |
+
// become a list.
|
182 |
+
c10::optional<int32_t> N_;
|
183 |
+
|
184 |
+
c10::optional<IValue> default_value_;
|
185 |
+
// AliasInfo is huge, so let's only allocate memory for it if
|
186 |
+
// necessary (which it isn't during schema parsing on startup, to
|
187 |
+
// give a pertinent example).
|
188 |
+
std::unique_ptr<AliasInfo> alias_info_;
|
189 |
+
// is this only specifiable as a keyword argument?
|
190 |
+
bool kwarg_only_;
|
191 |
+
// marks if the argument is out variant of the schema
|
192 |
+
bool is_out_;
|
193 |
+
};
|
194 |
+
|
195 |
+
inline bool operator==(const Argument& lhs, const Argument& rhs) {
|
196 |
+
return lhs.name() == rhs.name()
|
197 |
+
&& *lhs.type() == *rhs.type()
|
198 |
+
&& lhs.N() == rhs.N()
|
199 |
+
&& lhs.default_value() == rhs.default_value()
|
200 |
+
&& lhs.kwarg_only() == rhs.kwarg_only()
|
201 |
+
&& (lhs.alias_info() == rhs.alias_info()
|
202 |
+
|| (lhs.alias_info() != nullptr && rhs.alias_info() != nullptr
|
203 |
+
&& *lhs.alias_info() == *rhs.alias_info()));
|
204 |
+
}
|
205 |
+
|
206 |
+
inline bool operator!=(const Argument& lhs, const Argument& rhs) {
|
207 |
+
return !(lhs == rhs);
|
208 |
+
}
|
209 |
+
|
210 |
+
enum struct TORCH_API SchemaArgType { input, output };
|
211 |
+
|
212 |
+
/**
|
213 |
+
* struct SchemaArgument
|
214 |
+
*
|
215 |
+
* Structure used to represent arguments or returns for a schema.
|
216 |
+
*/
|
217 |
+
struct TORCH_API SchemaArgument {
|
218 |
+
SchemaArgType type;
|
219 |
+
size_t index;
|
220 |
+
SchemaArgument(SchemaArgType tpe, size_t idx) : type(tpe), index(idx) {}
|
221 |
+
bool operator==(const SchemaArgument& rhs) const {
|
222 |
+
return type == rhs.type && index == rhs.index;
|
223 |
+
}
|
224 |
+
};
|
225 |
+
|
226 |
+
bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs);
|
227 |
+
|
228 |
+
struct TORCH_API FunctionSchema {
|
229 |
+
FunctionSchema(
|
230 |
+
std::string name,
|
231 |
+
std::string overload_name,
|
232 |
+
std::vector<Argument> arguments,
|
233 |
+
std::vector<Argument> returns,
|
234 |
+
bool is_vararg = false,
|
235 |
+
bool is_varret = false)
|
236 |
+
: name_({std::move(name), std::move(overload_name)}),
|
237 |
+
arguments_(std::move(arguments)),
|
238 |
+
returns_(std::move(returns)),
|
239 |
+
is_vararg_(is_vararg),
|
240 |
+
is_varret_(is_varret) {
|
241 |
+
checkSchema();
|
242 |
+
}
|
243 |
+
|
244 |
+
FunctionSchema(
|
245 |
+
Symbol name,
|
246 |
+
std::string overload_name,
|
247 |
+
std::vector<Argument> arguments,
|
248 |
+
std::vector<Argument> returns,
|
249 |
+
bool is_vararg = false,
|
250 |
+
bool is_varret = false)
|
251 |
+
: FunctionSchema(
|
252 |
+
name.toQualString(),
|
253 |
+
std::move(overload_name),
|
254 |
+
std::move(arguments),
|
255 |
+
std::move(returns),
|
256 |
+
is_vararg,
|
257 |
+
is_varret) {
|
258 |
+
checkSchema();
|
259 |
+
}
|
260 |
+
|
261 |
+
// Checks whether this schema is backward compatible with the old one.
|
262 |
+
// The following conditions must be true:
|
263 |
+
// [Function structure] The new schema's name, overload-name, varargs, and
|
264 |
+
// return arity are the same.
|
265 |
+
// [Output Narrowing] The new schema's output type must be the same class
|
266 |
+
// or inherit from the old schema's output type.
|
267 |
+
// [Argument count] The new schema must have at least as many arguments as
|
268 |
+
// the old schema (considering the list of positional and kwargs).
|
269 |
+
// [Arg Compatibility] Every argument in the old schema has a corresponding
|
270 |
+
// argument in the new schema that:
|
271 |
+
// * is at the same position.
|
272 |
+
// * has the same name.
|
273 |
+
// * is either positional, or kwarg and the old argument was kwarg.
|
274 |
+
// * has the same type, or the old argument's type inherits from the
|
275 |
+
// new argument's type.
|
276 |
+
// [Default Values] Every new argument must have a default value.
|
277 |
+
// E.g.
|
278 |
+
// OK f_new(a, b, c=1) => f_old(a, b)
|
279 |
+
// NOK f_new(a, c=1, *, b) => f_old(a, *, b)
|
280 |
+
// OK f_new(a, b, *, c) => f_old(a, *, b, c)
|
281 |
+
// NOK f_new(a, *, b, c) -> f_old(a, b, *, c)
|
282 |
+
// NOK f_new(a, *, c, b) => f_old(a, *, b, c)
|
283 |
+
// OK f_new(a, *, b, c, d=1) => f_old(a, *, b, c)
|
284 |
+
bool isBackwardCompatibleWith(
|
285 |
+
const FunctionSchema& old,
|
286 |
+
std::ostream* why_not = nullptr) const;
|
287 |
+
|
288 |
+
// Checks whether this schema is forward compatible with the old one.
|
289 |
+
// The following conditions must be true:
|
290 |
+
// [Function structure] The new schema's name, overload-name, varargs, and
|
291 |
+
// return arity are the same.
|
292 |
+
// [Output Narrowing] The new schema's output type must be the same class
|
293 |
+
// or inherit from the old schema's output type.
|
294 |
+
// [Arg Compatibility] Every argument in the old schema has a corresponding
|
295 |
+
// argument in the new schema that:
|
296 |
+
// * is at the same position.
|
297 |
+
// * has the same name.
|
298 |
+
// * is either positional, or kwarg and the old argument was kwarg.
|
299 |
+
// * has the same type, or the old argument's type inherits from the
|
300 |
+
// new argument's type.
|
301 |
+
// [Default Values] Every new argument must have a default value.
|
302 |
+
// Each default value type should NOT be a container type.
|
303 |
+
// [Positioning] All defaults arguments MUST go after either old
|
304 |
+
// default arguments or the end of positional arguments
|
305 |
+
// and right BEFORE all out arguments
|
306 |
+
bool isForwardCompatibleWith(
|
307 |
+
const FunctionSchema& old,
|
308 |
+
std::ostringstream& why_not) const;
|
309 |
+
|
310 |
+
private:
|
311 |
+
OperatorName name_;
|
312 |
+
std::vector<Argument> arguments_;
|
313 |
+
std::vector<Argument> returns_;
|
314 |
+
// if true then this schema takes an arbitrary number of additional arguments
|
315 |
+
// after the argument specified in arguments
|
316 |
+
// currently this is used primarily to represent 'primitive' operators whose
|
317 |
+
// arguments are not checked by schema
|
318 |
+
bool is_vararg_;
|
319 |
+
bool is_varret_;
|
320 |
+
|
321 |
+
// if no alias information is directly specified, what kind of "default"
|
322 |
+
// alias information should we infer?
|
323 |
+
// NB: due to alias analysis kind merging, this may be nullopt. Eventually
|
324 |
+
// this should always be set no matter what
|
325 |
+
c10::optional<AliasAnalysisKind> alias_kind_;
|
326 |
+
|
327 |
+
template <typename T>
|
328 |
+
void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const;
|
329 |
+
|
330 |
+
void checkSchema() const {
|
331 |
+
bool seen_default_arg = false;
|
332 |
+
for (const auto& arg : arguments()) {
|
333 |
+
if (arg.default_value()) {
|
334 |
+
seen_default_arg = true;
|
335 |
+
} else {
|
336 |
+
// we have historically serialized broadcasting lists wo/default values,
|
337 |
+
// so to not break BC allow lists here
|
338 |
+
if (arg.type()->kind() == ListType::Kind) {
|
339 |
+
continue;
|
340 |
+
}
|
341 |
+
TORCH_INTERNAL_ASSERT(
|
342 |
+
!seen_default_arg || arg.kwarg_only(),
|
343 |
+
"Non-default positional argument follows default argument. Parameter ",
|
344 |
+
arg.name(),
|
345 |
+
" in ",
|
346 |
+
*this);
|
347 |
+
}
|
348 |
+
}
|
349 |
+
}
|
350 |
+
|
351 |
+
public:
|
352 |
+
|
353 |
+
void dump() const;
|
354 |
+
|
355 |
+
const OperatorName& operator_name() const {
|
356 |
+
return name_;
|
357 |
+
}
|
358 |
+
const std::string& name() const {
|
359 |
+
return name_.name;
|
360 |
+
}
|
361 |
+
const std::string& overload_name() const {
|
362 |
+
return name_.overload_name;
|
363 |
+
}
|
364 |
+
const std::vector<Argument>& arguments() const {
|
365 |
+
return arguments_;
|
366 |
+
}
|
367 |
+
const std::vector<Argument>& returns() const {
|
368 |
+
return returns_;
|
369 |
+
}
|
370 |
+
bool is_vararg() const {
|
371 |
+
return is_vararg_;
|
372 |
+
}
|
373 |
+
bool is_varret() const {
|
374 |
+
return is_varret_;
|
375 |
+
}
|
376 |
+
bool is_aliasing(const c10::SchemaArgument &argument) const {
|
377 |
+
TORCH_INTERNAL_ASSERT(
|
378 |
+
argument.index < getCorrectList(argument.type).size(),
|
379 |
+
"Invalid index for schema.");
|
380 |
+
const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
|
381 |
+
return aliasInfo;
|
382 |
+
}
|
383 |
+
bool is_mutable() const {
|
384 |
+
return std::any_of(
|
385 |
+
arguments_.cbegin(), arguments_.cend(), [](const Argument& arg) {
|
386 |
+
const AliasInfo* aliasInfo = arg.alias_info();
|
387 |
+
return aliasInfo && aliasInfo->isWrite();
|
388 |
+
});
|
389 |
+
}
|
390 |
+
bool is_mutable(const c10::SchemaArgument &argument) const {
|
391 |
+
TORCH_INTERNAL_ASSERT(
|
392 |
+
argument.index < getCorrectList(argument.type).size(),
|
393 |
+
"Invalid index for schema.");
|
394 |
+
const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
|
395 |
+
return aliasInfo && aliasInfo->isWrite();
|
396 |
+
}
|
397 |
+
bool is_mutable(c10::string_view name) const {
|
398 |
+
c10::optional<int> index = argumentIndexWithName(name);
|
399 |
+
TORCH_INTERNAL_ASSERT(
|
400 |
+
index != c10::nullopt, "Schema has no argument named ", name);
|
401 |
+
|
402 |
+
return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)});
|
403 |
+
}
|
404 |
+
|
405 |
+
// Returns whether lhs and rhs may alias directly.
|
406 |
+
// This does not account for cases where lhs or rhs are a container that
|
407 |
+
// may contain elements that alias the other argument.
|
408 |
+
// FunctionSchema::may_contain_alias will include that functionality.
|
409 |
+
bool may_alias(const SchemaArgument& lhs, const SchemaArgument& rhs) const;
|
410 |
+
|
411 |
+
// Returns whether lhs and rhs may alias directly or whether lhs/rhs are a container
|
412 |
+
// that may contain elements that alias the other argument.
|
413 |
+
// bidirectional = false only returns whether lhs may contain an alias of rhs
|
414 |
+
// while bidirectional = true returns both directions.
|
415 |
+
bool may_contain_alias(const SchemaArgument& lhs, const SchemaArgument& rhs, bool bidirectional = true) const;
|
416 |
+
|
417 |
+
// Returns whether the two AliasTypeSets contain any similarities
|
418 |
+
// ie: whether the two type sets can alias.
|
419 |
+
bool canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;
|
420 |
+
|
421 |
+
// Recursively Finds all contained types within the AliasTypeSet.
|
422 |
+
c10::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;
|
423 |
+
|
424 |
+
// Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
|
425 |
+
// Used to map types to a type such that all types that can alias will be mapped to the same type.
|
426 |
+
// For example, calling this method on 'Optional[List[int]]' is the same as calling this method
|
427 |
+
// on 'List[int]'.
|
428 |
+
c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
|
429 |
+
|
430 |
+
// Returns either arguments() or returns() depending on the SchemaArgType
|
431 |
+
// output => returns(), input => arguments()
|
432 |
+
const std::vector<Argument>& getCorrectList(SchemaArgType type) const;
|
433 |
+
|
434 |
+
c10::optional<int> argumentIndexWithName(c10::string_view name) const {
|
435 |
+
for (const auto i : c10::irange(arguments().size())) {
|
436 |
+
if(name == arguments()[i].name())
|
437 |
+
return i;
|
438 |
+
}
|
439 |
+
return c10::nullopt;
|
440 |
+
}
|
441 |
+
FunctionSchema cloneWithName(std::string name, std::string overload_name) const {
|
442 |
+
return FunctionSchema(
|
443 |
+
std::move(name),
|
444 |
+
std::move(overload_name),
|
445 |
+
arguments(),
|
446 |
+
returns(),
|
447 |
+
is_vararg(),
|
448 |
+
is_varret()
|
449 |
+
);
|
450 |
+
}
|
451 |
+
FunctionSchema cloneWithArguments(std::vector<Argument> new_arguments) const {
|
452 |
+
return FunctionSchema(
|
453 |
+
name(),
|
454 |
+
overload_name(),
|
455 |
+
std::move(new_arguments),
|
456 |
+
returns(),
|
457 |
+
is_vararg(),
|
458 |
+
is_varret());
|
459 |
+
}
|
460 |
+
FunctionSchema cloneWithReturns(std::vector<Argument> new_returns) const {
|
461 |
+
return FunctionSchema(
|
462 |
+
name(),
|
463 |
+
overload_name(),
|
464 |
+
arguments(),
|
465 |
+
std::move(new_returns),
|
466 |
+
is_vararg(),
|
467 |
+
is_varret());
|
468 |
+
}
|
469 |
+
|
470 |
+
std::string formatTypeMismatchMsg(
|
471 |
+
const Argument& expected,
|
472 |
+
const std::string& actual_type,
|
473 |
+
c10::optional<size_t> position = c10::nullopt,
|
474 |
+
c10::optional<std::string> value = c10::nullopt) const;
|
475 |
+
|
476 |
+
FunctionSchema cloneWithRemappedTypes(
|
477 |
+
const std::function<TypePtr(TypePtr)> type_map) const;
|
478 |
+
|
479 |
+
FunctionSchema cloneWithRealTypes(bool with_symint=true) const;
|
480 |
+
|
481 |
+
// Check that inputs have the correct types and appends any missing default
|
482 |
+
// values.
|
483 |
+
template <typename T = c10::PlatformType>
|
484 |
+
void checkAndNormalizeInputs(
|
485 |
+
std::vector<IValue>& inputs,
|
486 |
+
const std::unordered_map<std::string, IValue>& kwargs =
|
487 |
+
std::unordered_map<std::string, IValue>{}) const;
|
488 |
+
|
489 |
+
std::string findErrorInKwargs(const std::vector<std::string>& kwargs) const;
|
490 |
+
|
491 |
+
bool hasAnyAliasInfo() const {
|
492 |
+
for (const auto& arg : arguments_) {
|
493 |
+
if (arg.alias_info() != nullptr) {
|
494 |
+
return true;
|
495 |
+
}
|
496 |
+
}
|
497 |
+
for (const auto& ret : returns_) {
|
498 |
+
if (ret.alias_info() != nullptr) {
|
499 |
+
return true;
|
500 |
+
}
|
501 |
+
}
|
502 |
+
return false;
|
503 |
+
}
|
504 |
+
|
505 |
+
|
506 |
+
// TODO remove the mutation here
|
507 |
+
bool isDefaultAliasAnalysisKind() const {
|
508 |
+
return !alias_kind_;
|
509 |
+
}
|
510 |
+
AliasAnalysisKind aliasAnalysis() const {
|
511 |
+
return alias_kind_.value_or(AliasAnalysisKind::CONSERVATIVE);
|
512 |
+
}
|
513 |
+
void setAliasAnalysis(AliasAnalysisKind v) {
|
514 |
+
alias_kind_ = v;
|
515 |
+
}
|
516 |
+
|
517 |
+
c10::optional<c10::string_view> getNamespace() const {
|
518 |
+
return name_.getNamespace();
|
519 |
+
}
|
520 |
+
|
521 |
+
// Returns true if we successfully set the namespace (as there
|
522 |
+
// was none set, and false otherwise)
|
523 |
+
bool setNamespaceIfNotSet(const char* ns) {
|
524 |
+
return name_.setNamespaceIfNotSet(ns);
|
525 |
+
}
|
526 |
+
|
527 |
+
// can a function with this schema be substituted for a function of rhs's
|
528 |
+
// schema and have the program typecheck?
|
529 |
+
// as_method - if true, treat this schema as a method and ignore
|
530 |
+
// the first argument, which will be the object in both cases
|
531 |
+
bool isSubtypeOf(const FunctionSchema& rhs, bool as_method, std::ostream* why_not=nullptr) const;
|
532 |
+
};
|
533 |
+
|
534 |
+
inline bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs) {
|
535 |
+
return lhs.name() == rhs.name()
|
536 |
+
&& lhs.overload_name() == rhs.overload_name()
|
537 |
+
&& lhs.arguments() == rhs.arguments()
|
538 |
+
&& lhs.returns() == rhs.returns()
|
539 |
+
&& lhs.is_vararg() == rhs.is_vararg()
|
540 |
+
&& lhs.is_varret() == rhs.is_varret();
|
541 |
+
}
|
542 |
+
|
543 |
+
inline bool operator!=(const FunctionSchema& lhs, const FunctionSchema& rhs) {
|
544 |
+
return !(lhs == rhs);
|
545 |
+
}
|
546 |
+
|
547 |
+
// print out Argument, which is compatible with FunctionSchema parser
|
548 |
+
// full format: Type(alias)? name=default_value
|
549 |
+
inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {
|
550 |
+
|
551 |
+
// for adjusting the ? position.
|
552 |
+
// in schema, we have Tensor?(a!) input, and t(a!)?.
|
553 |
+
// however, t?(a!) doesn't work with schema parser.
|
554 |
+
// so we always use Type(alias)? format
|
555 |
+
// real_type versus fake_type: in order to be compatible with FunctionSchema
|
556 |
+
// parser, printing an argument with either MemoryFormat or Layout type should
|
557 |
+
// give us the original schema string, hence printing out real_type.
|
558 |
+
auto type = arg.real_type();
|
559 |
+
bool is_opt = type->kind() == OptionalType::Kind;
|
560 |
+
auto unopt_type = is_opt ? type->castRaw<OptionalType>()->getElementType() : type;
|
561 |
+
|
562 |
+
if (unopt_type->kind() == ListType::Kind) {
|
563 |
+
// sized lists get size N from arg, not type
|
564 |
+
auto list = unopt_type->cast<c10::ListType>();
|
565 |
+
out << list->getElementType()->str();
|
566 |
+
if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){
|
567 |
+
out << arg.alias_info()->containedTypes()[0];
|
568 |
+
}
|
569 |
+
std::string N = "";
|
570 |
+
if (arg.N()) {
|
571 |
+
N = std::to_string(*arg.N());
|
572 |
+
}
|
573 |
+
out << "[" << N << "]";
|
574 |
+
} else {
|
575 |
+
out << unopt_type->str();
|
576 |
+
}
|
577 |
+
|
578 |
+
// print alias info if it has beforeSets.
|
579 |
+
if (arg.alias_info() && !arg.alias_info()->beforeSets().empty()) {
|
580 |
+
out << *arg.alias_info();
|
581 |
+
}
|
582 |
+
|
583 |
+
if (is_opt) {
|
584 |
+
out << "?";
|
585 |
+
}
|
586 |
+
|
587 |
+
if (!arg.name().empty()) {
|
588 |
+
out << " " << arg.name();
|
589 |
+
}
|
590 |
+
|
591 |
+
if (arg.default_value()) {
|
592 |
+
out << "=";
|
593 |
+
if ((type->kind() == c10::TypeKind::StringType ||
|
594 |
+
unopt_type->kind() == c10::TypeKind::StringType) &&
|
595 |
+
arg.default_value().value().isString()) {
|
596 |
+
printQuotedString(out, arg.default_value().value().toStringRef());
|
597 |
+
} else if (type->kind() == TypeKind::ListType && type->castRaw<ListType>()->getElementType()->kind() == c10::TypeKind::IntType) {
|
598 |
+
// We want to faithfully replicate JIT schema.
|
599 |
+
// in native_functions.yaml defaults for int arrays with a single value always look like
|
600 |
+
// int[2] stride=1
|
601 |
+
// instead of
|
602 |
+
// int[2] stride=[1, 1]
|
603 |
+
auto default_val = arg.default_value().value().toIntList();
|
604 |
+
if (default_val.size() > 1) {
|
605 |
+
auto all_defaults_the_same = true;
|
606 |
+
for (const auto i : c10::irange(1, default_val.size())) {
|
607 |
+
if (default_val[0] != default_val[i]) all_defaults_the_same = false;
|
608 |
+
}
|
609 |
+
if (all_defaults_the_same) {
|
610 |
+
out << default_val[0];
|
611 |
+
} else {
|
612 |
+
out << arg.default_value().value();
|
613 |
+
}
|
614 |
+
} else {
|
615 |
+
out << arg.default_value().value();
|
616 |
+
}
|
617 |
+
} else {
|
618 |
+
out << arg.default_value().value();
|
619 |
+
}
|
620 |
+
}
|
621 |
+
|
622 |
+
return out;
|
623 |
+
}
|
624 |
+
|
625 |
+
inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema);
|
626 |
+
|
627 |
+
inline std::string toString(const FunctionSchema& schema) {
|
628 |
+
std::ostringstream str;
|
629 |
+
str << schema;
|
630 |
+
return str.str();
|
631 |
+
}
|
632 |
+
|
633 |
+
} // namespace c10
|
634 |
+
|
635 |
+
namespace std {
|
636 |
+
template<>
|
637 |
+
struct hash<c10::SchemaArgument> {
|
638 |
+
size_t operator()(const c10::SchemaArgument& arg) const
|
639 |
+
{
|
640 |
+
return c10::hash_combine(std::hash<size_t>()(arg.index), std::hash<size_t>()(static_cast<std::size_t>(arg.type)));
|
641 |
+
}
|
642 |
+
};
|
643 |
+
template<>
|
644 |
+
struct hash<c10::Argument> {
|
645 |
+
size_t operator()(const c10::Argument& arg) const
|
646 |
+
{
|
647 |
+
auto hash = std::hash<std::string>{}(arg.name());
|
648 |
+
auto type_hash = std::hash<c10::TypePtr>{}(arg.type());
|
649 |
+
auto kwarg_only_hash = std::hash<bool>{}(arg.kwarg_only());
|
650 |
+
hash = c10::hash_combine(hash, type_hash);
|
651 |
+
hash = c10::hash_combine(hash, kwarg_only_hash);
|
652 |
+
// hashing optional fields if they exist
|
653 |
+
if (arg.default_value()) {
|
654 |
+
auto default_value_hash = c10::hash<c10::IValue>{}(arg.default_value().value());
|
655 |
+
hash = c10::hash_combine(hash, default_value_hash);
|
656 |
+
}
|
657 |
+
if (arg.N()) {
|
658 |
+
auto N_hash = std::hash<int64_t>{}(*arg.N());
|
659 |
+
hash = c10::hash_combine(hash, N_hash);
|
660 |
+
}
|
661 |
+
if (arg.alias_info()) {
|
662 |
+
auto alias_info_hash = std::hash<c10::AliasInfo>{}(*arg.alias_info());
|
663 |
+
hash = c10::hash_combine(hash, alias_info_hash);
|
664 |
+
}
|
665 |
+
return hash;
|
666 |
+
}
|
667 |
+
};
|
668 |
+
template<>
|
669 |
+
struct hash<c10::FunctionSchema> {
|
670 |
+
size_t operator()(const c10::FunctionSchema& schema) const
|
671 |
+
{
|
672 |
+
auto hash = std::hash<c10::OperatorName>{}(schema.operator_name());
|
673 |
+
auto args_hash = c10::hash<std::vector<c10::Argument>>{}(schema.arguments());
|
674 |
+
auto returns_hash = c10::hash<std::vector<c10::Argument>>{}(schema.returns());
|
675 |
+
auto is_vararg_hash = std::hash<bool>{}(schema.is_vararg());
|
676 |
+
auto is_varret_hash = std::hash<bool>{}(schema.is_varret());
|
677 |
+
hash = c10::hash_combine(hash, args_hash);
|
678 |
+
hash = c10::hash_combine(hash, returns_hash);
|
679 |
+
hash = c10::hash_combine(hash, is_vararg_hash);
|
680 |
+
hash = c10::hash_combine(hash, is_varret_hash);
|
681 |
+
return hash;
|
682 |
+
}
|
683 |
+
};
|
684 |
+
} // namespace std
|
685 |
+
|
686 |
+
|
687 |
+
#include <ATen/core/function_schema_inl.h> // IWYU pragma: keep
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h
ADDED
@@ -0,0 +1,483 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
#include <ostream>
|
3 |
+
#include <sstream>
|
4 |
+
|
5 |
+
// note: windows build doesn't find symbols in operator files unless
|
6 |
+
// this is a header file
|
7 |
+
|
8 |
+
namespace c10 {
|
9 |
+
|
10 |
+
inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) {
|
11 |
+
// eventually this should look almost identical to python arg parser, but
|
12 |
+
// it is simpler for now to work directly on this schema
|
13 |
+
|
14 |
+
out << schema.name();
|
15 |
+
if (!schema.overload_name().empty()) {
|
16 |
+
out << "." << schema.overload_name();
|
17 |
+
}
|
18 |
+
out << "(";
|
19 |
+
|
20 |
+
bool seen_kwarg_only = false;
|
21 |
+
for (const auto i : c10::irange(schema.arguments().size())) {
|
22 |
+
if (i > 0) out << ", ";
|
23 |
+
if (schema.arguments()[i].kwarg_only() && !seen_kwarg_only) {
|
24 |
+
out << "*, ";
|
25 |
+
seen_kwarg_only = true;
|
26 |
+
}
|
27 |
+
out << schema.arguments()[i];
|
28 |
+
}
|
29 |
+
|
30 |
+
if(schema.is_vararg()) {
|
31 |
+
if(!schema.arguments().empty())
|
32 |
+
out << ", ";
|
33 |
+
out << "...";
|
34 |
+
}
|
35 |
+
|
36 |
+
out << ") -> ";
|
37 |
+
|
38 |
+
const auto& returns = schema.returns();
|
39 |
+
|
40 |
+
/*
|
41 |
+
* We should skip parenthesis if we return a single item and it's not varret,
|
42 |
+
* or we return nothing but varret.
|
43 |
+
*
|
44 |
+
* Need special handling for schema
|
45 |
+
* aten::items.str(Dict(str, t) self) -> (str,t)[]
|
46 |
+
* Even though this schema returns a single item, we need add parenthesis.
|
47 |
+
* The is necessary so the printed schema can be parsed by the C++ SchemaParser
|
48 |
+
* Without the extra parenthesis, the parser sees the first parenthesis in '(str,t)' and mistakenly
|
49 |
+
* treat the return type as a tuple. An alternative is to enhance the Lexer
|
50 |
+
* to lookahead multiple tokens to accurately decide if the return type is
|
51 |
+
* a tuple.
|
52 |
+
*/
|
53 |
+
bool need_paren = !(
|
54 |
+
(returns.size() == 1 && !schema.is_varret()) ||
|
55 |
+
(returns.empty() && schema.is_varret()));
|
56 |
+
|
57 |
+
if (returns.size() == 1 && !schema.is_varret()) {
|
58 |
+
std::stringstream return_ss;
|
59 |
+
return_ss << returns.at(0);
|
60 |
+
auto return_str = return_ss.str();
|
61 |
+
|
62 |
+
// enclosing the single return item with parenthesis if the return type
|
63 |
+
// starts with a left parenthesis.
|
64 |
+
//
|
65 |
+
// There are 2 cases
|
66 |
+
// 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'.
|
67 |
+
// without the extra parenthesis, the c++ schem parser can not parse it.
|
68 |
+
// 2. something like '-> ((str, str))'. Need extra parenthesis so the return
|
69 |
+
// type is a single tuple rather than two strings.
|
70 |
+
// PR (https://github.com/pytorch/pytorch/pull/23204) has more context about
|
71 |
+
// this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15)
|
72 |
+
// also covers this case.
|
73 |
+
if (!return_str.empty() && return_str.front() == '(') {
|
74 |
+
need_paren = true;
|
75 |
+
}
|
76 |
+
}
|
77 |
+
|
78 |
+
if (need_paren) {
|
79 |
+
out << "(";
|
80 |
+
}
|
81 |
+
for (const auto i : c10::irange(returns.size())) {
|
82 |
+
if (i > 0) {
|
83 |
+
out << ", ";
|
84 |
+
}
|
85 |
+
out << returns.at(i);
|
86 |
+
}
|
87 |
+
if (schema.is_varret()) {
|
88 |
+
if (!returns.empty()) {
|
89 |
+
out << ", ";
|
90 |
+
}
|
91 |
+
out << "...";
|
92 |
+
}
|
93 |
+
if (need_paren) {
|
94 |
+
out << ")";
|
95 |
+
}
|
96 |
+
return out;
|
97 |
+
}
|
98 |
+
|
99 |
+
inline size_t findFirstOutArg(const std::vector<Argument>& args) {
|
100 |
+
// find the start of out args in the schema
|
101 |
+
for (const auto out_start_idx : c10::irange(args.size())) {
|
102 |
+
if (args.at(out_start_idx).is_out()) {
|
103 |
+
return out_start_idx;
|
104 |
+
}
|
105 |
+
}
|
106 |
+
return args.size();
|
107 |
+
}
|
108 |
+
|
109 |
+
inline bool Argument::isBackwardCompatibleWith(
|
110 |
+
const Argument& old,
|
111 |
+
std::ostream* why_not) const {
|
112 |
+
const Argument* lhs = this;
|
113 |
+
const Argument* rhs = &old;
|
114 |
+
if (!(lhs->name() == rhs->name()
|
115 |
+
&& lhs->N() == rhs->N()
|
116 |
+
&& (lhs->alias_info() == rhs->alias_info()
|
117 |
+
|| (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
|
118 |
+
&& *lhs->alias_info() == *rhs->alias_info())))) {
|
119 |
+
return false;
|
120 |
+
}
|
121 |
+
if (lhs->kwarg_only() && !rhs->kwarg_only()) {
|
122 |
+
return false;
|
123 |
+
}
|
124 |
+
if (!rhs->type()->isSubtypeOfExt(*lhs->type(), why_not)) {
|
125 |
+
return false;
|
126 |
+
}
|
127 |
+
if (rhs->default_value().has_value() &&
|
128 |
+
lhs->default_value() != rhs->default_value()) {
|
129 |
+
return false;
|
130 |
+
}
|
131 |
+
return true;
|
132 |
+
}
|
133 |
+
|
134 |
+
inline bool Argument::isForwardCompatibleWith(
|
135 |
+
const Argument& old,
|
136 |
+
std::ostream* why_not) const {
|
137 |
+
const Argument* lhs = this;
|
138 |
+
const Argument* rhs = &old;
|
139 |
+
if (!(lhs->name() == rhs->name()
|
140 |
+
&& lhs->N() == rhs->N()
|
141 |
+
&& (lhs->alias_info() == rhs->alias_info()
|
142 |
+
|| (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
|
143 |
+
&& *lhs->alias_info() == *rhs->alias_info())))) {
|
144 |
+
return false;
|
145 |
+
}
|
146 |
+
if (lhs->kwarg_only() && !rhs->kwarg_only()) {
|
147 |
+
return false;
|
148 |
+
}
|
149 |
+
if (!lhs->type()->isSubtypeOfExt(rhs->type(), why_not)) {
|
150 |
+
return false;
|
151 |
+
}
|
152 |
+
if (rhs->default_value().has_value() &&
|
153 |
+
lhs->default_value() != rhs->default_value()) {
|
154 |
+
return false;
|
155 |
+
}
|
156 |
+
if (lhs->default_value().has_value() && !rhs->default_value().has_value()) {
|
157 |
+
return false;
|
158 |
+
}
|
159 |
+
return true;
|
160 |
+
}
|
161 |
+
|
162 |
+
inline std::string FunctionSchema::formatTypeMismatchMsg(
|
163 |
+
const Argument& expected,
|
164 |
+
const std::string& actual_type,
|
165 |
+
c10::optional<size_t> position,
|
166 |
+
c10::optional<std::string> value) const {
|
167 |
+
std::string position_str;
|
168 |
+
if (position) {
|
169 |
+
position_str = c10::str("Position: ", *position, "\n");
|
170 |
+
}
|
171 |
+
std::string value_str;
|
172 |
+
if (value) {
|
173 |
+
value_str = c10::str("Value: ", *value, "\n");
|
174 |
+
}
|
175 |
+
return c10::str(
|
176 |
+
name(),
|
177 |
+
"() ",
|
178 |
+
expected.formatTypeMismatchMsg(actual_type),
|
179 |
+
position_str,
|
180 |
+
value_str,
|
181 |
+
"Declaration: ",
|
182 |
+
*this);
|
183 |
+
}
|
184 |
+
|
185 |
+
inline bool FunctionSchema::isBackwardCompatibleWith(
|
186 |
+
const FunctionSchema& old,
|
187 |
+
std::ostream* why_not) const {
|
188 |
+
if (!(name() == old.name()
|
189 |
+
&& overload_name() == old.overload_name()
|
190 |
+
// we are conservative on is_vararg and is_varret,
|
191 |
+
// since they are only used by internal operators
|
192 |
+
&& is_vararg() == old.is_vararg()
|
193 |
+
&& is_varret() == old.is_varret()
|
194 |
+
&& returns().size() == old.returns().size()
|
195 |
+
&& arguments().size() >= old.arguments().size())) {
|
196 |
+
return false;
|
197 |
+
}
|
198 |
+
for (const auto i : c10::irange(returns().size())) {
|
199 |
+
// Backwards compatibility requires covariance on argument types
|
200 |
+
// (i.e. more generic), and contravariance on return types (i.e.
|
201 |
+
// more specific).
|
202 |
+
if (!old.returns().at(i).isBackwardCompatibleWith(
|
203 |
+
returns().at(i),
|
204 |
+
why_not)) {
|
205 |
+
return false;
|
206 |
+
}
|
207 |
+
}
|
208 |
+
|
209 |
+
// we want to test both out and default args separately
|
210 |
+
size_t old_out_start_idx = findFirstOutArg(old.arguments());
|
211 |
+
size_t new_out_start_idx = findFirstOutArg(arguments());
|
212 |
+
|
213 |
+
// make sure among the default args, they are backward compatible
|
214 |
+
for (const auto i : c10::irange(old_out_start_idx)) {
|
215 |
+
if (!arguments().at(i).isBackwardCompatibleWith(
|
216 |
+
old.arguments().at(i), why_not)) {
|
217 |
+
return false;
|
218 |
+
}
|
219 |
+
}
|
220 |
+
|
221 |
+
// Validate that all new arguments provided has a default value
|
222 |
+
for (const auto i : c10::irange(old_out_start_idx, new_out_start_idx)) {
|
223 |
+
if (!arguments().at(i).default_value()) {
|
224 |
+
if (why_not) {
|
225 |
+
*why_not
|
226 |
+
<< "Function schema not backward compatible since the new argument '"
|
227 |
+
<< arguments().at(i).name() << "' of type "
|
228 |
+
<< arguments().at(i).type()->str()
|
229 |
+
<< " did not provide a default value.";
|
230 |
+
}
|
231 |
+
return false;
|
232 |
+
}
|
233 |
+
}
|
234 |
+
|
235 |
+
// now compare the out args
|
236 |
+
for (const auto i : c10::irange(old_out_start_idx, old.arguments().size())) {
|
237 |
+
if (!arguments()
|
238 |
+
.at(i - old_out_start_idx + new_out_start_idx)
|
239 |
+
.isBackwardCompatibleWith(old.arguments().at(i), why_not)) {
|
240 |
+
return false;
|
241 |
+
}
|
242 |
+
}
|
243 |
+
|
244 |
+
return true;
|
245 |
+
}
|
246 |
+
|
247 |
+
inline bool FunctionSchema::isForwardCompatibleWith(
|
248 |
+
const FunctionSchema& old,
|
249 |
+
std::ostringstream& why_not) const {
|
250 |
+
if (!(name() == old.name() &&
|
251 |
+
overload_name() == old.overload_name()
|
252 |
+
// we are conservative on is_vararg and is_varret,
|
253 |
+
// since they are only used by internal operators
|
254 |
+
&& is_vararg() == old.is_vararg() && is_varret() == old.is_varret() &&
|
255 |
+
returns().size() == old.returns().size())) {
|
256 |
+
return false;
|
257 |
+
}
|
258 |
+
|
259 |
+
// we want to test both out and default args separately
|
260 |
+
size_t old_out_start_idx = findFirstOutArg(old.arguments());
|
261 |
+
size_t new_out_start_idx = findFirstOutArg(arguments());
|
262 |
+
|
263 |
+
if (old.arguments().size() - old_out_start_idx !=
|
264 |
+
arguments().size() - new_out_start_idx) {
|
265 |
+
if (why_not) {
|
266 |
+
why_not << "Function schema should have the "
|
267 |
+
<< "same number of out arguments";
|
268 |
+
}
|
269 |
+
return false;
|
270 |
+
}
|
271 |
+
|
272 |
+
// make sure among the default args, they are forward compatible
|
273 |
+
for (size_t i = 0; i < std::min(old_out_start_idx, new_out_start_idx); i++) {
|
274 |
+
if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) {
|
275 |
+
if (why_not) {
|
276 |
+
why_not
|
277 |
+
<< "'" << arguments().at(i).name() << "'"
|
278 |
+
<< " is not forward compatible with the older version of the schema";
|
279 |
+
}
|
280 |
+
return false;
|
281 |
+
}
|
282 |
+
}
|
283 |
+
|
284 |
+
// Validate that all new arguments provided has a default value
|
285 |
+
for (size_t i = old_out_start_idx; i < new_out_start_idx; ++i) {
|
286 |
+
if (!arguments().at(i).default_value()) {
|
287 |
+
if (why_not) {
|
288 |
+
why_not
|
289 |
+
<< "Function schema is not forward compatible since the new argument '"
|
290 |
+
<< arguments().at(i).name() << "' of type "
|
291 |
+
<< arguments().at(i).type()->str()
|
292 |
+
<< " did not provide a default value.";
|
293 |
+
}
|
294 |
+
return false;
|
295 |
+
}
|
296 |
+
|
297 |
+
auto default_val = arguments().at(i).default_value().value();
|
298 |
+
if (default_val.isList() || default_val.isGenericDict()) {
|
299 |
+
if (why_not) {
|
300 |
+
why_not
|
301 |
+
<< "Function schema is not forward compatible since the new argument '"
|
302 |
+
<< arguments().at(i).name() << "' of type "
|
303 |
+
<< arguments().at(i).type()->str() << " has a container type "
|
304 |
+
<< "as its default value.";
|
305 |
+
}
|
306 |
+
return false;
|
307 |
+
}
|
308 |
+
}
|
309 |
+
|
310 |
+
// now compare the out args
|
311 |
+
for (size_t i = old_out_start_idx; i < old.arguments().size(); i++) {
|
312 |
+
if (!arguments()
|
313 |
+
.at(i - old_out_start_idx + new_out_start_idx)
|
314 |
+
.isForwardCompatibleWith(old.arguments().at(i))) {
|
315 |
+
if (why_not) {
|
316 |
+
why_not << "Out argument '"
|
317 |
+
<< "'" << arguments().at(i).name()
|
318 |
+
<< " is not FC with the older version of the schema";
|
319 |
+
}
|
320 |
+
return false;
|
321 |
+
}
|
322 |
+
}
|
323 |
+
|
324 |
+
return true;
|
325 |
+
}
|
326 |
+
|
327 |
+
template<typename T>
|
328 |
+
inline void FunctionSchema::checkArg(
|
329 |
+
const IValue& value,
|
330 |
+
const Argument& argument,
|
331 |
+
optional<size_t> pos) const {
|
332 |
+
if (value.isTensor() && argument.type() == TensorType::get()) {
|
333 |
+
// Fast-path for the common case
|
334 |
+
return;
|
335 |
+
}
|
336 |
+
if (!value.type<T>()->isSubtypeOf(*argument.type())) {
|
337 |
+
TORCH_CHECK(
|
338 |
+
false,
|
339 |
+
formatTypeMismatchMsg(
|
340 |
+
argument, value.type<T>()->repr_str(), pos));
|
341 |
+
}
|
342 |
+
}
|
343 |
+
|
344 |
+
inline std::string FunctionSchema::findErrorInKwargs(const std::vector<std::string>& kwargs) const {
|
345 |
+
// First check if any of the kwargs are unknown, i.e. don't match the name of
|
346 |
+
// any argument in the schema.
|
347 |
+
for (const auto& kwarg : kwargs) {
|
348 |
+
if (!std::count_if(
|
349 |
+
arguments().begin(),
|
350 |
+
arguments().end(),
|
351 |
+
[&kwarg](const Argument& argument) {
|
352 |
+
return argument.name() == kwarg;
|
353 |
+
})) {
|
354 |
+
return c10::str(
|
355 |
+
"Unknown keyword argument '",
|
356 |
+
kwarg,
|
357 |
+
"' for operator '",
|
358 |
+
name(),
|
359 |
+
"'. Schema: ",
|
360 |
+
*this);
|
361 |
+
}
|
362 |
+
}
|
363 |
+
// If there are unconsumed kwargs but none of them were unknown, the first
|
364 |
+
// positional argument present in the kwargs is duplicated.
|
365 |
+
for (const auto& argument : arguments()) {
|
366 |
+
if (std::find(kwargs.begin(), kwargs.end(), argument.name()) != kwargs.end()) {
|
367 |
+
AT_ASSERT(!argument.default_value());
|
368 |
+
return c10::str(
|
369 |
+
"Argument '",
|
370 |
+
argument.name(),
|
371 |
+
"' specified both as positional and ",
|
372 |
+
"keyword argument. Schema: ",
|
373 |
+
*this);
|
374 |
+
}
|
375 |
+
}
|
376 |
+
return "";
|
377 |
+
}
|
378 |
+
|
379 |
+
template <typename T>
|
380 |
+
inline void FunctionSchema::checkAndNormalizeInputs(
|
381 |
+
std::vector<IValue>& inputs,
|
382 |
+
const std::unordered_map<std::string, IValue>& kwargs) const {
|
383 |
+
// Do we have more inputs than the schema accepts?
|
384 |
+
TORCH_CHECK(
|
385 |
+
inputs.size() <= arguments().size(),
|
386 |
+
"Expected at most ",
|
387 |
+
arguments().size(),
|
388 |
+
" argument(s) for operator '",
|
389 |
+
name(),
|
390 |
+
"', but received ",
|
391 |
+
inputs.size(),
|
392 |
+
" argument(s). Declaration: ",
|
393 |
+
*this);
|
394 |
+
|
395 |
+
size_t consumed_kwargs = 0;
|
396 |
+
for (const auto pos : c10::irange(arguments().size())) {
|
397 |
+
const auto& argument = arguments()[pos];
|
398 |
+
if (pos < inputs.size()) {
|
399 |
+
checkArg<T>(inputs[pos], argument, pos);
|
400 |
+
continue;
|
401 |
+
}
|
402 |
+
auto it = kwargs.find(argument.name());
|
403 |
+
if (it != kwargs.end()) {
|
404 |
+
checkArg<T>(it->second, argument, nullopt);
|
405 |
+
inputs.push_back(it->second);
|
406 |
+
consumed_kwargs++;
|
407 |
+
continue;
|
408 |
+
}
|
409 |
+
if (argument.default_value()) {
|
410 |
+
inputs.push_back(*argument.default_value());
|
411 |
+
continue;
|
412 |
+
}
|
413 |
+
AT_ERROR(
|
414 |
+
name(),
|
415 |
+
"() is missing value for argument '",
|
416 |
+
argument.name(),
|
417 |
+
"'. Declaration: ",
|
418 |
+
*this);
|
419 |
+
}
|
420 |
+
if (consumed_kwargs != kwargs.size()) {
|
421 |
+
std::vector<std::string> names;
|
422 |
+
names.reserve(kwargs.size());
|
423 |
+
for(const auto& k : kwargs) {
|
424 |
+
names.emplace_back(k.first);
|
425 |
+
}
|
426 |
+
throw std::runtime_error(findErrorInKwargs(names));
|
427 |
+
}
|
428 |
+
}
|
429 |
+
|
430 |
+
inline FunctionSchema FunctionSchema::cloneWithRemappedTypes(
|
431 |
+
const std::function<TypePtr(TypePtr)> type_map) const {
|
432 |
+
auto update_args = [&](const std::vector<Argument>& args) {
|
433 |
+
std::vector<Argument> new_args;
|
434 |
+
new_args.reserve(args.size());
|
435 |
+
for(const Argument& arg : args) {
|
436 |
+
new_args.emplace_back(arg.cloneWithType(type_map(arg.type())));
|
437 |
+
}
|
438 |
+
return new_args;
|
439 |
+
};
|
440 |
+
return FunctionSchema(
|
441 |
+
name(),
|
442 |
+
overload_name(),
|
443 |
+
update_args(arguments()),
|
444 |
+
update_args(returns()),
|
445 |
+
is_vararg(),
|
446 |
+
is_varret());
|
447 |
+
}
|
448 |
+
|
449 |
+
// covariant subtyping of list of Arguments
|
450 |
+
inline bool isSubtypeOfList(
|
451 |
+
ArrayRef<Argument> child,
|
452 |
+
ArrayRef<Argument> parent,
|
453 |
+
std::ostream* why_not) {
|
454 |
+
if (child.size() != parent.size()) {
|
455 |
+
return false;
|
456 |
+
}
|
457 |
+
for (const auto i : c10::irange(child.size())) {
|
458 |
+
const Argument& c = child[i];
|
459 |
+
const Argument& p = parent[i];
|
460 |
+
if (c.name() != p.name()) {
|
461 |
+
return false;
|
462 |
+
}
|
463 |
+
if (!c.type()->isSubtypeOfExt(*p.type(), why_not)) {
|
464 |
+
return false;
|
465 |
+
}
|
466 |
+
}
|
467 |
+
return true;
|
468 |
+
}
|
469 |
+
|
470 |
+
inline bool FunctionSchema::isSubtypeOf(
|
471 |
+
const FunctionSchema& rhs,
|
472 |
+
bool as_method,
|
473 |
+
std::ostream* why_not) const {
|
474 |
+
size_t start = as_method ? 1 : 0;
|
475 |
+
// functions are contravariant in arguments but covariant in returns
|
476 |
+
return isSubtypeOfList(
|
477 |
+
ArrayRef<Argument>(rhs.arguments()).slice(start),
|
478 |
+
ArrayRef<Argument>(arguments()).slice(start),
|
479 |
+
why_not) &&
|
480 |
+
isSubtypeOfList(returns(), rhs.returns(), why_not);
|
481 |
+
}
|
482 |
+
|
483 |
+
} // namespace c10
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/functional.h
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <vector>
|
4 |
+
#include <c10/util/ArrayRef.h>
|
5 |
+
|
6 |
+
namespace c10 {
|
7 |
+
|
8 |
+
// The passed in function must take T by value (T), or by
|
9 |
+
// const reference (const T&); taking T by non-const reference
|
10 |
+
// will result in an error like:
|
11 |
+
//
|
12 |
+
// error: no type named 'type' in 'class std::result_of<foobar::__lambda(T)>'
|
13 |
+
//
|
14 |
+
// No explicit template parameters are required.
|
15 |
+
|
16 |
+
// Overload for explicit function and ArrayRef
|
17 |
+
template<class F, class T>
|
18 |
+
inline auto fmap(const T& inputs, const F& fn) -> std::vector<decltype(fn(*inputs.begin()))> {
|
19 |
+
std::vector<decltype(fn(*inputs.begin()))> r;
|
20 |
+
r.reserve(inputs.size());
|
21 |
+
for(const auto & input : inputs)
|
22 |
+
r.push_back(fn(input));
|
23 |
+
return r;
|
24 |
+
}
|
25 |
+
|
26 |
+
// C++ forbids taking an address of a constructor, so here's a workaround...
|
27 |
+
// Overload for constructor (R) application
|
28 |
+
template<typename R, typename T>
|
29 |
+
inline std::vector<R> fmap(const T& inputs) {
|
30 |
+
std::vector<R> r;
|
31 |
+
r.reserve(inputs.size());
|
32 |
+
for(auto & input : inputs)
|
33 |
+
r.push_back(R(input));
|
34 |
+
return r;
|
35 |
+
}
|
36 |
+
|
37 |
+
template<typename F, typename T>
|
38 |
+
inline std::vector<T> filter(at::ArrayRef<T> inputs, const F& fn) {
|
39 |
+
std::vector<T> r;
|
40 |
+
r.reserve(inputs.size());
|
41 |
+
for(auto & input : inputs) {
|
42 |
+
if (fn(input)) {
|
43 |
+
r.push_back(input);
|
44 |
+
}
|
45 |
+
}
|
46 |
+
return r;
|
47 |
+
}
|
48 |
+
|
49 |
+
template<typename F, typename T>
|
50 |
+
inline std::vector<T> filter(const std::vector<T>& inputs, const F& fn) {
|
51 |
+
return filter<F, T>(static_cast<at::ArrayRef<T>>(inputs), fn);
|
52 |
+
}
|
53 |
+
|
54 |
+
} // namespace c10
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings_class.h
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <cstdint>
|
2 |
+
#include <cstring>
|
3 |
+
#include <mutex>
|
4 |
+
#include <string>
|
5 |
+
#include <unordered_map>
|
6 |
+
#include <vector>
|
7 |
+
#include <ATen/core/symbol.h>
|
8 |
+
#include <c10/util/Exception.h>
|
9 |
+
|
10 |
+
namespace c10 {
|
11 |
+
|
12 |
+
struct TORCH_API InternedStrings {
|
13 |
+
InternedStrings();
|
14 |
+
Symbol symbol(const std::string& s);
|
15 |
+
std::pair<const char*, const char*> string(Symbol sym);
|
16 |
+
Symbol ns(Symbol sym);
|
17 |
+
|
18 |
+
private:
|
19 |
+
// prereq - holding mutex_
|
20 |
+
Symbol _symbol(const std::string& s);
|
21 |
+
std::pair<const char*, const char*> customString(Symbol sym);
|
22 |
+
std::unordered_map<std::string, Symbol> string_to_sym_;
|
23 |
+
|
24 |
+
struct SymbolInfo {
|
25 |
+
Symbol ns;
|
26 |
+
std::string qual_name;
|
27 |
+
std::string unqual_name;
|
28 |
+
};
|
29 |
+
std::vector<SymbolInfo> sym_to_info_;
|
30 |
+
|
31 |
+
std::mutex mutex_;
|
32 |
+
};
|
33 |
+
|
34 |
+
} // namespace c10
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h
ADDED
@@ -0,0 +1,1555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/DimVector.h>
|
4 |
+
#include <ATen/core/TensorBody.h>
|
5 |
+
#include <ATen/core/blob.h>
|
6 |
+
#include <ATen/core/custom_class.h>
|
7 |
+
#include <ATen/core/ivalue_to.h>
|
8 |
+
#include <ATen/core/jit_type_base.h>
|
9 |
+
#include <ATen/core/type_factory.h>
|
10 |
+
#include <c10/core/SymBool.h>
|
11 |
+
#include <c10/core/SymFloat.h>
|
12 |
+
#include <c10/macros/Export.h>
|
13 |
+
#include <c10/util/MaybeOwned.h>
|
14 |
+
#include <c10/util/intrusive_ptr.h>
|
15 |
+
#include <type_traits>
|
16 |
+
#include <typeindex>
|
17 |
+
#include <unordered_map>
|
18 |
+
#include <unordered_set>
|
19 |
+
#include <utility>
|
20 |
+
|
21 |
+
namespace torch {
|
22 |
+
class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {};
|
23 |
+
namespace jit {
|
24 |
+
using ::torch::CustomClassHolder;
|
25 |
+
struct Function;
|
26 |
+
struct CompilationUnit;
|
27 |
+
struct Module;
|
28 |
+
} // namespace jit
|
29 |
+
} // namespace torch
|
30 |
+
namespace c10 {
|
31 |
+
template <class Key, class Value>
|
32 |
+
class Dict;
|
33 |
+
template <class T>
|
34 |
+
class List;
|
35 |
+
template <class T>
|
36 |
+
class IListRef;
|
37 |
+
struct IValue;
|
38 |
+
struct ClassType;
|
39 |
+
struct Type;
|
40 |
+
class RRefInterface;
|
41 |
+
|
42 |
+
struct ClassType;
|
43 |
+
using ClassTypePtr = std::shared_ptr<ClassType>;
|
44 |
+
|
45 |
+
TORCH_API bool _fastEqualsForContainer(const IValue& lhs, const IValue& rhs);
|
46 |
+
|
47 |
+
TORCH_API torch::jit::Function* checkObjectSortSchema(
|
48 |
+
const c10::ClassTypePtr& t,
|
49 |
+
std::stringstream& why_not);
|
50 |
+
|
51 |
+
// A comparator that checks ordering of two IValues of same type.
|
52 |
+
typedef std::function<bool(const IValue& a, const IValue& b)> IValueComparator;
|
53 |
+
|
54 |
+
TORCH_API IValueComparator getLessThanComparator(const IValue& v);
|
55 |
+
TORCH_API IValueComparator getGreaterThanComparator(const IValue& v);
|
56 |
+
|
57 |
+
namespace ivalue {
|
58 |
+
struct Tuple;
|
59 |
+
struct Future;
|
60 |
+
struct Await;
|
61 |
+
struct ConstantString;
|
62 |
+
struct GenericDict;
|
63 |
+
struct Object;
|
64 |
+
struct PyObjectHolder;
|
65 |
+
struct EnumHolder;
|
66 |
+
// We need a ComplexHolder because currently the payloads in the Union
|
67 |
+
// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big
|
68 |
+
// to fit in the IValue directly, we indirect complex numbers through an
|
69 |
+
// intrusive pointer to ComplexHolder (which contains a c10::complex).
|
70 |
+
struct ComplexHolder : c10::intrusive_ptr_target {
|
71 |
+
public:
|
72 |
+
template <typename T>
|
73 |
+
ComplexHolder(c10::complex<T> c) {
|
74 |
+
val = convert<decltype(val), c10::complex<T>>(c);
|
75 |
+
}
|
76 |
+
ComplexHolder() = default;
|
77 |
+
c10::complex<double> val;
|
78 |
+
};
|
79 |
+
|
80 |
+
// Similar to ComplexHolder, for StreamData3
|
81 |
+
struct StreamData3Holder : c10::intrusive_ptr_target {
|
82 |
+
public:
|
83 |
+
StreamData3Holder(struct c10::StreamData3 d) : val(d) {}
|
84 |
+
StreamData3Holder() = delete;
|
85 |
+
struct c10::StreamData3 val;
|
86 |
+
};
|
87 |
+
|
88 |
+
} // namespace ivalue
|
89 |
+
|
90 |
+
// This is an owning wrapper for a c10::optional<std::vector<T>>
|
91 |
+
// that can be implicitly converted to a (non-owning) optional<ArrayRef<T>>.
|
92 |
+
// Its purpose is to be used in generated code to keep the vector alive
|
93 |
+
// either until the end of a statement (as a temporary), or as a saved arg
|
94 |
+
// in autograd.
|
95 |
+
template <typename T>
|
96 |
+
struct OptionalArray {
|
97 |
+
c10::optional<std::vector<T>> list;
|
98 |
+
|
99 |
+
OptionalArray() = default;
|
100 |
+
OptionalArray(std::vector<T> val) : list(std::move(val)) {}
|
101 |
+
|
102 |
+
// Used when saving an argument for the backwards pass.
|
103 |
+
OptionalArray& operator=(c10::optional<ArrayRef<T>> ref) {
|
104 |
+
if (ref) {
|
105 |
+
list = std::vector<T>(ref->begin(), ref->end());
|
106 |
+
} else {
|
107 |
+
list = nullopt;
|
108 |
+
}
|
109 |
+
return *this;
|
110 |
+
}
|
111 |
+
|
112 |
+
// Used when saving an argument for the backwards pass.
|
113 |
+
OptionalArray& operator=(c10::OptionalArrayRef<T> ref) {
|
114 |
+
if (ref) {
|
115 |
+
list = std::vector<T>(ref->begin(), ref->end());
|
116 |
+
} else {
|
117 |
+
list = nullopt;
|
118 |
+
}
|
119 |
+
return *this;
|
120 |
+
}
|
121 |
+
|
122 |
+
operator c10::optional<c10::ArrayRef<T>>() {
|
123 |
+
if (!list) {
|
124 |
+
return nullopt;
|
125 |
+
}
|
126 |
+
return *list;
|
127 |
+
}
|
128 |
+
|
129 |
+
operator c10::OptionalArrayRef<T>() {
|
130 |
+
if (!list) {
|
131 |
+
return nullopt;
|
132 |
+
}
|
133 |
+
return *list;
|
134 |
+
}
|
135 |
+
};
|
136 |
+
|
137 |
+
// Capsule is an internal implementation detail of custom C++ classes. We
|
138 |
+
// define it as an owning wrapper for
|
139 |
+
// c10::intrusive_ptr<torch::CustomClassHolder> This wrapper is here to serve as
|
140 |
+
// an abstraction of the type erased custom class object pointer. It also allow
|
141 |
+
// pybind11 to treat this as a standalone class to register as a separate type
|
142 |
+
// caster, instead of a custom pointer holder which the pointer holder type
|
143 |
+
// caster try to "unwrap" it automatically.
|
144 |
+
struct Capsule {
|
145 |
+
c10::intrusive_ptr<torch::CustomClassHolder> obj_ptr;
|
146 |
+
explicit Capsule(c10::intrusive_ptr<torch::CustomClassHolder> ptr)
|
147 |
+
: obj_ptr(std::move(ptr)) {}
|
148 |
+
};
|
149 |
+
|
150 |
+
// IValue is the generic tagged union used by the interpreter to hold
|
151 |
+
// all value types.
|
152 |
+
// It is a 16-byte object with an 8-byte payload and an 8-byte tag.
|
153 |
+
// The tag is currently 4 bytes to determine the type, and 1 byte
|
154 |
+
// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs
|
155 |
+
// retain/release calls.
|
156 |
+
|
157 |
+
#define TORCH_FORALL_TAGS(_) \
|
158 |
+
_(None) \
|
159 |
+
_(Tensor) \
|
160 |
+
_(Storage) \
|
161 |
+
_(Double) \
|
162 |
+
_(ComplexDouble) \
|
163 |
+
_(Int) \
|
164 |
+
_(SymInt) \
|
165 |
+
_(SymFloat) \
|
166 |
+
_(SymBool) \
|
167 |
+
_(Bool) \
|
168 |
+
_(Tuple) \
|
169 |
+
_(String) \
|
170 |
+
_(Blob) \
|
171 |
+
_(GenericList) \
|
172 |
+
_(GenericDict) \
|
173 |
+
_(Future) \
|
174 |
+
_(Await) \
|
175 |
+
_(Device) \
|
176 |
+
_(Stream) \
|
177 |
+
_(Object) \
|
178 |
+
_(PyObject) \
|
179 |
+
_(Uninitialized) \
|
180 |
+
_(Capsule) \
|
181 |
+
_(RRef) \
|
182 |
+
_(Quantizer) \
|
183 |
+
_(Generator) \
|
184 |
+
_(Enum)
|
185 |
+
|
186 |
+
// [doxygen private]
|
187 |
+
// These methods are not actually private but we don't want to document them, so
|
188 |
+
// they are marked `@private`, which hides them on the doxygen documentation for
|
189 |
+
// this page.
|
190 |
+
|
191 |
+
/// IValue (Interpreter Value) is a tagged union over the types
|
192 |
+
/// supported by the TorchScript interpreter. IValues contain their
|
193 |
+
/// values as an `IValue::Payload`, which holds primitive types
|
194 |
+
/// (`int64_t`, `bool`, `double`, `Device`) and `Tensor` as values,
|
195 |
+
/// and all other types as a `c10::intrusive_ptr`. In order to
|
196 |
+
/// optimize performance of the destructor and related operations by
|
197 |
+
/// making the `Tensor` and `c10::intrusive_ptr` paths generate the
|
198 |
+
/// same code, we represent a null `c10::intrusive_ptr` as
|
199 |
+
/// `UndefinedTensorImpl::singleton()`, *not* `nullptr`.
|
200 |
+
///
|
201 |
+
/// IValues are used as inputs to and outputs from the TorchScript interpreter.
|
202 |
+
/// To retrieve the value contained within an IValue, use the `.toX()` methods,
|
203 |
+
/// where `X` is the type you are trying to get. Note that neither the `.toX()`
|
204 |
+
/// methods nor the templated `.to<T>` functions do any kind of casting, they
|
205 |
+
/// only unwrap the contained value. For example:
|
206 |
+
///
|
207 |
+
/// \rst
|
208 |
+
/// .. code-block:: cpp
|
209 |
+
///
|
210 |
+
/// // Make the IValue
|
211 |
+
/// torch::IValue my_ivalue(26);
|
212 |
+
/// std::cout << my_ivalue << "\n";
|
213 |
+
///
|
214 |
+
/// // Unwrap the IValue
|
215 |
+
/// int64_t my_int = my_ivalue.toInt();
|
216 |
+
/// std::cout << my_int << "\n";
|
217 |
+
///
|
218 |
+
/// // This will throw an error!
|
219 |
+
/// // `my_ivalue` is tagged as an int and cannot be used as another type
|
220 |
+
/// torch::Tensor my_tensor = my_ivalue.toTensor();
|
221 |
+
/// \endrst
|
222 |
+
struct TORCH_API IValue final {
|
223 |
+
IValue(const IValue& rhs) : IValue(rhs.payload, rhs.tag) {
|
224 |
+
if (isIntrusivePtr() &&
|
225 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
226 |
+
c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
|
227 |
+
}
|
228 |
+
}
|
229 |
+
|
230 |
+
IValue(IValue&& rhs) noexcept : tag(rhs.tag) {
|
231 |
+
moveFrom(std::move(rhs));
|
232 |
+
}
|
233 |
+
|
234 |
+
/// @private [doxygen private]
|
235 |
+
~IValue() {
|
236 |
+
destroy();
|
237 |
+
}
|
238 |
+
|
239 |
+
C10_ALWAYS_INLINE IValue& operator=(IValue&& rhs) & noexcept {
|
240 |
+
if (&rhs == this) {
|
241 |
+
return *this;
|
242 |
+
}
|
243 |
+
|
244 |
+
destroy();
|
245 |
+
moveFrom(std::move(rhs));
|
246 |
+
return *this;
|
247 |
+
}
|
248 |
+
|
249 |
+
IValue& operator=(IValue const& rhs) & {
|
250 |
+
*this = IValue(rhs);
|
251 |
+
return *this;
|
252 |
+
}
|
253 |
+
|
254 |
+
void dump() const;
|
255 |
+
|
256 |
+
/**
 * Equality comparison. The semantics are the same as Python's `==`:
 * 1. Numerical types are compared by value.
 * 2. Tensors compute element-wise equality, returning a BoolTensor (see:
 *    `torch.eq()`)
 * 3. Strings are compared by value.
 * 4. Sequence types (list, tuple) are compared lexicographically by
 *    comparing their elements. Different sequence types never compare equal.
 * 5. Mappings (dict) must have equal (key, value) pairs.
 * 6. If not listed above, the default behavior is to test identity equality
 *    (e.g. pointer equality).
 *
 * Why does this return an IValue instead of a bool? Because in PyTorch,
 * `tensor1 == tensor2` returns a `BoolTensor`, not a bool.
 *
 * NOTE: we (like Python) assume that identity equality implies value equality
 * for efficiency.
 * TODO: need to support customizing equality
 */
IValue equals(const IValue& rhs) const;
/**
 * This implements the same semantics as `bool(lhs == rhs)` in Python, which
 * is the same as `equals()` except for Tensor types.
 */
TORCH_API friend bool operator==(const IValue& lhs, const IValue& rhs);
TORCH_API friend bool operator!=(const IValue& lhs, const IValue& rhs);

/**
 * Identity comparison. Checks if `this` is the same object as `rhs`. The
 * semantics are the same as Python's `is` operator.
 *
 * NOTE: Like in Python, this operation is poorly defined for primitive types
 * like numbers and strings. Prefer to use `==` unless you really want to
 * check identity equality.
 */
bool is(const IValue& rhs) const;

/**
 * Hashing for IValues. Returns an IValue-boxed int.
 *
 * Some notes:
 * - Like eager, Tensors are hashed by looking at the pointer. This is not
 *   strictly correct because two value-equal tensors with different tensor
 *   pointers will hash differently, but we choose to reproduce the eager
 *   semantics.
 * - Hashing is not defined on all built-in IValue types (e.g. list and
 *   dict), following Python. Calling `hash()` on these types will throw.
 */
IValue hash() const {
  return (int64_t)IValue::hash(*this);
}
// This is defined because `c10::hash` dispatches to a function of this
// signature. See the member function `hash()`.
static size_t hash(const IValue& iv);
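// [Editor's note] Illustrative usage sketch, not part of the upstream header;
// a minimal example of the comparison flavors declared above, assuming the
// usual ATen includes (<ATen/ATen.h>, <ATen/core/ivalue.h>) are available:
//
//   c10::IValue a(1.5), b(1.5);
//   bool value_eq = (a == b);        // bool(lhs == rhs): true, same tag/value
//   bool identity = a.is(b);         // Python-style `is`; avoid for primitives
//   c10::IValue t1(at::ones({2})), t2(at::ones({2}));
//   c10::IValue eq = t1.equals(t2);  // boxes an element-wise BoolTensor
//   size_t h = c10::IValue::hash(a); // throws for unhashable types (list/dict)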
/**
 * @private [doxygen private]
 * [container equality]
 * This is an equality implementation that assumes objects with the same
 * identity equal themselves, for efficiency reasons. We primarily have this
 * for consistency, because Python does the same thing. This actually
 * provokes user-visible changes in behavior due to quirks in torch:
 *   [tensor1] == [tensor1]      -> True (container equality compares
 *                                  identity first)
 *   [tensor1] == [tensor1_copy] -> RuntimeError: Boolean value of Tensor
 *                                  with more than one value is ambiguous
 */
TORCH_API friend bool _fastEqualsForContainer(
    const IValue& lhs,
    const IValue& rhs);
|
325 |
+
|
326 |
+
private:
|
327 |
+
static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) {
|
328 |
+
if (a.is_sparse()) {
|
329 |
+
return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b);
|
330 |
+
}
|
331 |
+
if (b.is_sparse()) {
|
332 |
+
return isAliasOf(a, b._values()) || isAliasOf(a, b._indices());
|
333 |
+
}
|
334 |
+
if (a.is_sparse_csr()) {
|
335 |
+
return isAliasOf(a.values(), b) || isAliasOf(a.crow_indices(), b) ||
|
336 |
+
isAliasOf(a.col_indices(), b);
|
337 |
+
}
|
338 |
+
if (b.is_sparse_csr()) {
|
339 |
+
return isAliasOf(a, b.values()) || isAliasOf(a, b.crow_indices()) ||
|
340 |
+
isAliasOf(a, b.col_indices());
|
341 |
+
}
|
342 |
+
|
343 |
+
// Opaque tensors such as the ones constructed by the MKL-DNN backend
|
344 |
+
// don't have storage so we just compare their TensorImpls.
|
345 |
+
// TODO: Find way to expose alias info for opaque tensors.
|
346 |
+
if (!a.has_storage() || !b.has_storage()) {
|
347 |
+
return a.unsafeGetTensorImpl() == b.unsafeGetTensorImpl();
|
348 |
+
}
|
349 |
+
|
350 |
+
return a.is_alias_of(b);
|
351 |
+
}
|
352 |
+
|
353 |
+
template <typename T>
|
354 |
+
bool isListOf() const;
|
355 |
+
|
356 |
+
public:
|
357 |
+
/// @private [doxygen private]
|
358 |
+
bool isAliasOf(const IValue& rhs) const {
|
359 |
+
if (this->tag != rhs.tag) {
|
360 |
+
// Trivially don't alias if the type is different
|
361 |
+
return false;
|
362 |
+
}
|
363 |
+
|
364 |
+
// Tensors should be compared based on internal storage
|
365 |
+
if (this->isTensor()) {
|
366 |
+
return isAliasOf(this->toTensor(), rhs.toTensor());
|
367 |
+
}
|
368 |
+
|
369 |
+
if (!isIntrusivePtr()) {
|
370 |
+
// Primitive types don't alias anything
|
371 |
+
return false;
|
372 |
+
}
|
373 |
+
|
374 |
+
AT_ASSERT(rhs.isIntrusivePtr());
|
375 |
+
|
376 |
+
// Other types can be compared by their ptr value
|
377 |
+
return this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr;
|
378 |
+
}
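// [Editor's note] Illustrative sketch, not part of the upstream header.
// isAliasOf() compares underlying storage for tensors and pointer identity
// for other intrusive-ptr types, e.g. (assuming <ATen/ATen.h>):
//
//   at::Tensor base = at::zeros({4});
//   at::Tensor view = base.slice(/*dim=*/0, /*start=*/0, /*end=*/2);
//   c10::IValue a(base), b(view), c(at::zeros({4}));
//   a.isAliasOf(b);  // true: `view` shares `base`'s storage
//   a.isAliasOf(c);  // false: distinct storage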
|
379 |
+
|
380 |
+
/// @private [doxygen private]
|
381 |
+
size_t use_count() const noexcept {
|
382 |
+
if (isTensor()) {
|
383 |
+
return payload.as_tensor.use_count();
|
384 |
+
}
|
385 |
+
|
386 |
+
if (!isIntrusivePtrLegacyBehavior()) {
|
387 |
+
return 1;
|
388 |
+
}
|
389 |
+
|
390 |
+
if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) {
|
391 |
+
return 0;
|
392 |
+
}
|
393 |
+
return c10::raw::intrusive_ptr::use_count(payload.u.as_intrusive_ptr);
|
394 |
+
}
|
395 |
+
|
396 |
+
/// @private [doxygen private]
|
397 |
+
void swap(IValue& rhs) noexcept {
|
398 |
+
if (isTensor() && rhs.isTensor()) {
|
399 |
+
std::swap(payload.as_tensor, rhs.payload.as_tensor);
|
400 |
+
} else if (isTensor()) {
|
401 |
+
at::Tensor t = std::move(payload.as_tensor);
|
402 |
+
// As far as I can tell, omitting the usual explicit destructor call
|
403 |
+
// is not UB in and of itself, and it's a slight perf win. The
|
404 |
+
// destructor is a no-op, because the moved-from Tensor is
|
405 |
+
// effectively an intrusive_ptr in the null state, so we don't need
|
406 |
+
// the behavior for correctness reasons either. Leaving this
|
407 |
+
// explanatory comment, including commented-out destructor call, to
|
408 |
+
// make this abundantly clear.
|
409 |
+
//
|
410 |
+
// payload.as_tensor.~Tensor();
|
411 |
+
payload.u = rhs.payload.u;
|
412 |
+
new (&rhs.payload.as_tensor) at::Tensor(std::move(t));
|
413 |
+
} else if (rhs.isTensor()) {
|
414 |
+
rhs.swap(*this);
|
415 |
+
return;
|
416 |
+
} else {
|
417 |
+
std::swap(payload.u, rhs.payload.u);
|
418 |
+
}
|
419 |
+
std::swap(tag, rhs.tag);
|
420 |
+
}
|
421 |
+
|
422 |
+
// Accessors for subtypes are arranged together below
|
423 |
+
// While some of these accessors could be generated through templates,
|
424 |
+
// we prefer to write them manually for clarity
|
425 |
+
|
426 |
+
IValue(at::TensorBase t) : tag(Tag::Tensor) {
|
427 |
+
new (&payload.as_tensor) at::Tensor(std::move(t));
|
428 |
+
}
|
429 |
+
bool isTensor() const {
|
430 |
+
return Tag::Tensor == tag;
|
431 |
+
}
|
432 |
+
|
433 |
+
private:
|
434 |
+
// Outlined error path so that toTensor() can be inlined.
|
435 |
+
[[noreturn]] void reportToTensorTypeError() const;
|
436 |
+
|
437 |
+
public:
|
438 |
+
at::Tensor toTensor() &&;
|
439 |
+
at::Tensor& toTensor() &;
|
440 |
+
const at::Tensor& toTensor() const&;
|
441 |
+
at::TensorImpl* unsafeToTensorImpl() const {
|
442 |
+
TORCH_INTERNAL_ASSERT(isTensor());
|
443 |
+
return payload.as_tensor.unsafeGetTensorImpl();
|
444 |
+
}
|
445 |
+
|
446 |
+
IValue(at::Storage s) : tag(Tag::Storage) {
|
447 |
+
payload.u.as_intrusive_ptr =
|
448 |
+
null_to_undefined_tensor(s.unsafeReleaseStorageImpl());
|
449 |
+
}
|
450 |
+
bool isStorage() const {
|
451 |
+
return Tag::Storage == tag;
|
452 |
+
}
|
453 |
+
c10::Storage toStorage() &&;
|
454 |
+
c10::Storage toStorage() const&;
|
455 |
+
|
456 |
+
const IValue& toIValue() const {
|
457 |
+
return *this;
|
458 |
+
}
|
459 |
+
IValue& toIValue() {
|
460 |
+
return *this;
|
461 |
+
}
|
462 |
+
|
463 |
+
/// @private [doxygen private]
|
464 |
+
IValue(intrusive_ptr<caffe2::Blob> blob) : tag(Tag::Blob) {
|
465 |
+
// TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract
|
466 |
+
// and store it as a Tensor instead.
|
467 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
|
468 |
+
}
|
469 |
+
|
470 |
+
/// @private [doxygen private]
|
471 |
+
bool isBlob() const {
|
472 |
+
return Tag::Blob == tag;
|
473 |
+
}
|
474 |
+
|
475 |
+
/// @private [doxygen private]
|
476 |
+
c10::intrusive_ptr<caffe2::Blob> toBlob() &&;
|
477 |
+
|
478 |
+
/// @private [doxygen private]
|
479 |
+
c10::intrusive_ptr<caffe2::Blob> toBlob() const&;
|
480 |
+
|
481 |
+
// Capsule. No new callsites of these APIs should
|
482 |
+
// be introduced.
|
483 |
+
static inline IValue make_capsule(
|
484 |
+
intrusive_ptr<torch::CustomClassHolder> blob);
|
485 |
+
bool isCapsule() const {
|
486 |
+
return Tag::Capsule == tag;
|
487 |
+
}
|
488 |
+
c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() &&;
|
489 |
+
c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() const&;
|
490 |
+
|
491 |
+
// Custom C++ classes
|
492 |
+
template <
|
493 |
+
typename T,
|
494 |
+
std::enable_if_t<
|
495 |
+
std::is_base_of<torch::CustomClassHolder, T>::value,
|
496 |
+
int> = 0>
|
497 |
+
IValue(intrusive_ptr<T> custom_class);
|
498 |
+
bool isCustomClass() const;
|
499 |
+
template <typename T>
|
500 |
+
c10::intrusive_ptr<T> toCustomClass() &&;
|
501 |
+
template <typename T>
|
502 |
+
c10::intrusive_ptr<T> toCustomClass() const&;
|
503 |
+
|
504 |
+
// Tuple
|
505 |
+
IValue(c10::intrusive_ptr<ivalue::Tuple> v);
|
506 |
+
|
507 |
+
template <
|
508 |
+
typename... Args,
|
509 |
+
std::enable_if_t<
|
510 |
+
!std::disjunction<
|
511 |
+
std::is_lvalue_reference<Args>...,
|
512 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
513 |
+
std::nullptr_t> = nullptr>
|
514 |
+
IValue(const std::tuple<Args...>& t);
|
515 |
+
template <
|
516 |
+
typename... Args,
|
517 |
+
std::enable_if_t<
|
518 |
+
!std::disjunction<
|
519 |
+
std::is_lvalue_reference<Args>...,
|
520 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
521 |
+
std::nullptr_t> = nullptr>
|
522 |
+
IValue(std::tuple<Args...>&& t);
|
523 |
+
bool isTuple() const {
|
524 |
+
return Tag::Tuple == tag;
|
525 |
+
}
|
526 |
+
c10::intrusive_ptr<ivalue::Tuple> toTuple() &&;
|
527 |
+
c10::intrusive_ptr<ivalue::Tuple> toTuple() const&;
|
528 |
+
C10_NODISCARD ivalue::Tuple& toTupleRef() const;
|
529 |
+
|
530 |
+
// Double
|
531 |
+
IValue(double d) : tag(Tag::Double) {
|
532 |
+
payload.u.as_double = d;
|
533 |
+
}
|
534 |
+
bool isDouble() const {
|
535 |
+
return Tag::Double == tag;
|
536 |
+
}
|
537 |
+
double toDouble() const {
|
538 |
+
AT_ASSERT(isDouble());
|
539 |
+
return payload.u.as_double;
|
540 |
+
}
|
541 |
+
|
542 |
+
// ComplexDouble
|
543 |
+
template <typename T>
|
544 |
+
IValue(c10::complex<T> c);
|
545 |
+
bool isComplexDouble() const {
|
546 |
+
return Tag::ComplexDouble == tag;
|
547 |
+
}
|
548 |
+
c10::complex<double> toComplexDouble() const;
|
549 |
+
|
550 |
+
// Future
|
551 |
+
IValue(c10::intrusive_ptr<ivalue::Future> v);
|
552 |
+
bool isFuture() const {
|
553 |
+
return Tag::Future == tag;
|
554 |
+
}
|
555 |
+
c10::intrusive_ptr<ivalue::Future> toFuture() &&;
|
556 |
+
c10::intrusive_ptr<ivalue::Future> toFuture() const&;
|
557 |
+
|
558 |
+
IValue(c10::intrusive_ptr<ivalue::Await> v);
|
559 |
+
bool isAwait() const {
|
560 |
+
return Tag::Await == tag;
|
561 |
+
}
|
562 |
+
c10::intrusive_ptr<ivalue::Await> toAwait() &&;
|
563 |
+
c10::intrusive_ptr<ivalue::Await> toAwait() const&;
|
564 |
+
|
565 |
+
// RRef
|
566 |
+
IValue(c10::intrusive_ptr<c10::RRefInterface> v);
|
567 |
+
bool isRRef() const {
|
568 |
+
return Tag::RRef == tag;
|
569 |
+
}
|
570 |
+
c10::intrusive_ptr<c10::RRefInterface> toRRef() &&;
|
571 |
+
c10::intrusive_ptr<c10::RRefInterface> toRRef() const&;
|
572 |
+
|
573 |
+
// Quantizer
|
574 |
+
IValue(c10::intrusive_ptr<at::Quantizer> v);
|
575 |
+
bool isQuantizer() const {
|
576 |
+
return Tag::Quantizer == tag;
|
577 |
+
}
|
578 |
+
c10::intrusive_ptr<at::Quantizer> toQuantizer() &&;
|
579 |
+
c10::intrusive_ptr<at::Quantizer> toQuantizer() const&;
|
580 |
+
|
581 |
+
// Int
|
582 |
+
IValue(int64_t i) : tag(Tag::Int) {
|
583 |
+
payload.u.as_int = i;
|
584 |
+
}
|
585 |
+
|
586 |
+
IValue(const c10::SymInt& i) {
|
587 |
+
if (auto mi = i.maybe_as_int()) {
|
588 |
+
tag = Tag::Int;
|
589 |
+
payload.u.as_int = *mi;
|
590 |
+
} else {
|
591 |
+
tag = Tag::SymInt;
|
592 |
+
payload.u.as_intrusive_ptr = i.toSymNode().release();
|
593 |
+
}
|
594 |
+
}
|
595 |
+
|
596 |
+
bool isSymInt() const {
|
597 |
+
return Tag::SymInt == tag;
|
598 |
+
}
|
599 |
+
|
600 |
+
c10::SymInt toSymInt() &&;
|
601 |
+
c10::SymInt toSymInt() const&;
|
602 |
+
|
603 |
+
IValue(const c10::SymFloat& i) {
|
604 |
+
if (i.is_symbolic()) {
|
605 |
+
tag = Tag::SymFloat;
|
606 |
+
payload.u.as_intrusive_ptr = i.toSymNodeImpl().release();
|
607 |
+
} else {
|
608 |
+
tag = Tag::Double;
|
609 |
+
payload.u.as_double = i.as_float_unchecked();
|
610 |
+
}
|
611 |
+
}
|
612 |
+
|
613 |
+
bool isSymFloat() const {
|
614 |
+
return Tag::SymFloat == tag;
|
615 |
+
}
|
616 |
+
|
617 |
+
c10::SymFloat toSymFloat() &&;
|
618 |
+
c10::SymFloat toSymFloat() const&;
|
619 |
+
|
620 |
+
IValue(const c10::SymBool& i) {
|
621 |
+
if (auto mi = i.maybe_as_bool()) {
|
622 |
+
tag = Tag::Bool;
|
623 |
+
payload.u.as_int = *mi;
|
624 |
+
} else {
|
625 |
+
tag = Tag::SymBool;
|
626 |
+
payload.u.as_intrusive_ptr = i.toSymNodeImpl().release();
|
627 |
+
}
|
628 |
+
}
|
629 |
+
|
630 |
+
bool isSymBool() const {
|
631 |
+
return Tag::SymBool == tag;
|
632 |
+
}
|
633 |
+
|
634 |
+
c10::SymBool toSymBool() &&;
|
635 |
+
c10::SymBool toSymBool() const&;
|
636 |
+
|
637 |
+
// allow you to pass literals (3, 4) without ambiguity
|
638 |
+
IValue(int32_t i) : IValue(static_cast<int64_t>(i)) {}
|
639 |
+
|
640 |
+
bool isInt() const {
|
641 |
+
return Tag::Int == tag;
|
642 |
+
}
|
643 |
+
|
644 |
+
int64_t toInt() const {
|
645 |
+
AT_ASSERT(isInt());
|
646 |
+
return payload.u.as_int;
|
647 |
+
}
|
648 |
+
|
649 |
+
// Bool
|
650 |
+
IValue(bool b) : tag(Tag::Bool) {
|
651 |
+
#if defined(__clang__) && defined(__x86_64__)
|
652 |
+
// Initializing the entire payload stops valgrind from reporting
|
653 |
+
// "jump or move depends on uninitialised value" in IValue copy constructor
|
654 |
+
// See https://github.com/pytorch/pytorch/issues/37117
|
655 |
+
payload.u.as_int = b;
|
656 |
+
#else
|
657 |
+
payload.u.as_bool = b;
|
658 |
+
#endif
|
659 |
+
}
|
660 |
+
bool isBool() const {
|
661 |
+
return Tag::Bool == tag;
|
662 |
+
}
|
663 |
+
bool toBool() const {
|
664 |
+
AT_ASSERT(isBool());
|
665 |
+
return payload.u.as_bool;
|
666 |
+
}
|
667 |
+
|
668 |
+
// IntList
|
669 |
+
bool isIntList() const;
|
670 |
+
bool isSymIntList() const;
|
671 |
+
c10::List<int64_t> toIntList() &&;
|
672 |
+
c10::List<int64_t> toIntList() const&;
|
673 |
+
std::vector<int64_t> toIntVector() const;
|
674 |
+
std::vector<c10::SymInt> toSymIntVector() const;
|
675 |
+
at::DimVector toDimVector() const;
|
676 |
+
|
677 |
+
// ConstantString
|
678 |
+
IValue(c10::intrusive_ptr<ivalue::ConstantString> v);
|
679 |
+
IValue(std::string v);
|
680 |
+
IValue(const char* v) : IValue(std::string(v)) {}
|
681 |
+
IValue(c10::string_view v) : IValue(std::string(v)){};
|
682 |
+
bool isString() const {
|
683 |
+
return Tag::String == tag;
|
684 |
+
}
|
685 |
+
c10::intrusive_ptr<ivalue::ConstantString> toString() &&;
|
686 |
+
c10::intrusive_ptr<ivalue::ConstantString> toString() const&;
|
687 |
+
const std::string& toStringRef() const;
|
688 |
+
c10::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
|
689 |
+
const;
|
690 |
+
c10::string_view toStringView() const;
|
691 |
+
|
692 |
+
// DoubleList
|
693 |
+
bool isDoubleList() const;
|
694 |
+
c10::List<double> toDoubleList() &&;
|
695 |
+
c10::List<double> toDoubleList() const&;
|
696 |
+
std::vector<double> toDoubleVector() const;
|
697 |
+
|
698 |
+
// ComplexDoubleList
|
699 |
+
bool isComplexDoubleList() const;
|
700 |
+
c10::List<c10::complex<double>> toComplexDoubleList() &&;
|
701 |
+
c10::List<c10::complex<double>> toComplexDoubleList() const&;
|
702 |
+
std::vector<c10::complex<double>> toComplexDoubleVector() const;
|
703 |
+
|
704 |
+
// BoolList
|
705 |
+
bool isBoolList() const;
|
706 |
+
c10::List<bool> toBoolList() &&;
|
707 |
+
c10::List<bool> toBoolList() const&;
|
708 |
+
|
709 |
+
// TensorList
|
710 |
+
bool isTensorList() const;
|
711 |
+
c10::List<at::Tensor> toTensorList() &&;
|
712 |
+
c10::List<at::Tensor> toTensorList() const&;
|
713 |
+
std::vector<at::Tensor> toTensorVector() const;
|
714 |
+
|
715 |
+
// OptionalTensorList
|
716 |
+
bool isOptionalTensorList() const;
|
717 |
+
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() &&;
|
718 |
+
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() const&;
|
719 |
+
std::vector<c10::optional<at::Tensor>> toOptionalTensorVector() const;
|
720 |
+
|
721 |
+
// GenericList
|
722 |
+
IValue(c10::List<IValue> v);
|
723 |
+
bool isList() const {
|
724 |
+
return Tag::GenericList == tag;
|
725 |
+
}
|
726 |
+
c10::List<IValue> toList() &&;
|
727 |
+
c10::List<IValue> toList() const&;
|
728 |
+
c10::ArrayRef<IValue> toListRef() const;
|
729 |
+
|
730 |
+
// Some template constructors of IValue call another constructor recursively.
// This SFINAE check ensures that the called constructor exists.
|
732 |
+
template <class T>
|
733 |
+
using enable_if_ivalue_constructible =
|
734 |
+
std::enable_if_t<std::is_constructible<IValue, T>::value, std::nullptr_t>;
|
735 |
+
|
736 |
+
// The rule for lists is more complicated; the generic constructor is only
|
737 |
+
// acceptable if your element isn't SymInt. If you do have a SymInt element,
|
738 |
+
// then you must also, at construction time, check if you can decay the list
|
739 |
+
// into an int list (this is MANDATORY, as at a use site we may expect
|
740 |
+
// toIntList to work even if at the call site you had a SymIntArrayRef
|
741 |
+
// argument). In practice, only SymIntArrayRef is used this way, so we
|
742 |
+
// didn't bother making it work for the other constructors, we just make sure
|
743 |
+
// they're not selectable.
|
744 |
+
template <class T>
|
745 |
+
using enable_if_list_is_ivalue_constructible = std::enable_if_t<
|
746 |
+
std::is_constructible<IValue, T>::value &&
|
747 |
+
!std::is_same<T, c10::SymInt>::value,
|
748 |
+
std::nullptr_t>;
|
749 |
+
|
750 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
751 |
+
IValue(c10::List<T>&& v);
|
752 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
753 |
+
IValue(const c10::List<T>& v);
|
754 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
755 |
+
IValue(at::ArrayRef<T> v);
|
756 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
757 |
+
IValue(const std::vector<T>& v);
|
758 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
759 |
+
IValue(std::vector<T>&& v);
|
760 |
+
template <class T, size_t N>
|
761 |
+
IValue(std::array<T, N> v);
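// [Editor's note] Illustrative sketch, not part of the upstream header.
// The list constructors above accept c10::List<T>, at::ArrayRef<T>,
// std::vector<T> and std::array<T, N> for IValue-constructible T, e.g.:
//
//   std::vector<int64_t> sizes{2, 3, 4};
//   c10::IValue v(sizes);                        // boxed as an int list
//   std::vector<int64_t> back = v.toIntVector(); // {2, 3, 4}
//   c10::IValue d(c10::List<double>({1.0, 2.0}));
//   bool is_double_list = d.isDoubleList();      // true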
|
762 |
+
|
763 |
+
// Manual constructors for lists of symints, which decay to int list if
|
764 |
+
// possible. To avoid ambiguous overload situations, we template them
|
765 |
+
// to prevent implicit conversions
|
766 |
+
template <class T>
|
767 |
+
using enable_if_symint =
|
768 |
+
std::enable_if_t<std::is_same<T, c10::SymInt>::value, std::nullptr_t>;
|
769 |
+
|
770 |
+
template <class T, enable_if_symint<T> = nullptr>
|
771 |
+
IValue(at::ArrayRef<T> v);
|
772 |
+
template <class T, enable_if_symint<T> = nullptr>
|
773 |
+
IValue(at::OptionalArrayRef<T> v);
|
774 |
+
template <class T, enable_if_symint<T> = nullptr>
|
775 |
+
IValue(const std::vector<T>& v);
|
776 |
+
template <class T, enable_if_symint<T> = nullptr>
|
777 |
+
IValue(std::vector<T>&& v);
|
778 |
+
|
779 |
+
|
780 |
+
template <class T>
|
781 |
+
using enable_if_ilist_is_ivalue_constructible = std::enable_if_t<
|
782 |
+
std::is_constructible<IValue, T>::value &&
|
783 |
+
std::is_constructible<IValue, typename IListRef<T>::boxed_type>::
|
784 |
+
value &&
|
785 |
+
!std::is_same<T, c10::SymInt>::value,
|
786 |
+
std::nullptr_t>;
|
787 |
+
|
788 |
+
template <class T, enable_if_ilist_is_ivalue_constructible<T> = nullptr>
|
789 |
+
IValue(c10::IListRef<T> v);
|
790 |
+
|
791 |
+
// GenericDict
|
792 |
+
IValue(c10::Dict<IValue, IValue> v);
|
793 |
+
bool isGenericDict() const {
|
794 |
+
return Tag::GenericDict == tag;
|
795 |
+
}
|
796 |
+
c10::Dict<IValue, IValue> toGenericDict() &&;
|
797 |
+
c10::Dict<IValue, IValue> toGenericDict() const&;
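// [Editor's note] Illustrative sketch, not part of the upstream header.
// A typed c10::Dict is boxed into a GenericDict of IValues, e.g.:
//
//   c10::Dict<std::string, int64_t> d;
//   d.insert("answer", 42);
//   c10::IValue v(d);                 // isGenericDict() == true
//   auto generic = v.toGenericDict(); // keys/values come back as IValues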
|
798 |
+
|
799 |
+
template <class Key, class Value>
|
800 |
+
IValue(c10::Dict<Key, Value> v);
|
801 |
+
|
802 |
+
template <class Key, class Value>
|
803 |
+
/// \cond
|
804 |
+
/// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN
|
805 |
+
C10_DEPRECATED_MESSAGE(
|
806 |
+
"IValues based on std::unordered_map<K, V> are slow and deprecated. Please use c10::Dict<K, V> instead.")
|
807 |
+
/// \endcond
|
808 |
+
IValue(std::unordered_map<Key, Value> v);
|
809 |
+
|
810 |
+
template <class T, enable_if_ivalue_constructible<T> = nullptr>
|
811 |
+
IValue(c10::optional<T> v);
|
812 |
+
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
|
813 |
+
IValue(c10::OptionalArrayRef<T> v);
|
814 |
+
IValue(c10::nullopt_t);
|
815 |
+
|
816 |
+
// ClassType
|
817 |
+
IValue(c10::intrusive_ptr<ivalue::Object> v);
|
818 |
+
bool isObject() const {
|
819 |
+
return tag == Tag::Object;
|
820 |
+
}
|
821 |
+
c10::intrusive_ptr<ivalue::Object> toObject() &&;
|
822 |
+
c10::intrusive_ptr<ivalue::Object> toObject() const&;
|
823 |
+
ivalue::Object& toObjectRef() const;
|
824 |
+
|
825 |
+
torch::jit::Module toModule() const;
|
826 |
+
bool isModule() const;
|
827 |
+
|
828 |
+
// PyObject
|
829 |
+
IValue(c10::intrusive_ptr<ivalue::PyObjectHolder> v);
|
830 |
+
bool isPyObject() const {
|
831 |
+
return tag == Tag::PyObject;
|
832 |
+
}
|
833 |
+
c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() &&;
|
834 |
+
c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() const&;
|
835 |
+
PyObject* toPyObject() const;
|
836 |
+
|
837 |
+
// Enum
|
838 |
+
explicit IValue(c10::intrusive_ptr<ivalue::EnumHolder> v);
|
839 |
+
bool isEnum() const {
|
840 |
+
return tag == Tag::Enum;
|
841 |
+
}
|
842 |
+
c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() &&;
|
843 |
+
c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() const&;
|
844 |
+
|
845 |
+
// None
|
846 |
+
IValue() : tag(Tag::None) {}
|
847 |
+
bool isNone() const {
|
848 |
+
return Tag::None == tag;
|
849 |
+
}
|
850 |
+
std::string toNone() const {
|
851 |
+
AT_ASSERT(isNone());
|
852 |
+
return "None";
|
853 |
+
}
|
854 |
+
|
855 |
+
static IValue uninitialized() {
|
856 |
+
auto i = IValue();
|
857 |
+
i.tag = Tag::Uninitialized;
|
858 |
+
return i;
|
859 |
+
}
|
860 |
+
|
861 |
+
// Scalar, which gets encoded as an Int, a Double, a ComplexDouble, a Bool,
// or one of the symbolic tags (SymInt/SymFloat/SymBool)
|
862 |
+
IValue(const at::Scalar& s) : IValue() {
|
863 |
+
// NB: do the symbolic versions first, as isFloatingPoint is true
|
864 |
+
// for both SymFloat and double
|
865 |
+
if (s.isSymInt()) {
|
866 |
+
tag = Tag::SymInt;
|
867 |
+
payload.u.as_intrusive_ptr = s.toSymInt().toSymNode().release();
|
868 |
+
} else if (s.isSymFloat()) {
|
869 |
+
tag = Tag::SymFloat;
|
870 |
+
payload.u.as_intrusive_ptr = s.toSymFloat().toSymNodeImpl().release();
|
871 |
+
} else if (s.isSymBool()) {
|
872 |
+
tag = Tag::SymBool;
|
873 |
+
payload.u.as_intrusive_ptr = s.toSymBool().toSymNodeImpl().release();
|
874 |
+
} else if (s.isFloatingPoint()) {
|
875 |
+
tag = Tag::Double;
|
876 |
+
payload.u.as_double = s.toDouble();
|
877 |
+
} else if (s.isComplex()) {
|
878 |
+
*this = s.toComplexDouble();
|
879 |
+
} else if (s.isBoolean()) {
|
880 |
+
tag = Tag::Bool;
|
881 |
+
payload.u.as_bool = s.toBool();
|
882 |
+
} else {
|
883 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
884 |
+
s.isIntegral(false), "Unknown type in Scalar");
|
885 |
+
tag = Tag::Int;
|
886 |
+
payload.u.as_int = s.toLong();
|
887 |
+
}
|
888 |
+
}
|
889 |
+
|
890 |
+
bool isScalar() const {
|
891 |
+
return isDouble() || isInt() || isComplexDouble() || isBool() ||
|
892 |
+
isSymInt() || isSymFloat() || isSymBool();
|
893 |
+
}
|
894 |
+
|
895 |
+
at::Scalar toScalar() const {
|
896 |
+
if (isDouble())
|
897 |
+
return toDouble();
|
898 |
+
else if (isInt())
|
899 |
+
return toInt();
|
900 |
+
else if (isComplexDouble())
|
901 |
+
return toComplexDouble();
|
902 |
+
else if (isBool())
|
903 |
+
return toBool();
|
904 |
+
else if (isSymInt())
|
905 |
+
return toSymInt();
|
906 |
+
else if (isSymFloat())
|
907 |
+
return toSymFloat();
|
908 |
+
else if (isSymBool())
|
909 |
+
return toSymBool();
|
910 |
+
throw std::runtime_error("IValue is not a Scalar");
|
911 |
+
}
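// [Editor's note] Illustrative sketch, not part of the upstream header.
// Round-tripping an at::Scalar through IValue picks the matching tag:
//
//   c10::IValue v(at::Scalar(2.5));   // stored with Tag::Double
//   bool s = v.isScalar();            // true
//   at::Scalar back = v.toScalar();   // 2.5 as a double Scalar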
|
912 |
+
|
913 |
+
// Device
|
914 |
+
IValue(c10::Device d) : tag(Tag::Device) {
|
915 |
+
payload.u.as_device.type = d.type();
|
916 |
+
payload.u.as_device.index = d.index();
|
917 |
+
}
|
918 |
+
bool isDevice() const {
|
919 |
+
return Tag::Device == tag;
|
920 |
+
}
|
921 |
+
c10::Device toDevice() const {
|
922 |
+
AT_ASSERT(isDevice());
|
923 |
+
return c10::Device(payload.u.as_device.type, payload.u.as_device.index);
|
924 |
+
}
|
925 |
+
|
926 |
+
// Stream
|
927 |
+
IValue(c10::Stream s) : tag(Tag::Stream) {
|
928 |
+
auto v = c10::make_intrusive<ivalue::StreamData3Holder>(s.pack3());
|
929 |
+
payload.u.as_intrusive_ptr = v.release();
|
930 |
+
}
|
931 |
+
c10::Stream toStream() &&;
|
932 |
+
c10::Stream toStream() const&;
|
933 |
+
bool isStream() const {
|
934 |
+
return Tag::Stream == tag;
|
935 |
+
}
|
936 |
+
|
937 |
+
// ScalarType
|
938 |
+
IValue(ScalarType t)
|
939 |
+
: IValue(static_cast<std::underlying_type<ScalarType>::type>(t)) {}
|
940 |
+
at::ScalarType toScalarType() const {
|
941 |
+
return static_cast<at::ScalarType>(toInt());
|
942 |
+
}
|
943 |
+
|
944 |
+
// Layout
|
945 |
+
IValue(Layout l)
|
946 |
+
: IValue(static_cast<std::underlying_type<Layout>::type>(l)) {}
|
947 |
+
at::Layout toLayout() const {
|
948 |
+
return static_cast<at::Layout>(toInt());
|
949 |
+
}
|
950 |
+
|
951 |
+
// MemoryFormat
|
952 |
+
IValue(MemoryFormat m)
|
953 |
+
: IValue(static_cast<std::underlying_type<MemoryFormat>::type>(m)) {}
|
954 |
+
at::MemoryFormat toMemoryFormat() const {
|
955 |
+
return static_cast<at::MemoryFormat>(toInt());
|
956 |
+
}
|
957 |
+
|
958 |
+
// QScheme
|
959 |
+
IValue(at::QScheme qscheme) : tag(Tag::Int) {
|
960 |
+
payload.u.as_int = static_cast<int64_t>(qscheme);
|
961 |
+
}
|
962 |
+
|
963 |
+
at::QScheme toQScheme() const {
|
964 |
+
return static_cast<at::QScheme>(toInt());
|
965 |
+
}
|
966 |
+
|
967 |
+
// Dimname
|
968 |
+
IValue(at::Dimname dimname) : IValue(dimname.symbol().toQualString()) {}
|
969 |
+
|
970 |
+
at::Dimname toDimname() const {
|
971 |
+
return at::Dimname::fromSymbol(Symbol::fromQualString(toStringRef()));
|
972 |
+
}
|
973 |
+
|
974 |
+
// Generator
|
975 |
+
IValue(at::Generator g) : tag(Tag::Generator) {
|
976 |
+
payload.u.as_intrusive_ptr =
|
977 |
+
null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl());
|
978 |
+
}
|
979 |
+
bool isGenerator() const {
|
980 |
+
return Tag::Generator == tag;
|
981 |
+
}
|
982 |
+
at::Generator toGenerator() &&;
|
983 |
+
at::Generator toGenerator() const&;
|
984 |
+
|
985 |
+
// for debugging
|
986 |
+
std::string tagKind() const {
|
987 |
+
switch (tag) {
|
988 |
+
#define DEFINE_CASE(x) \
|
989 |
+
case Tag::x: \
|
990 |
+
return #x;
|
991 |
+
TORCH_FORALL_TAGS(DEFINE_CASE)
|
992 |
+
#undef DEFINE_CASE
|
993 |
+
}
|
994 |
+
return "InvalidTag(" + std::to_string(static_cast<int>(tag)) + ")";
|
995 |
+
}
|
996 |
+
|
997 |
+
// Generic v.to<at::Tensor>() implementations that can be used in special
// functions like pop/push that use template metaprogramming.
// Prefer the directly named methods when you can, since they are simpler
// to understand.

// Note: if you get linker errors saying one of these is missing,
// change it to ... && = delete; and you will see better error messages for
// why. However, we cannot commit this because some compiler versions barf
// on it.
|
1007 |
+
template <typename T>
|
1008 |
+
T to() &&;
|
1009 |
+
template <typename T>
|
1010 |
+
typename c10::detail::ivalue_to_const_ref_overload_return<T>::type to()
|
1011 |
+
const&;
|
1012 |
+
|
1013 |
+
// ToOptional: convert a IValue to the Optional obj that accepts both T and
|
1014 |
+
// None
|
1015 |
+
template <typename T>
|
1016 |
+
optional<T> toOptional();
|
1017 |
+
template <typename T>
|
1018 |
+
optional<T> toOptional() const;
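// [Editor's note] Illustrative sketch, not part of the upstream header.
// toOptional<T>() maps None to c10::nullopt and otherwise unwraps T:
//
//   c10::IValue none;                                       // Tag::None
//   c10::optional<int64_t> a = none.toOptional<int64_t>();  // c10::nullopt
//   c10::IValue five(static_cast<int64_t>(5));
//   c10::optional<int64_t> b = five.toOptional<int64_t>();  // 5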
|
1019 |
+
|
1020 |
+
/// @private [doxygen private]
|
1021 |
+
/// this is a shallow comparison of two IValues to test the object identity
|
1022 |
+
bool isSameIdentity(const IValue& rhs) const;
|
1023 |
+
|
1024 |
+
// Computes the "official" string representation of an IValue. This produces a
|
1025 |
+
// TorchScript expression that can be used to recreate an IValue with the same
|
1026 |
+
// value (e.g. when we are printing constants in the serializer).
|
1027 |
+
//
|
1028 |
+
// Callers can use `customFormatter` to override how `repr()` prints out an
|
1029 |
+
// IValue. This is useful if you have some other environment where you can
|
1030 |
+
// look up values, and you want to print a reference to that environment (like
|
1031 |
+
// the serializer's constant table).
|
1032 |
+
//
|
1033 |
+
// repr() is not necessarily defined on all objects!
|
1034 |
+
std::ostream& repr(
|
1035 |
+
std::ostream& stream,
|
1036 |
+
std::function<bool(std::ostream&, const IValue& v)> customFormatter)
|
1037 |
+
const;
|
1038 |
+
|
1039 |
+
// Computes an "informal" string representation of an IValue. This should be
|
1040 |
+
// used for debugging, or servicing `print()`-like functions.
|
1041 |
+
// This is different from `repr()` in that there is no expectation that we can
|
1042 |
+
// exactly reconstruct an IValue from the output; feel free to use a
|
1043 |
+
// concise/pretty form
|
1044 |
+
TORCH_API friend std::ostream& operator<<(std::ostream& out, const IValue& v);
|
1045 |
+
|
1046 |
+
bool isPtrType() const {
|
1047 |
+
if (isTensor()) {
|
1048 |
+
return payload.as_tensor.defined();
|
1049 |
+
}
|
1050 |
+
return isIntrusivePtrLegacyBehavior();
|
1051 |
+
}
|
1052 |
+
|
1053 |
+
/// @private [doxygen private]
|
1054 |
+
const void* internalToPointer() const {
|
1055 |
+
TORCH_INTERNAL_ASSERT(
|
1056 |
+
isPtrType(), "Can only call internalToPointer() for pointer types");
|
1057 |
+
if (isTensor()) {
|
1058 |
+
return payload.as_tensor.unsafeGetTensorImpl();
|
1059 |
+
} else {
|
1060 |
+
return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()
|
1061 |
+
? payload.u.as_intrusive_ptr
|
1062 |
+
: nullptr;
|
1063 |
+
}
|
1064 |
+
}
|
1065 |
+
|
1066 |
+
template <typename T = c10::PlatformType>
|
1067 |
+
TypePtr type() const;
|
1068 |
+
|
1069 |
+
// Detect aliased tensors.
|
1070 |
+
struct HashAliasedIValue {
|
1071 |
+
size_t hashTensor(const at::Tensor& ten) const {
|
1072 |
+
if (ten.is_sparse()) {
|
1073 |
+
// COO sparse tensors have a "values" tensor and an "indices" tensor
|
1074 |
+
// so this will detect overlap of sparse tensors that share a values
|
1075 |
+
// tensor, but not sparse tensors that share an indices tensor.
|
1076 |
+
return hashTensor(ten._values());
|
1077 |
+
} else if (ten.is_sparse_csr()) {
|
1078 |
+
// Similarly, CSR sparse tensors have a "values" tensor plus index tensors,
// so this will detect overlap of sparse tensors that share a values
// tensor, but not sparse tensors that only share an index tensor.
|
1081 |
+
return hashTensor(ten.values());
|
1082 |
+
} else if (!ten.has_storage()) {
|
1083 |
+
// Opaque tensors such as the ones constructed by the MKL-DNN backend
|
1084 |
+
// don't have storage so we just use their TensorImpls.
|
1085 |
+
// TODO: Find way to expose alias info for opaque tensors.
|
1086 |
+
return reinterpret_cast<size_t>(ten.unsafeGetTensorImpl());
|
1087 |
+
} else {
|
1088 |
+
return reinterpret_cast<size_t>(ten.storage().unsafeGetStorageImpl());
|
1089 |
+
}
|
1090 |
+
}
|
1091 |
+
size_t operator()(const IValue& val) const {
|
1092 |
+
if (val.isTensor()) {
|
1093 |
+
return hashTensor(val.toTensor());
|
1094 |
+
}
|
1095 |
+
// If it is not a Tensor, then two mutable IValues alias each other only
|
1096 |
+
// if they are the same pointer.
|
1097 |
+
return val.payload.u.as_int;
|
1098 |
+
}
|
1099 |
+
};
|
1100 |
+
|
1101 |
+
struct CompAliasedIValues {
|
1102 |
+
bool operator()(const IValue& lhs, const IValue& rhs) const {
|
1103 |
+
return lhs.isAliasOf(rhs);
|
1104 |
+
}
|
1105 |
+
};
|
1106 |
+
|
1107 |
+
using HashAliasedIValues =
|
1108 |
+
std::unordered_set<IValue, HashAliasedIValue, CompAliasedIValues>;
|
1109 |
+
using HashAliasedIValueMap =
|
1110 |
+
std::unordered_map<IValue, IValue, HashAliasedIValue, CompAliasedIValues>;
|
1111 |
+
|
1112 |
+
// Checks if this and rhs have a subvalue in common.
// [t1, t2] and [t2, t3] returns true.
|
1114 |
+
bool overlaps(const IValue& rhs) const;
|
1115 |
+
|
1116 |
+
// Inserts all subvalues of this in subValues.
|
1117 |
+
void getSubValues(HashAliasedIValues& subValues) const;
|
1118 |
+
|
1119 |
+
// Apply visitor to every subvalue.
|
1120 |
+
// TODO: There are several places that recurse over IValue. This is fragile.
|
1121 |
+
// This visitor should be used to recurse over ivalues.
|
1122 |
+
void visit(const std::function<bool(const IValue&)>& visitor) const;
|
1123 |
+
IValue deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
|
1124 |
+
IValue deepcopy(
|
1125 |
+
HashAliasedIValueMap& memo,
|
1126 |
+
c10::optional<at::Device> device = c10::nullopt) const;
|
1127 |
+
|
1128 |
+
private:
|
1129 |
+
static c10::intrusive_ptr_target* null_to_undefined_tensor(
|
1130 |
+
c10::intrusive_ptr_target* p) {
|
1131 |
+
return p ? p
|
1132 |
+
: static_cast<c10::intrusive_ptr_target*>(
|
1133 |
+
c10::UndefinedTensorImpl::singleton());
|
1134 |
+
}
|
1135 |
+
|
1136 |
+
static bool ptrEqual(const IValue& lhs, const IValue& rhs);
|
1137 |
+
// NOTE: IValue tags are intentionally private. In the future we may encode
|
1138 |
+
// this value differently (e.g. using NaN boxing), and this would make it more
|
1139 |
+
// costly to determine the tag for all types vs just determining if something
|
1140 |
+
// is a particular type. Instead we want clients to use the `isX` methods when
|
1141 |
+
// possible. If for perf. reasons you really, absolutely, must have a jump
|
1142 |
+
// table, then we can revisit this.
|
1143 |
+
enum class Tag : uint32_t {
|
1144 |
+
#define DEFINE_TAG(x) x,
|
1145 |
+
TORCH_FORALL_TAGS(DEFINE_TAG)
|
1146 |
+
#undef DEFINE_TAG
|
1147 |
+
};
|
1148 |
+
|
1149 |
+
#define COUNT_TAG(x) 1 +
|
1150 |
+
static constexpr auto kNumTags = TORCH_FORALL_TAGS(COUNT_TAG) 0;
|
1151 |
+
#undef COUNT_TAG
|
1152 |
+
|
1153 |
+
template <
|
1154 |
+
class T,
|
1155 |
+
class NullType = c10::detail::intrusive_target_default_null_type<T>>
|
1156 |
+
c10::intrusive_ptr<T, NullType> moveToIntrusivePtr();
|
1157 |
+
template <
|
1158 |
+
typename T,
|
1159 |
+
class NullType = c10::detail::intrusive_target_default_null_type<T>>
|
1160 |
+
c10::intrusive_ptr<T, NullType> toIntrusivePtr() const;
|
1161 |
+
|
1162 |
+
void destroy() {
|
1163 |
+
// We carefully construct this call to both 1) avoid UB by using
|
1164 |
+
// the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable
|
1165 |
+
// the compiler to generate the same code for each case. It is
|
1166 |
+
// surprisingly difficult to get this right.
|
1167 |
+
if (isTensor() || isIntrusivePtr()) {
|
1168 |
+
c10::intrusive_ptr_target* p = isTensor()
|
1169 |
+
? payload.as_tensor.unsafeGetTensorImpl()
|
1170 |
+
: payload.u.as_intrusive_ptr;
|
1171 |
+
c10::intrusive_ptr<intrusive_ptr_target, c10::UndefinedTensorImpl>::
|
1172 |
+
reclaim(p);
|
1173 |
+
// No need to make this destructor call!
|
1174 |
+
// payload.as_tensor.~Tensor();
|
1175 |
+
}
|
1176 |
+
}
|
1177 |
+
|
1178 |
+
C10_ALWAYS_INLINE void moveFrom(IValue&& rhs) noexcept {
|
1179 |
+
if (rhs.isTensor()) {
|
1180 |
+
new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor));
|
1181 |
+
// As far as I can tell, omitting the usual explicit destructor call
|
1182 |
+
// is not UB in and of itself, and it's a slight perf win. The
|
1183 |
+
// destructor is a no-op, because the moved-from Tensor is
|
1184 |
+
// effectively an intrusive_ptr in the null state, so we don't need
|
1185 |
+
// the behavior for correctness reasons either. Leaving this
|
1186 |
+
// explanatory comment, including commented-out destructor call, to
|
1187 |
+
// make this abundantly clear.
|
1188 |
+
//
|
1189 |
+
// rhs.payload.as_tensor.~Tensor();
|
1190 |
+
} else {
|
1191 |
+
payload.u = rhs.payload.u;
|
1192 |
+
}
|
1193 |
+
tag = rhs.tag;
|
1194 |
+
rhs.clearToNone();
|
1195 |
+
}
|
1196 |
+
|
1197 |
+
void clearToNone() noexcept {
|
1198 |
+
payload.u.as_int = 0;
|
1199 |
+
tag = Tag::None;
|
1200 |
+
}
|
1201 |
+
|
1202 |
+
private:
|
1203 |
+
// This is the source of truth for isIntrusivePtr; edit results here
|
1204 |
+
// as needed and isIntrusivePtr will pick them up.
|
1205 |
+
// NOLINTBEGIN(bugprone-branch-clone)
|
1206 |
+
static constexpr bool isIntrusivePtrConstexpr(Tag tag) {
|
1207 |
+
switch (tag) {
|
1208 |
+
case Tag::None:
|
1209 |
+
return false;
|
1210 |
+
case Tag::Tensor:
|
1211 |
+
return false;
|
1212 |
+
case Tag::Storage:
|
1213 |
+
return true;
|
1214 |
+
case Tag::Generator:
|
1215 |
+
return true;
|
1216 |
+
case Tag::Double:
|
1217 |
+
return false;
|
1218 |
+
case Tag::ComplexDouble:
|
1219 |
+
return true;
|
1220 |
+
case Tag::Int:
|
1221 |
+
return false;
|
1222 |
+
case Tag::SymInt:
|
1223 |
+
return true;
|
1224 |
+
case Tag::SymFloat:
|
1225 |
+
return true;
|
1226 |
+
case Tag::SymBool:
|
1227 |
+
return true;
|
1228 |
+
case Tag::Bool:
|
1229 |
+
return false;
|
1230 |
+
case Tag::Tuple:
|
1231 |
+
return true;
|
1232 |
+
case Tag::String:
|
1233 |
+
return true;
|
1234 |
+
case Tag::Blob:
|
1235 |
+
return true;
|
1236 |
+
case Tag::GenericList:
|
1237 |
+
return true;
|
1238 |
+
case Tag::GenericDict:
|
1239 |
+
return true;
|
1240 |
+
case Tag::Future:
|
1241 |
+
return true;
|
1242 |
+
case Tag::Await:
|
1243 |
+
return true;
|
1244 |
+
case Tag::Device:
|
1245 |
+
return false;
|
1246 |
+
case Tag::Stream:
|
1247 |
+
return true;
|
1248 |
+
case Tag::Object:
|
1249 |
+
return true;
|
1250 |
+
case Tag::PyObject:
|
1251 |
+
return true;
|
1252 |
+
case Tag::Uninitialized:
|
1253 |
+
return false;
|
1254 |
+
case Tag::Capsule:
|
1255 |
+
return true;
|
1256 |
+
case Tag::RRef:
|
1257 |
+
return true;
|
1258 |
+
case Tag::Quantizer:
|
1259 |
+
return true;
|
1260 |
+
case Tag::Enum:
|
1261 |
+
return true;
|
1262 |
+
}
|
1263 |
+
return false;
|
1264 |
+
}
|
1265 |
+
// NOLINTEND(bugprone-branch-clone)
|
1266 |
+
|
1267 |
+
public:
|
1268 |
+
// Don't edit this just to add results for new tags; edit
|
1269 |
+
// isIntrusivePtrConstexpr above.
|
1270 |
+
bool isIntrusivePtr() const {
|
1271 |
+
// Implementation NOTE: the switch in isIntrusivePtrConstexpr
|
1272 |
+
// above is the previous production implementation of this
|
1273 |
+
// function. We observed that, at least on x86_64, the generated
|
1274 |
+
// instruction sequence was a similar bit vector test to what we
|
1275 |
+
// have manually implemented below, except that there was an extra
|
1276 |
+
// "bounds check" branch confirming, essentially, that `tag <
|
1277 |
+
// kNumTags` and providing a consistent result in that case. We
|
1278 |
+
// don't care about the result if tag is out of bounds, so we'd
|
1279 |
+
// like to eliminate that comparison and branch; manually
|
1280 |
+
// implementing this function as a bit test is the simplest way I
|
1281 |
+
// could find to accomplish that elimination.
|
1282 |
+
static constexpr uint32_t kTruthTableBitVector =
|
1283 |
+
#define TRUTH_TABLE_ENTRY(tag) \
|
1284 |
+
(uint32_t(isIntrusivePtrConstexpr(Tag::tag)) << uint32_t(Tag::tag)) |
|
1285 |
+
TORCH_FORALL_TAGS(TRUTH_TABLE_ENTRY)
|
1286 |
+
#undef TRUTH_TABLE_ENTRY
|
1287 |
+
0;
|
1288 |
+
|
1289 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
1290 |
+
static_cast<uint32_t>(tag) < kNumTags,
|
1291 |
+
"unexpected tag ",
|
1292 |
+
static_cast<int>(tag));
|
1293 |
+
return kTruthTableBitVector & (1 << (uint32_t(tag) % 32));
|
1294 |
+
}
|
1295 |
+
|
1296 |
+
// Storage and Generator were treated specially when
|
1297 |
+
// is_intrusive_ptr was stored as explicit state. This getter
|
1298 |
+
// preserves the old behavior for use with WeakIValue for now.
|
1299 |
+
bool isIntrusivePtrLegacyBehavior() const {
|
1300 |
+
if (tag == Tag::Storage || tag == Tag::Generator) {
|
1301 |
+
return payload.u.as_intrusive_ptr !=
|
1302 |
+
c10::UndefinedTensorImpl::singleton();
|
1303 |
+
} else {
|
1304 |
+
return isIntrusivePtr();
|
1305 |
+
}
|
1306 |
+
}
|
1307 |
+
|
1308 |
+
union Payload {
|
1309 |
+
// [TriviallyCopyablePayload]
|
1310 |
+
// We use a nested union here so that we can make the copy easy
|
1311 |
+
// and efficient in the non-tensor (i.e., trivially copyable)
|
1312 |
+
// case. Specifically, we do not have to do a switch-on-tag to
|
1313 |
+
// figure out which union member to assign; we can just use
|
1314 |
+
// TriviallyCopyablePayload::operator=.
|
1315 |
+
union TriviallyCopyablePayload {
|
1316 |
+
TriviallyCopyablePayload() : as_int(0) {}
|
1317 |
+
int64_t as_int;
|
1318 |
+
double as_double;
|
1319 |
+
bool as_bool;
|
1320 |
+
// Invariant: never nullptr; null state is represented as
|
1321 |
+
// c10::UndefinedTensorImpl::singleton() for consistency of
|
1322 |
+
// representation with Tensor.
|
1323 |
+
c10::intrusive_ptr_target* as_intrusive_ptr;
|
1324 |
+
struct {
|
1325 |
+
c10::DeviceType type;
|
1326 |
+
DeviceIndex index;
|
1327 |
+
} as_device;
|
1328 |
+
} u;
|
1329 |
+
at::Tensor as_tensor;
|
1330 |
+
Payload() : u() {}
|
1331 |
+
~Payload() {}
|
1332 |
+
};
|
1333 |
+
|
1334 |
+
IValue(const Payload& p, Tag t) : tag(t) {
|
1335 |
+
if (isTensor()) {
|
1336 |
+
new (&payload.as_tensor) at::Tensor(p.as_tensor);
|
1337 |
+
} else {
|
1338 |
+
payload.u = p.u;
|
1339 |
+
}
|
1340 |
+
}
|
1341 |
+
|
1342 |
+
template <typename T>
|
1343 |
+
struct TagType {};
|
1344 |
+
|
1345 |
+
friend MaybeOwnedTraits<IValue>;
|
1346 |
+
|
1347 |
+
Payload payload;
|
1348 |
+
Tag tag{IValue::Tag::None};
|
1349 |
+
friend struct WeakIValue;
|
1350 |
+
};
|
1351 |
+
|
1352 |
+
struct TORCH_API WeakIValue final {
|
1353 |
+
WeakIValue() = default;
|
1354 |
+
|
1355 |
+
WeakIValue(const WeakIValue& rhs)
|
1356 |
+
: payload(rhs.payload),
|
1357 |
+
tag(rhs.tag),
|
1358 |
+
is_intrusive_ptr(rhs.is_intrusive_ptr) {
|
1359 |
+
if (is_intrusive_ptr &&
|
1360 |
+
payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
1361 |
+
c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
|
1362 |
+
}
|
1363 |
+
}
|
1364 |
+
WeakIValue(const IValue& rhs)
|
1365 |
+
: tag(rhs.tag), is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) {
|
1366 |
+
if (rhs.isTensor()) {
|
1367 |
+
payload.as_intrusive_ptr = rhs.unsafeToTensorImpl();
|
1368 |
+
is_intrusive_ptr = true;
|
1369 |
+
} else {
|
1370 |
+
payload = rhs.payload.u;
|
1371 |
+
}
|
1372 |
+
if (is_intrusive_ptr) {
|
1373 |
+
if (payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
1374 |
+
c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
|
1375 |
+
}
|
1376 |
+
}
|
1377 |
+
}
|
1378 |
+
WeakIValue(WeakIValue&& rhs) noexcept : WeakIValue() {
|
1379 |
+
swap(rhs);
|
1380 |
+
}
|
1381 |
+
~WeakIValue() {
|
1382 |
+
if (is_intrusive_ptr &&
|
1383 |
+
payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
|
1384 |
+
c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr);
|
1385 |
+
}
|
1386 |
+
}
|
1387 |
+
WeakIValue& operator=(WeakIValue&& rhs) & noexcept {
|
1388 |
+
WeakIValue(std::move(rhs)).swap(*this); // this also sets rhs to None
|
1389 |
+
return *this;
|
1390 |
+
}
|
1391 |
+
WeakIValue& operator=(WeakIValue const& rhs) & {
|
1392 |
+
WeakIValue(rhs).swap(*this);
|
1393 |
+
return *this;
|
1394 |
+
}
|
1395 |
+
void swap(WeakIValue& rhs) noexcept {
|
1396 |
+
std::swap(payload, rhs.payload);
|
1397 |
+
std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
|
1398 |
+
std::swap(tag, rhs.tag);
|
1399 |
+
}
|
1400 |
+
|
1401 |
+
bool isSameIdentity(const WeakIValue& rhs) const {
|
1402 |
+
return payload.as_int == rhs.payload.as_int && tag == rhs.tag &&
|
1403 |
+
is_intrusive_ptr == rhs.is_intrusive_ptr;
|
1404 |
+
}
|
1405 |
+
|
1406 |
+
IValue lock() const {
|
1407 |
+
if (!is_intrusive_ptr) {
|
1408 |
+
IValue::Payload newPayload;
|
1409 |
+
newPayload.u = payload;
|
1410 |
+
return IValue(newPayload, tag);
|
1411 |
+
}
|
1412 |
+
if (IValue::Tag::Tensor == tag) {
|
1413 |
+
auto temp =
|
1414 |
+
c10::weak_intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl>::
|
1415 |
+
reclaim(static_cast<at::TensorImpl*>(payload.as_intrusive_ptr));
|
1416 |
+
c10::intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl> ip(
|
1417 |
+
temp.lock());
|
1418 |
+
temp.release();
|
1419 |
+
if (!ip) {
|
1420 |
+
return IValue();
|
1421 |
+
} else {
|
1422 |
+
return IValue(at::Tensor(std::move(ip)));
|
1423 |
+
}
|
1424 |
+
} else {
|
1425 |
+
auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target>::reclaim(
|
1426 |
+
payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
|
1427 |
+
? nullptr
|
1428 |
+
: payload.as_intrusive_ptr);
|
1429 |
+
IValue::Payload pl;
|
1430 |
+
pl.u.as_intrusive_ptr = temp.lock().release();
|
1431 |
+
temp.release();
|
1432 |
+
if (!pl.u.as_intrusive_ptr) {
|
1433 |
+
return IValue();
|
1434 |
+
} else {
|
1435 |
+
return IValue(pl, tag);
|
1436 |
+
}
|
1437 |
+
}
|
1438 |
+
}
|
1439 |
+
|
1440 |
+
size_t use_count() const noexcept {
|
1441 |
+
if (!is_intrusive_ptr) {
|
1442 |
+
return 1;
|
1443 |
+
}
|
1444 |
+
auto temp = c10::weak_intrusive_ptr<
|
1445 |
+
c10::intrusive_ptr_target,
|
1446 |
+
c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
|
1447 |
+
size_t result = temp.use_count();
|
1448 |
+
temp.release();
|
1449 |
+
return result;
|
1450 |
+
}
|
1451 |
+
|
1452 |
+
size_t weak_use_count() const noexcept {
|
1453 |
+
if (!is_intrusive_ptr) {
|
1454 |
+
return 1;
|
1455 |
+
}
|
1456 |
+
auto temp = c10::weak_intrusive_ptr<
|
1457 |
+
c10::intrusive_ptr_target,
|
1458 |
+
c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
|
1459 |
+
size_t result = temp.weak_use_count();
|
1460 |
+
temp.release();
|
1461 |
+
return result;
|
1462 |
+
}
|
1463 |
+
size_t hash() const {
|
1464 |
+
return payload.as_int;
|
1465 |
+
}
|
1466 |
+
|
1467 |
+
private:
|
1468 |
+
using Payload = IValue::Payload::TriviallyCopyablePayload;
|
1469 |
+
Payload payload;
|
1470 |
+
IValue::Tag tag{IValue::Tag::None};
|
1471 |
+
bool is_intrusive_ptr{false};
|
1472 |
+
};
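// [Editor's note] Illustrative sketch, not part of the upstream header.
// WeakIValue keeps a non-owning reference that can be promoted back:
//
//   c10::IValue strong(at::ones({2}));
//   c10::WeakIValue weak(strong);     // does not extend the lifetime
//   c10::IValue again = weak.lock();  // None if the referent has expired
//   bool alive = !again.isNone();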
|
1473 |
+
|
1474 |
+
// An owning pointer to a type. When the type is a class type, it requires a pair
|
1475 |
+
// of shared_ptrs to the class type and its owning CU, so that the class type is
|
1476 |
+
// guaranteed to stay alive as long as we hold this object.
|
1477 |
+
struct TORCH_API StrongTypePtr {
|
1478 |
+
StrongTypePtr(std::shared_ptr<torch::jit::CompilationUnit> cu, TypePtr type);
|
1479 |
+
|
1480 |
+
std::shared_ptr<torch::jit::CompilationUnit> cu_;
|
1481 |
+
TypePtr type_;
|
1482 |
+
};
|
1483 |
+
|
1484 |
+
// [Constant Object Weak CompilationUnit Reference]
|
1485 |
+
// A non-owning pointer to a type. When a class gets inserted as a constant
// into a graph, using a strong pointer would create a circular reference
// from Object -> CompilationUnit and CompilationUnit -> Graph (which owns
// the constant Object).
|
1489 |
+
struct TORCH_API WeakTypePtr {
|
1490 |
+
WeakTypePtr(std::weak_ptr<torch::jit::CompilationUnit> cu, TypePtr type);
|
1491 |
+
|
1492 |
+
std::weak_ptr<torch::jit::CompilationUnit> cu_;
|
1493 |
+
TypePtr type_;
|
1494 |
+
};
|
1495 |
+
|
1496 |
+
// internal build errors with std::variant :/
|
1497 |
+
struct WeakOrStrongCompilationUnit {
|
1498 |
+
explicit WeakOrStrongCompilationUnit(
|
1499 |
+
std::shared_ptr<torch::jit::CompilationUnit> shared_cu)
|
1500 |
+
: strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {}
|
1501 |
+
|
1502 |
+
explicit WeakOrStrongCompilationUnit(
|
1503 |
+
std::weak_ptr<torch::jit::CompilationUnit> weak_cu)
|
1504 |
+
: strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {}
|
1505 |
+
|
1506 |
+
std::shared_ptr<torch::jit::CompilationUnit> getStrongRefOrThrow() const {
|
1507 |
+
TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt);
|
1508 |
+
return *strong_ptr_;
|
1509 |
+
}
|
1510 |
+
|
1511 |
+
std::weak_ptr<torch::jit::CompilationUnit> getWeakRefOrThrow() const {
|
1512 |
+
TORCH_INTERNAL_ASSERT(weak_ptr_ != c10::nullopt);
|
1513 |
+
return *weak_ptr_;
|
1514 |
+
}
|
1515 |
+
|
1516 |
+
bool holdingStrongRef() const {
|
1517 |
+
return strong_ptr_ != c10::nullopt;
|
1518 |
+
}
|
1519 |
+
|
1520 |
+
bool holdingEmptyStrongRef() const {
|
1521 |
+
return holdingStrongRef() && *strong_ptr_ == nullptr;
|
1522 |
+
}
|
1523 |
+
|
1524 |
+
c10::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
|
1525 |
+
c10::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_;
|
1526 |
+
};
|
1527 |
+
|
1528 |
+
// An Object will hold a non-owning CompilationUnit reference if it is a
// constant in the graph, and an owning reference otherwise.
|
1530 |
+
struct TORCH_API WeakOrStrongTypePtr {
|
1531 |
+
explicit WeakOrStrongTypePtr(WeakTypePtr weak)
|
1532 |
+
: cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))),
|
1533 |
+
type_(std::move(weak.type_)) {}
|
1534 |
+
explicit WeakOrStrongTypePtr(StrongTypePtr strong)
|
1535 |
+
: cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))),
|
1536 |
+
type_(std::move(strong.type_)) {}
|
1537 |
+
explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type)
|
1538 |
+
: cu_(std::move(cu)), type_(std::move(type)) {}
|
1539 |
+
WeakTypePtr asWeakTypePtr() const;
|
1540 |
+
|
1541 |
+
WeakOrStrongCompilationUnit cu_;
|
1542 |
+
TypePtr type_;
|
1543 |
+
|
1544 |
+
bool holds_strong_ref() const {
|
1545 |
+
return cu_.holdingStrongRef();
|
1546 |
+
}
|
1547 |
+
|
1548 |
+
bool holds_empty_strong_ref() const {
|
1549 |
+
return cu_.holdingEmptyStrongRef();
|
1550 |
+
}
|
1551 |
+
};
|
1552 |
+
|
1553 |
+
} // namespace c10
|
1554 |
+
|
1555 |
+
#include <ATen/core/ivalue_inl.h> // IWYU pragma: keep
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h
ADDED
@@ -0,0 +1,2545 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#pragma once

#include <condition_variable>
#include <memory>
#include <type_traits>
#include <utility>

#include <ATen/core/Dict.h>
#include <ATen/core/List.h>
#include <ATen/core/IListRef.h>
#include <ATen/core/functional.h>
#include <ATen/core/jit_type.h>
#include <ATen/core/qualified_name.h>
#include <ATen/core/rref_interface.h>
#include <ATen/core/symbol.h>
#include <c10/core/DeviceGuard.h>
#include <c10/core/Event.h>
#include <c10/core/Scalar.h>
#include <c10/core/Stream.h>
#include <c10/core/StreamGuard.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/FunctionRef.h>
#include <c10/util/Logging.h>
#include <c10/util/hash.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h>

namespace torch {
namespace jit {
struct Function;
struct CompilationUnit;
} // namespace jit
TORCH_API bool isCustomClass(const c10::IValue& v);
} // namespace torch
namespace c10 {
struct IValue;
struct ClassType;
struct TupleType;
struct EnumType;
struct InferredType;

// For custom class __init__ registration, we need to pass in a function
// that looks like this: [](IValue x, args...)

// However, make_boxed_from_unboxed_functor.h automatically sets the input types
// of the function by introspecting the types of the functor (which is IValue in
// this case). However, we need the type it binds to be Foo.

// Instead, we pass in a lambda [](ivalue_holder<CurClass> x, args...) from
// which getTypePtr can recover the original class pointer.

template <typename TaggedCapsuleType>
struct tagged_capsule {
  IValue ivalue;
};

template <class T, class NullType>
c10::intrusive_ptr<T, NullType> IValue::moveToIntrusivePtr() {
  auto t = c10::intrusive_ptr<T, NullType>::reclaim(
      payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
          ? NullType::singleton()
          : static_cast<T*>(payload.u.as_intrusive_ptr));
  clearToNone();
  return t;
}
template <typename T, class NullType>
c10::intrusive_ptr<T, NullType> IValue::toIntrusivePtr() const {
  if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) {
    return c10::intrusive_ptr<T, NullType>();
  }
  c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
  return c10::intrusive_ptr<T, NullType>::reclaim(
      static_cast<T*>(payload.u.as_intrusive_ptr));
}

template <class T, class U>
intrusive_ptr<T> static_intrusive_pointer_cast(intrusive_ptr<U> r) {
  return intrusive_ptr<T>::reclaim(static_cast<T*>(r.release()));
}

template <class T, class U>
intrusive_ptr<T> dynamic_intrusive_pointer_cast(intrusive_ptr<U> r) {
  return intrusive_ptr<T>::reclaim(dynamic_cast<T*>(r.release()));
}

inline c10::intrusive_ptr<ivalue::Future> IValue::toFuture() && {
  AT_ASSERT(isFuture(), "Expected Future but got ", tagKind());
  return moveToIntrusivePtr<ivalue::Future>();
}
inline c10::intrusive_ptr<ivalue::Future> IValue::toFuture() const& {
  AT_ASSERT(isFuture(), "Expected Future but got ", tagKind());
  return toIntrusivePtr<ivalue::Future>();
}
inline c10::intrusive_ptr<ivalue::Await> IValue::toAwait() && {
  AT_ASSERT(isAwait(), "Expected Await but got ", tagKind());
  return moveToIntrusivePtr<ivalue::Await>();
}
inline c10::intrusive_ptr<ivalue::Await> IValue::toAwait() const& {
  AT_ASSERT(isAwait(), "Expected Await but got ", tagKind());
  return toIntrusivePtr<ivalue::Await>();
}
inline c10::intrusive_ptr<c10::RRefInterface> IValue::toRRef() && {
  AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind());
  return moveToIntrusivePtr<c10::RRefInterface>();
}
inline c10::intrusive_ptr<c10::RRefInterface> IValue::toRRef() const& {
  AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind());
  return toIntrusivePtr<c10::RRefInterface>();
}
inline c10::intrusive_ptr<at::Quantizer> IValue::toQuantizer() && {
  AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind());
  return moveToIntrusivePtr<at::Quantizer>();
}
inline c10::intrusive_ptr<at::Quantizer> IValue::toQuantizer() const& {
  AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind());
  return toIntrusivePtr<at::Quantizer>();
}
inline c10::intrusive_ptr<ivalue::ConstantString> IValue::toString() && {
  AT_ASSERT(isString(), "Expected String but got ", tagKind());
  return moveToIntrusivePtr<ivalue::ConstantString>();
}
inline c10::intrusive_ptr<ivalue::ConstantString> IValue::toString() const& {
  AT_ASSERT(isString(), "Expected String but got ", tagKind());
  return toIntrusivePtr<ivalue::ConstantString>();
}
inline c10::intrusive_ptr<ivalue::Object> IValue::toObject() && {
  AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
  return moveToIntrusivePtr<ivalue::Object>();
}
inline c10::intrusive_ptr<ivalue::Object> IValue::toObject() const& {
  AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
  return toIntrusivePtr<ivalue::Object>();
}
inline c10::intrusive_ptr<ivalue::PyObjectHolder> IValue::
    toPyObjectHolder() && {
  TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind());
  return moveToIntrusivePtr<ivalue::PyObjectHolder>();
}
inline c10::intrusive_ptr<ivalue::PyObjectHolder> IValue::toPyObjectHolder()
    const& {
  TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind());
  return toIntrusivePtr<ivalue::PyObjectHolder>();
}
inline c10::intrusive_ptr<ivalue::EnumHolder> IValue::toEnumHolder() && {
  TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind());
  return moveToIntrusivePtr<ivalue::EnumHolder>();
}
inline c10::intrusive_ptr<ivalue::EnumHolder> IValue::toEnumHolder() const& {
  TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind());
  return toIntrusivePtr<ivalue::EnumHolder>();
}
inline c10::complex<double> IValue::toComplexDouble() const {
  TORCH_INTERNAL_ASSERT(isComplexDouble(), "Expected ComplexDouble but got ", tagKind());
  auto ptr = toIntrusivePtr<ivalue::ComplexHolder>();
  return (*ptr).val;
}
inline at::Tensor IValue::toTensor() && {
  if (C10_UNLIKELY(!isTensor())) {
    reportToTensorTypeError();
  }
  auto result = std::move(payload.as_tensor);
  // As far as I can tell, omitting the usual explicit destructor call
  // is not UB in and of itself, and it's a slight perf win. The
  // destructor is a no-op, because the moved-from Tensor is
  // effectively an intrusive_ptr in the null state, so we don't need
  // the behavior for correctness reasons either. Leaving this
  // explanatory comment, including commented-out destructor call, to
  // make this abundantly clear.
  //
  // payload.as_tensor.~Tensor();
  clearToNone();
  return result;
}
inline at::Tensor& IValue::toTensor() & {
  if (C10_UNLIKELY(!isTensor())) {
    reportToTensorTypeError();
  }
  return payload.as_tensor;
}
inline const at::Tensor& IValue::toTensor() const& {
  if (C10_UNLIKELY(!isTensor())) {
    reportToTensorTypeError();
  }
  return payload.as_tensor;
}
inline c10::Storage IValue::toStorage() && {
  AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind());
  return c10::Storage(
      moveToIntrusivePtr<at::StorageImpl>());
}
inline c10::Storage IValue::toStorage() const& {
  AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind());
  return c10::Storage(toIntrusivePtr<at::StorageImpl>());
}
inline c10::Stream IValue::toStream() && {
  AT_ASSERT(isStream(), "Expected Stream but got ", tagKind());
  auto ptr = toIntrusivePtr<ivalue::StreamData3Holder>();
  return c10::Stream::unpack3((*ptr).val.stream_id,
                              (*ptr).val.device_index,
                              (*ptr).val.device_type);
}
inline c10::Stream IValue::toStream() const& {
  AT_ASSERT(isStream(), "Expected Stream but got ", tagKind());
  auto ptr = toIntrusivePtr<ivalue::StreamData3Holder>();
  return c10::Stream::unpack3((*ptr).val.stream_id,
                              (*ptr).val.device_index,
                              (*ptr).val.device_type);
}
inline c10::intrusive_ptr<caffe2::Blob> IValue::toBlob() && {
  AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind());
  return moveToIntrusivePtr<caffe2::Blob>();
}
inline c10::intrusive_ptr<caffe2::Blob> IValue::toBlob() const& {
  AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind());
  return toIntrusivePtr<caffe2::Blob>();
  ;
}
inline c10::intrusive_ptr<torch::CustomClassHolder> IValue::toCapsule() && {
  TORCH_INTERNAL_ASSERT(isCapsule());
  return moveToIntrusivePtr<torch::CustomClassHolder>();
}
inline c10::intrusive_ptr<torch::CustomClassHolder> IValue::toCapsule() const& {
  TORCH_INTERNAL_ASSERT(isCapsule());
  return toIntrusivePtr<torch::CustomClassHolder>();
}
inline at::Generator IValue::toGenerator() && {
  AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind());
  return at::Generator(moveToIntrusivePtr<at::GeneratorImpl>());
}
inline at::Generator IValue::toGenerator() const& {
  AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind());
  return at::Generator(toIntrusivePtr<at::GeneratorImpl>());
}
inline c10::SymInt IValue::toSymInt() && {
  AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind());
  if (isSymInt()) {
    return c10::SymInt(moveToIntrusivePtr<c10::SymNodeImpl>());
  } else {
    return c10::SymInt(payload.u.as_int);
  }
}
inline c10::SymInt IValue::toSymInt() const& {
  AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind());
  if (isSymInt()) {
    return c10::SymInt(toIntrusivePtr<c10::SymNodeImpl>());
  } else {
    return c10::SymInt(payload.u.as_int);
  }
}
inline c10::SymFloat IValue::toSymFloat() && {
  AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind());
  if (isSymFloat()) {
    return c10::SymFloat(moveToIntrusivePtr<c10::SymNodeImpl>());
  } else {
    return c10::SymFloat(payload.u.as_double);
  }
}
inline c10::SymFloat IValue::toSymFloat() const& {
  AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind());
  if (isSymFloat()) {
    return c10::SymFloat(toIntrusivePtr<c10::SymNodeImpl>());
  } else {
    return c10::SymFloat(payload.u.as_double);
  }
}
inline c10::SymBool IValue::toSymBool() && {
  AT_ASSERT(isSymBool() || isBool(), "Expected SymBool or boolean but got ", tagKind());
  if (isSymBool()) {
    return c10::SymBool(moveToIntrusivePtr<c10::SymNodeImpl>());
  } else {
    return c10::SymBool(payload.u.as_bool);
  }
}

inline c10::SymBool IValue::toSymBool() const& {
  AT_ASSERT(isSymBool() || isBool(), "Expected SymBool or boolean but got ", tagKind());
  if (isSymBool()) {
    return c10::SymBool(toIntrusivePtr<c10::SymNodeImpl>());
  } else {
    return c10::SymBool(payload.u.as_bool);
  }
}
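All of the to<Type>() accessors above follow one pattern: assert the tag, then either steal the payload (rvalue overloads, which leave the IValue empty) or bump the refcount (const-lvalue overloads, which leave it intact). The short sketch below is illustrative only, not part of the header; it assumes the usual ATen headers and a caller-supplied IValue.

// Illustrative sketch only; not part of ivalue_inl.h.
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>

void consume(c10::IValue v) {
  if (v.isTensor()) {
    // Rvalue overload: moves the tensor out and resets `v` to None.
    at::Tensor t = std::move(v).toTensor();
    t.add_(1);
  } else if (v.isString()) {
    // Const-lvalue overload: the IValue keeps ownership; we only borrow.
    const std::string& s = v.toString()->string();
    (void)s;
  }
}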
namespace ivalue {

void TORCH_API
checkCustomClassType(const ClassType* expected_type, const Type* actual_type);

template <typename T>
using Shared = c10::intrusive_ptr<T>;

// string
struct TORCH_API ConstantString final : c10::intrusive_ptr_target {
 private:
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const std::string str_;

 public:
  ConstantString(std::string str) : str_(std::move(str)) {}
  ConstantString(c10::string_view str) : str_(std::string(str)) {}
  static c10::intrusive_ptr<ConstantString> create(std::string str_);
  static c10::intrusive_ptr<ConstantString> create(c10::string_view str_);
  static c10::intrusive_ptr<ConstantString> create(const char* str_);

  const std::string& string() const {
    return str_;
  }
  c10::string_view string_view() const {
    return str_;
  }

  operator const std::string&() const {
    return string();
  }
  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const ConstantString& v);
};

struct Future;

struct TORCH_API TupleElements {
 private:
  size_t inlineSize_;
  // We represent TupleElements this way to save doing a heap
  // allocation in the common (at least for unpickling) case where we
  // have only 3 elements. We have our own union instead of
  // c10::SmallVector<IValue> because c10::SmallVector<IValue> always
  // stores the begin/end/capacity pointers, which would be a waste of
  // space in our use case.
  union {
    std::vector<IValue> elementsVector_;
    // Don't want to declare a std::array because the convenient
    // iteration and size members are a footgun in this case -- the
    // actual size of the array may be smaller than 3!
    // NOLINTNEXTLINE(*c-arrays*)
    IValue elementsInline_[3];
  };

  void destroyInline() {
    for (const auto ii : c10::irange(inlineSize_)) {
      elementsInline_[ii].~IValue();
    }
  }
 public:

  using iterator = IValue*;
  using const_iterator = const IValue*;

  TupleElements() : inlineSize_(0) {
    new (&elementsVector_) std::vector<IValue>();
  }

  explicit TupleElements(std::vector<IValue> elements)
      : inlineSize_(0), elementsVector_(std::move(elements)) {}

  explicit TupleElements(c10::ArrayRef<IValue> elements)
      : inlineSize_(elements.size() <= 3 ? elements.size() : 0) {
    switch (inlineSize_) {
      case 3:
        new (&elementsInline_[2]) IValue(elements[2]);
        [[fallthrough]];
      case 2:
        new (&elementsInline_[1]) IValue(elements[1]);
        [[fallthrough]];
      case 1:
        new (&elementsInline_[0]) IValue(elements[0]);
        break;
      case 0:
        new (&elementsVector_) std::vector<IValue>(elements.begin(), elements.end());
        break;
    }
  }

  explicit TupleElements(IValue&& e1)
      : inlineSize_(1) {
    new (&elementsInline_[0]) IValue(std::move(e1));
  }

  explicit TupleElements(IValue&& e1, IValue&& e2)
      : inlineSize_(2) {
    new (&elementsInline_[0]) IValue(std::move(e1));
    new (&elementsInline_[1]) IValue(std::move(e2));
  }

  explicit TupleElements(IValue&& e1, IValue&& e2, IValue&& e3)
      : inlineSize_(3) {
    new (&elementsInline_[0]) IValue(std::move(e1));
    new (&elementsInline_[1]) IValue(std::move(e2));
    new (&elementsInline_[2]) IValue(std::move(e3));
  }

  ~TupleElements() {
    if (inlineSize_) {
      destroyInline();
    } else {
      elementsVector_.~vector();
    }
  }

  // It would be nice to make this noncopyable to prevent people from
  // writing code like `auto output =
  // forward(...).toTupleRef().elements()` (which does refcount bumps on
  // each element, unlike the more efficient but verbose
  // ```
  // auto outputIntrusivePtr = forward(...).toTuple();
  // const auto& output = outputIntrusivePtr->elements();
  // ```
  // ), but there is simply an overwhelming amount of code that does
  // it the inefficient way.
  // See also operator std::vector below.
  TupleElements(const TupleElements& rhs)
      : inlineSize_(rhs.inlineSize_) {
    if (rhs.inlineSize_) {
      for (const auto ii : c10::irange(inlineSize_)) {
        new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]);
      }
    } else {
      new (&elementsVector_) std::vector<IValue>(rhs.elementsVector_);
    }
  }

  TupleElements& operator=(const TupleElements& rhs) {
    if (inlineSize_) {
      if (rhs.inlineSize_) {
        for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) {
          elementsInline_[ii] = rhs.elementsInline_[ii];
        }
        if (rhs.inlineSize_ > inlineSize_) {
          for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) {
            new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]);
          }
        } else {
          for (const auto ii : c10::irange(rhs.inlineSize_, inlineSize_)) {
            elementsInline_[ii].~IValue();
          }
        }
      } else {
        destroyInline();
        new (&elementsVector_) std::vector<IValue>(rhs.elementsVector_);
      }
    } else {
      if (rhs.inlineSize_) {
        elementsVector_.~vector();
        for (const auto ii : c10::irange(rhs.inlineSize_)) {
          new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]);
        }
      } else {
        elementsVector_ = rhs.elementsVector_;
      }
    }
    inlineSize_ = rhs.inlineSize_;
    return *this;
  }

  TupleElements(TupleElements&& rhs) noexcept
      : inlineSize_(rhs.inlineSize_) {
    if (inlineSize_) {
      for (const auto ii : c10::irange(inlineSize_)) {
        new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii]));
      }
    } else {
      new (&elementsVector_) std::vector<IValue>(std::move(rhs.elementsVector_));
    }
  }

  TupleElements& operator=(TupleElements&& rhs) noexcept {
    if (inlineSize_) {
      if (rhs.inlineSize_) {
        for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) {
          elementsInline_[ii] = std::move(rhs.elementsInline_[ii]);
        }
        if (rhs.inlineSize_ > inlineSize_) {
          for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) {
            new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii]));
          }
        } else {
          for (const auto ii : c10::irange(rhs.inlineSize_, inlineSize_)) {
            elementsInline_[ii].~IValue();
          }
        }
      } else {
        destroyInline();
        new (&elementsVector_) std::vector<IValue>(std::move(rhs.elementsVector_));
      }
    } else {
      if (rhs.inlineSize_) {
        elementsVector_.~vector();
        for (const auto ii : c10::irange(rhs.inlineSize_)) {
          new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii]));
        }
      } else {
        elementsVector_ = std::move(rhs.elementsVector_);
      }
    }
    inlineSize_ = rhs.inlineSize_;
    return *this;
  }

  C10_NODISCARD c10::ArrayRef<IValue> asArrayRef() const {
    if (inlineSize_) {
      return c10::ArrayRef<IValue>(elementsInline_, inlineSize_);
    } else {
      return elementsVector_;
    }
  }

  // Mimic implicit conversion from std::vector to ArrayRef.
  operator c10::ArrayRef<IValue>() const {
    return asArrayRef();
  }

  static size_t hash(const TupleElements& v) {
    return c10::hash<c10::ArrayRef<IValue>>()(v.asArrayRef());
  }

  void setContents(std::vector<IValue>&& contents) {
    if (inlineSize_) {
      destroyInline();
      new (&elementsVector_) std::vector<IValue>(std::move(contents));
      inlineSize_ = 0;
    } else {
      elementsVector_ = std::move(contents);
    }
  }

  C10_NODISCARD bool empty() const {
    return inlineSize_ ? false : elementsVector_.empty();
  }

  C10_NODISCARD size_t size() const {
    return inlineSize_ ? inlineSize_ : elementsVector_.size();
  }

  C10_NODISCARD IValue& operator[](size_t idx) {
    if (inlineSize_) {
      return elementsInline_[idx];
    } else {
      return elementsVector_[idx];
    }
  }

  C10_NODISCARD const IValue& operator[](size_t idx) const {
    if (inlineSize_) {
      return elementsInline_[idx];
    } else {
      return elementsVector_[idx];
    }
  }

  C10_NODISCARD IValue& at(size_t idx) {
    if (inlineSize_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3);
      TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_);
      return elementsInline_[idx];
    } else {
      return elementsVector_.at(idx);
    }
  }

  C10_NODISCARD const IValue& at(size_t idx) const {
    if (inlineSize_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3);
      TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_);
      return elementsInline_[idx];
    } else {
      TORCH_CHECK(idx < elementsVector_.size(), "TupleElements: invalid index Index = ", idx, "; Length = ", elementsVector_.size());
      return elementsVector_.at(idx);
    }
  }

  C10_NODISCARD iterator begin() {
    if (inlineSize_) {
      return elementsInline_;
    } else {
      return elementsVector_.data();
    }
  }

  C10_NODISCARD iterator end() {
    if (inlineSize_) {
      return elementsInline_ + inlineSize_;
    } else {
      return elementsVector_.data() + elementsVector_.size();
    }
  }

  C10_NODISCARD const_iterator begin() const {
    if (inlineSize_) {
      return elementsInline_;
    } else {
      return elementsVector_.data();
    }
  }

  C10_NODISCARD const_iterator end() const {
    if (inlineSize_) {
      return elementsInline_ + inlineSize_;
    } else {
      return elementsVector_.data() + elementsVector_.size();
    }
  }

  C10_NODISCARD const_iterator cbegin() const {
    return begin();
  }

  C10_NODISCARD const_iterator cend() const {
    return end();
  }

  C10_NODISCARD std::vector<IValue> vec() const & {
    return asArrayRef().vec();
  }

  C10_NODISCARD IValue& back() {
    return *(end() - 1);
  }

  C10_NODISCARD const IValue& back() const {
    return *(end() - 1);
  }

  C10_NODISCARD std::vector<IValue> vec() && {
    std::vector<IValue> result;
    result.reserve(size());
    for (auto&& iv : *this) {
      result.push_back(std::move(iv));
    }
    return result;
  }

  // More compatibility shims for the overwhelming amount of code that
  // likes to copy tuple elements into a vector; see comment above the
  // copy constructor.
  operator std::vector<IValue>() const & {
    return vec();
  }

  operator std::vector<IValue>() && {
    return vec();
  }
};
template <typename T>
struct TupleTypeFactory {};

template <>
struct TORCH_API TupleTypeFactory<TupleType> {
  static TupleTypePtr create(std::vector<TypePtr> types) {
    return TupleType::create(std::move(types));
  }
  static TupleTypePtr fallback(const Type& type);
};

template <>
struct TORCH_API TupleTypeFactory<c10::DynamicType> {
  static DynamicTypePtr create(const std::vector<TypePtr>& elemTypes);
  static DynamicTypePtr fallback(const Type&);
};

struct TORCH_API Tuple : c10::intrusive_ptr_target {
 private:
  TupleElements elements_;
  mutable c10::TypePtr type_; // lazily computed for unnamed tuples

 public:
  // named tuples have additional type information, so we
  // directly create them tagged
  static c10::intrusive_ptr<Tuple> createNamed(
      std::vector<IValue> elements_,
      c10::TypePtr type_) {
    return c10::make_intrusive<Tuple>(std::move(elements_), std::move(type_));
  }

  static c10::intrusive_ptr<Tuple> createNamed(
      TupleElements elements_,
      std::shared_ptr<TupleType> type_) {
    return c10::make_intrusive<Tuple>(std::move(elements_), std::move(type_));
  }

  static c10::intrusive_ptr<Tuple> createNamed(
      std::initializer_list<IValue> elements_,
      std::shared_ptr<TupleType> type_) {
    return createNamed(TupleElements(c10::ArrayRef<IValue>(elements_)), std::move(type_));
  }

  // MSVC apparently can't disambiguate the other two overloads of
  // create when passed an initializer_list without this.
  static c10::intrusive_ptr<Tuple> create(std::initializer_list<IValue> elements_) {
    return create(c10::ArrayRef<IValue>(elements_));
  }

  static c10::intrusive_ptr<Tuple> create(std::vector<IValue> elements_) {
    return c10::make_intrusive<Tuple>(std::move(elements_));
  }

  static c10::intrusive_ptr<Tuple> create(TupleElements elements_) {
    return c10::make_intrusive<Tuple>(std::move(elements_));
  }

  static c10::intrusive_ptr<Tuple> create(c10::ArrayRef<IValue> elements_) {
    return create(TupleElements(elements_));
  }

  static c10::intrusive_ptr<Tuple> create(IValue e1) {
    return c10::make_intrusive<Tuple>(std::move(e1));
  }

  static c10::intrusive_ptr<Tuple> create(IValue e1, IValue e2) {
    return c10::make_intrusive<Tuple>(std::move(e1), std::move(e2));
  }

  static c10::intrusive_ptr<Tuple> create(IValue e1, IValue e2, IValue e3) {
    return c10::make_intrusive<Tuple>(std::move(e1), std::move(e2), std::move(e3));
  }

 private:
  // Workaround inability to use `>` operator in template argument list.
  template <typename... Args>
  static constexpr bool hasMoreThanThreeArgs() {
    return sizeof...(Args) > 3;
  }

 public:
  template <typename... Args>
  static c10::intrusive_ptr<Tuple> create(Args&&... elements_) {
    switch (sizeof...(Args)) {
      case 1:
      case 2:
      case 3:
        return create(IValue(std::forward<Args>(elements_))...);
      default:
        return create(
            std::vector<IValue>{IValue(std::forward<Args>(elements_))...});
    }
  }

  // Again, it would be nice to make this noncopyable, but there's a
  // lot of extant code that copies Tuples.
  // Tuple(const Tuple& rhs) = delete;

  const TupleElements& elements() const& {
    return elements_;
  }

  TupleElements elements() && {
    return std::move(elements_);
  }

  void setElements(std::vector<IValue>&& elements) {
    elements_.setContents(std::move(elements));
  }

  void setElements(TupleElements&& elements) {
    elements_ = std::move(elements);
  }

  void unsafeSetElement(size_t idx, const IValue& element) {
    elements_[idx] = element;
  }

  void unsafeSetElement(size_t idx, IValue&& element) {
    elements_[idx] = std::move(element);
  }

  size_t size() const {
    return elements_.size();
  }

  template <typename T = c10::TupleType>
  std::shared_ptr<T> type() const {
    if (!type_) {
      type_ = TupleTypeFactory<T>::create(fmap(elements(), [&](const IValue& v) {
        return v.type<typename T::ElementType>();
      }));
    }
    if (auto t = type_->cast<T>()) {
      return t;
    }
    return TupleTypeFactory<T>::fallback(*type_);
  }

  static size_t hash(const Tuple& t) {
    return c10::get_hash(t.elements());
  }

  TORCH_API friend bool operator==(
      const ivalue::Tuple& lhs,
      const ivalue::Tuple& rhs);

 private:
  // NOTE: If we try to avoid the overloads without
  // `std::shared_ptr<TupleType> type` by defaulting it to nullptr, we
  // end up having to call (part of) the shared_ptr destructor for
  // `type` even though we should know statically it won't do
  // anything.
  explicit Tuple(std::vector<IValue> elements)
      : elements_(std::move(elements)){}

  explicit Tuple(std::vector<IValue> elements, c10::TypePtr type)
      : elements_(std::move(elements)), type_(std::move(type)) {}

  explicit Tuple(TupleElements&& elements)
      : elements_(std::move(elements)) {}

  explicit Tuple(TupleElements&& elements, std::shared_ptr<TupleType> type)
      : elements_(std::move(elements)), type_(std::move(type)) {}

  explicit Tuple(IValue&& e1)
      : elements_(std::move(e1)) {}

  explicit Tuple(IValue&& e1, std::shared_ptr<TupleType> type)
      : elements_(std::move(e1)), type_(std::move(type)) {}

  explicit Tuple(IValue&& e1, IValue&& e2)
      : elements_(std::move(e1), std::move(e2)) {}

  explicit Tuple(IValue&& e1, IValue&& e2, std::shared_ptr<TupleType> type)
      : elements_(std::move(e1), std::move(e2)), type_(std::move(type)) {}

  explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3)
      : elements_(std::move(e1), std::move(e2), std::move(e3)) {}

  explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3, std::shared_ptr<TupleType> type)
      : elements_(std::move(e1), std::move(e2), std::move(e3)), type_(std::move(type)) {}

  friend class c10::intrusive_ptr<Tuple>;
};

struct Object;
struct PyObjectHolder;
struct EnumHolder;
} // namespace ivalue
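Because TupleElements keeps up to three IValues in inline storage, the cheap way to read a tuple is to keep the c10::intrusive_ptr<Tuple> alive and borrow its elements, exactly as the comment on the TupleElements copy constructor recommends. The sketch below is illustrative only, not part of the header, and assumes <ATen/core/ivalue.h> is available.

// Illustrative sketch only; not part of ivalue_inl.h.
#include <ATen/core/ivalue.h>

void tuple_usage() {
  // Three or fewer elements stay in TupleElements' inline storage, so no
  // separate heap allocation is made for the element array.
  auto tup = c10::ivalue::Tuple::create(
      c10::IValue(42), c10::IValue(1.5), c10::IValue(true));

  // Keep the intrusive_ptr and borrow the elements: no per-element refcount bumps.
  const auto& elems = tup->elements();
  int64_t first = elems[0].toInt();
  (void)first;

  // Re-box the whole tuple as an IValue when needed.
  c10::IValue boxed(std::move(tup));
  (void)boxed;
}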
// Future
struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target {
 private:
  // Keep this private in order to force users to go through make_intrusive and
  // thus prevent creating a Future that's not held by an intrusive_ptr.
  explicit Future(TypePtr type, std::vector<c10::Device> devices={})
      : type_(std::move(type)),
        impl_(getTypeOfDevices(devices)),
        devices_(sortAndDeduplicateDevices(impl_, std::move(devices))) {}

  friend c10::intrusive_ptr<Future>;

  struct FutureCallback {
    std::function<void(Future&)> callback;
    bool uses_future; // whether the Future& passed in is actually used

    template <typename T>
    FutureCallback(T callback, bool uses_future)
        : callback(std::move(callback)), uses_future(uses_future) {}
  };

 public:
  Future(const Future&) = delete;
  Future(Future&&) = delete;
  Future& operator=(const Future&) = delete;
  Future& operator=(Future&&) = delete;

  struct TORCH_API FutureError final : public std::exception {
    explicit FutureError(std::string&& error_msg_)
        : error_msg(std::move(error_msg_)) {}

    FutureError() = default;

    const char* what() const noexcept override {
      return error_msg.c_str();
    }

    std::string error_msg;
  };

  /**
   * Wait on the future until it completes.
   */
  void wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    finished_cv_.wait(lock, [&]() -> bool { return completed_; });
    synchronizeWithCurrentStreams();
  }

  /**
   * Wait on the future until it completes and throw an
   * exception if an error exists.
   */
  void waitAndThrow() {
    wait();

    if (eptr_) {
      std::rethrow_exception(eptr_);
    }
  }

  /**
   * Explicitly mark the future as completed with the output value. Optionally,
   * the storages for all tensors in IValue can be passed as well. The DataPtrs
   * of these storages are used to synchronize CUDA streams. If storages isn't
   * given we will attempt to extract it from the value, if we need to (this
   * happens if a non-empty set of devices was given to the constructor). Thus
   * one only needs to provide storages when 1) they cannot be extracted through
   * IValue::getSubValues() or through pickling in case of Python object; or
   * when 2) customized storage extraction is more efficient.
   */
  using WeakStorage = c10::weak_intrusive_ptr<c10::StorageImpl>;
  void markCompleted(
      IValue value,
      c10::optional<std::vector<WeakStorage>> storages = c10::nullopt) {
    // Start by performing all steps that can throw, before setting any field.
    // Do this before even acquiring the mutex, because extractStorages might
    // acquire the GIL, which could lead to a lock inversion with our mutex.
    // See https://github.com/pytorch/pytorch/issues/58239.
    std::vector<WeakStorage> actualStorages;
    std::vector<c10::Device> usedDevices;
    try {
      // FIXME We should always extract DataPtrs, in order to catch the case of
      // users using CUDA values but forgetting to set devices, which currently
      // leads to a silent synchronization/correctness issue. However, as this
      // might worsen perf in CPU-only cases, we should only do so after careful
      // benchmarks.
      if (impl_.type() != c10::kCPU) {
        actualStorages =
            storages.has_value() ? std::move(*storages) : extractStorages(value);
        usedDevices = getDevicesOfStorages(impl_, actualStorages);
        ensureIsSubsetOfDevices(usedDevices, devices_);
      }
    } catch (const std::exception&) {
      setError(std::current_exception());
      return;
    }

    std::unique_lock<std::mutex> lock(mutex_);
    TORCH_CHECK(
        !completed(),
        "Attempting to mark a completed Future as complete again. Note that "
        "a Future can only be marked completed once.");

    // Only set value_ and completed_ flag once all checks and preparation steps
    // have returned successfully to allow for proper error propagation.
    value_ = std::move(value);
    completed_ = true;

    currentDevice_ = impl_.getDevice();
    storages_ = std::move(actualStorages);
    for (const c10::Device& device : usedDevices) {
      c10::Event event(impl_.type());
      event.record(impl_.getStream(device));
      events_.push_back(std::move(event));
    }

    std::vector<FutureCallback> cbs;
    cbs.swap(callbacks_);
    lock.unlock();

    finished_cv_.notify_all();
    for (auto& callback : cbs) {
      invokeCallback(std::move(callback.callback), callback.uses_future);
    }
  }

  void markCompleted() {
    markCompleted(IValue{});
  }

  void setError(std::exception_ptr eptr) {
    std::unique_lock<std::mutex> lock(mutex_);
    setErrorInternal(std::move(eptr), lock);
  }

  void setErrorIfNeeded(std::exception_ptr eptr) {
    std::unique_lock<std::mutex> lock(mutex_);
    if (completed_) {
      // This should be rare and shouldn't cause log spew. Its important to
      // log errors and thats why we have this log here.
      std::string msg = c10::str(
          "Skipping setting following error on the Future since "
          "it is already marked completed (this is not necessarily "
          "an error):\n",
          tryRetrieveErrorMessageInternal(std::move(eptr)));
      if (eptr_) {
        msg += c10::str(
            ", \nOriginal exception:\n",
            tryRetrieveErrorMessageInternal(eptr_));
      }
      LOG(INFO) << msg;
      return;
    } else {
      setErrorInternal(std::move(eptr), lock);
    }
  }

  // Get the result of the current future.
  IValue value() {
    std::unique_lock<std::mutex> lock(mutex_);
    AT_ASSERT(completed());
    if (eptr_) {
      std::rethrow_exception(eptr_);
    }
    return value_;
  }

  // This accessor should only be used if we know that the future is
  // completed() with no error.
  const IValue& constValue() const {
    std::unique_lock<std::mutex> lock(mutex_);
    AT_ASSERT(completed());
    TORCH_INTERNAL_ASSERT(
      !eptr_,
      "value() accessor should only be used when future is not completed with ",
      "an error, but future had the following error: ",
      tryRetrieveErrorMessageInternal(eptr_)
    );
    return value_;
  }

  // This accessor should only be used if we know that the future is
  // completed() with no error.
  const std::vector<WeakStorage>& storages() const {
    std::unique_lock<std::mutex> lock(mutex_);
    AT_ASSERT(completed());
    AT_ASSERT(!eptr_);
    return storages_;
  }

  /**
   * Add a callback to the future.
   * The callbacks will be executed once the future completes.
   * If the future has already completed,
   * this function will execute the callback immediately.
   */
  template <typename T>
  void addCallback(T callback, bool uses_future = true) {
#if __cpp_lib_is_invocable >= 201703
    static_assert(
        std::is_invocable_r<void, T, Future&>::value,
        "The callback must have signature void(Future&)");
#endif

    std::unique_lock<std::mutex> lock(mutex_);
    if (completed()) {
      lock.unlock();
      invokeCallback(std::move(callback), uses_future);
      return;
    }
    callbacks_.emplace_back(std::move(callback), uses_future);
  }

  /**
   * Add a callback to the future, and return another Future to hold the return
   * value of the callback. This is necessary when the callback provider needs
   * to know for sure when the callback has finished.
   */
  template <typename T>
  c10::intrusive_ptr<Future> then(T callback, TypePtr type) {
    using IValueWithStorages = std::tuple<IValue, std::vector<WeakStorage>>;
#if __cpp_lib_is_invocable >= 201703
    static_assert(
        std::disjunction<
            std::is_invocable_r<IValue, T, Future&>,
            std::is_invocable_r<IValueWithStorages, T, Future&>>::value,
        "The callback must have signature IValue(Future&) or "
        "std::tuple<IValue, std::vector<Storage>>(Future&)");
#endif
    auto childFut = createInstance(::std::move(type));
    addCallback([childFut,
                 cb = std::move(callback)](Future& parentFut) mutable {
      try {
        if constexpr (::std::is_convertible_v<typename c10::invoke_result_t<T &&, Future&>, IValueWithStorages>) {
          auto [ivalue, storages] = cb(parentFut);
          childFut->markCompleted(::std::move(ivalue), ::std::move(storages));
        } else {
          childFut->markCompleted(cb(parentFut));
        }
      } catch (std::exception&) {
        childFut->setError(std::current_exception());
      }
    });
    return childFut;
  }

  template <typename T>
  c10::intrusive_ptr<Future> thenAsync(T callback, TypePtr type) {
#if __cpp_lib_is_invocable >= 201703
    static_assert(
        std::is_invocable_r<c10::intrusive_ptr<Future>, T, Future&>::value,
        "The callback must have signature c10::intrusive_ptr<Future>(Future&)");
#endif
    auto childFut = createInstance(std::move(type));
    addCallback(
        [childFut, cb = std::move(callback)](Future& parentFut) mutable {
          c10::intrusive_ptr<Future> intermediateFut;
          try {
            intermediateFut = cb(parentFut);
          } catch (std::exception&) {
            childFut->setError(std::current_exception());
            return;
          }
          intermediateFut->addCallback(
              [childFut = std::move(childFut)](Future& intermediateFut) {
                if (intermediateFut.hasError()) {
                  childFut->setError(intermediateFut.exception_ptr());
                } else {
                  childFut->markCompleted(
                      intermediateFut.value(), intermediateFut.storages());
                }
              });
        });
    return childFut;
  }

  // Tries to retrieve the error message from std::exception_ptr.
  std::string tryRetrieveErrorMessage() const {
    TORCH_CHECK(hasError(), "No error present on the future.");
    std::unique_lock<std::mutex> lock(mutex_);
    return tryRetrieveErrorMessageInternal(eptr_);
  }

  // Check if the current future has completed
  bool completed() const {
    return completed_;
  }

  bool hasValue() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return completed_ && !eptr_;
  }

  bool hasError() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return eptr_ ? true : false;
  }

  std::exception_ptr exception_ptr() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return eptr_;
  }

  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const Future& v);

  const TypePtr& elementType() const {
    return type_;
  }

  const std::vector<c10::Device>& devices() const {
    return devices_;
  }

  // This method should be used when one intends to manually create a child
  // future, for example when implementing a customized version of then().
  c10::intrusive_ptr<Future> createInstance(at::TypePtr type) {
    return c10::make_intrusive<Future>(std::move(type), devices_);
  }

 private:

  // This method should always be used when invoking a callback (regardless of
  // how/when that happens) as it will ensure that the proper "environment" is
  // set up before running the callback, as in, it will set up the CUDA streams,
  // synchronize them with the value, and so on (if needed).
  template<typename T>
  void invokeCallback(T callback, bool uses_future) {
#if __cpp_lib_is_invocable >= 201703
    static_assert(
        std::is_invocable_r<void, T, Future&>::value,
        "The callback must have signature void(Future&)");
#endif

    // The synchronization performed below shouldn't be needed when the future
    // is not used by the callback.
    if (uses_future) {
      c10::OptionalDeviceGuard deviceGuard(currentDevice_);

      std::vector<c10::Stream> streams;
      streams.reserve(devices_.size());
      for (const c10::Device& device : devices_) {
        streams.push_back(impl_.getStreamFromGlobalPool(device));
      }
      c10::MultiStreamGuard streamGuard(streams);
      synchronizeWithCurrentStreams();
      callback(*this);
    } else {
      callback(*this);
    }
  }

  // This method should be called before this future's value is used, as it
  // ensures that the CUDA streams that are "current" at the callsite properly
  // synchronize with the value.
  void synchronizeWithCurrentStreams() {
    for (c10::Event& event : events_) {
      event.block(impl_.getStream(event.device()));
    }

    for (const WeakStorage& weak_storage : storages_) {
      c10::intrusive_ptr<c10::StorageImpl> storage = weak_storage.lock();
      if (!storage) {
        continue;
      }
      if (!storage->device().is_cpu()) {
        impl_.recordDataPtrOnStream(
            storage->data_ptr(), impl_.getStream(storage->device()));
      }
    }
  }

  void setErrorInternal(
      std::exception_ptr eptr,
      std::unique_lock<std::mutex>& lock) {
    TORCH_CHECK(
        !eptr_,
        "Error already set on this Future: ",
        tryRetrieveErrorMessageInternal(eptr_),
        ", trying to set error: ",
        tryRetrieveErrorMessageInternal(eptr));
    TORCH_INTERNAL_ASSERT(!completed(), "Future is already marked completed");
    completed_ = true;
    eptr_ = std::move(eptr);

    std::vector<FutureCallback> cbs;
    cbs.swap(callbacks_);
    lock.unlock();

    finished_cv_.notify_all();
    for (auto& callback : cbs) {
      invokeCallback(std::move(callback.callback), callback.uses_future);
    }
  }

  // Tries to retrieve the error message from std::exception_ptr.
  std::string tryRetrieveErrorMessageInternal(std::exception_ptr eptr) const {
    try {
      std::rethrow_exception(std::move(eptr));
    } catch (const std::exception& e) {
      return e.what();
    } catch (...) {
      return "Unknown Exception Type";
    }
  }

  // Defined in ivalue.cpp.
  static std::vector<WeakStorage> extractStorages(
      const at::IValue& value);

  static std::vector<c10::Device> getDevicesOfStorages(
      const c10::impl::VirtualGuardImpl& impl,
      const std::vector<WeakStorage>& storages) {
    c10::DeviceIndex deviceCount = impl.deviceCount();
    std::vector<bool> isDeviceUsed(deviceCount, false);
    for (const WeakStorage& weak_storage : storages) {
      c10::intrusive_ptr<c10::StorageImpl> storage = weak_storage.lock();
      if (!storage) {
        continue;
      }
      c10::Device device = storage->device();
      if (!device.is_cpu()) {
        TORCH_CHECK_VALUE(
            device.type() == impl.type(),
            "Expected all data ptrs to be on a device of type ",
            impl.type(),
            ", got one on device ",
            device);
        isDeviceUsed[device.index()] = true;
      }
    }
    std::vector<c10::Device> devices;
    for (c10::DeviceIndex idx = 0; idx < deviceCount; idx++) {
      if (isDeviceUsed[idx]) {
        devices.emplace_back(impl.type(), idx);
      }
    }
    return devices;
  }

  static std::string formatSetOfDevices(
      const std::vector<c10::Device>& devices) {
    if (devices.empty()) {
      return "(none)";
    }
    std::ostringstream oss;
    oss << devices[0];
    for (const auto idx : c10::irange(1, devices.size())) {
      if (idx == devices.size() - 1) {
        oss << " and ";
      } else {
        oss << ", ";
      }
      oss << devices[idx];
    }
    return oss.str();
  }

  static c10::DeviceType getTypeOfDevices(
      const std::vector<c10::Device>& devices) {
    if (devices.empty()) {
      return c10::kCPU;
    }
    c10::DeviceType deviceType = devices[0].type();
    for (const auto idx : c10::irange(1, devices.size())) {
      TORCH_CHECK_VALUE(
          devices[idx].type() == deviceType,
          "Expected all devices to be of the same type, but got a mismatch between ",
          devices[0],
          " and ",
          devices[idx]);
    }
    return deviceType;
  }

  // We need devices to be sorted in order to use ensureIsSubsetOfDevices.
  static std::vector<c10::Device> sortAndDeduplicateDevices(
      const c10::impl::VirtualGuardImpl& /*impl*/,
      std::vector<c10::Device> devices) {
    std::sort(
      devices.begin(), devices.end(),
      [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); });
    // Deduplicate by compacting.
    size_t targetIdx = 0;
    for (const auto sourceIdx : c10::irange(devices.size())) {
      TORCH_CHECK_VALUE(
          devices[sourceIdx].has_index(),
          "Expected devices to have indices, got ", devices[sourceIdx]);
      if (targetIdx > 0 && devices[targetIdx - 1].index() == devices[sourceIdx].index()) {
        // It's a duplicate, skip it.
        continue;
      }
      if (sourceIdx != targetIdx) {
        devices[targetIdx] = devices[sourceIdx];
      }
      targetIdx++;
    }
    // If there were duplicates there's now a gap at the end: trim it. Resizing
    // requires the item type to be default-constructible (which c10::Device is
    // not) because in principle it could be required to create new items. Since
    // we know we'll shrink the vector, we provide a custom dummy value instead.
    devices.resize(targetIdx, c10::Device(c10::kCPU));
    return devices;
  }

  static void ensureIsSubsetOfDevices(
      const std::vector<c10::Device>& subset,
      const std::vector<c10::Device>& superset) {
    // We assume the devices in both vectors have the same consistent type, and
    // their indices are unique and sorted.
    std::vector<c10::Device> excessDevices;
    std::set_difference(
        subset.begin(),
        subset.end(),
        superset.begin(),
        superset.end(),
        std::back_inserter(excessDevices),
        [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); });
    TORCH_CHECK_VALUE(
        excessDevices.empty(),
        "The result contained tensors residing on device(s) ",
        formatSetOfDevices(excessDevices),
        " which are not among the expected device(s) ",
        formatSetOfDevices(superset));
  }

  mutable std::mutex mutex_;
  std::atomic_bool completed_ = {false}; // is this future complete
  std::condition_variable finished_cv_;

  IValue value_; // when finished the value
  TypePtr type_;
  std::vector<FutureCallback> callbacks_;
  std::exception_ptr eptr_;

  // An upcast pointer to a virtual class which allows us to manipulate events,
  // streams, ... in a generic way, without an explicit dependency on CUDA.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const c10::impl::VirtualGuardImpl impl_;

  // The device that was current when markCompleted was called, which we'll
  // restore when invoking callbacks. It's optional because we'll only store it
  // if the future completes successfully.
  optional<c10::Device> currentDevice_;

  // The events that correspond to the completion of the async I/O kernels. They
  // are recorded on the appropriate streams when the future is marked completed
  // and can then be queried/waited/blocked on. There is one event for each
  // distinct device on which the value's tensors reside.
  std::vector<c10::Event> events_;

  // A cached version of the storages extracted from the value when the future
  // is first marked completed.
  std::vector<WeakStorage> storages_;

  // The bounding set of devices that this future, and any of its children, is
  // allowed to use. This is a superset of the set of devices used by the events
  // above. We need this to know what streams (for which devices) to set as
  // current when invoking a callback, thus allowing the callback to use devices
  // that the parent future didn't use. This field is set to the value provided
  // in the constructor and will be "inherited" by all child futures.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const std::vector<c10::Device> devices_;
};
1405 |
+
struct C10_EXPORT ivalue::Await final : c10::intrusive_ptr_target {
|
1406 |
+
private:
|
1407 |
+
explicit Await(TypePtr elType, std::function<IValue()> fn)
|
1408 |
+
: elType_(std::move(elType)), type_(AwaitType::create(elType_)), fn_(std::move(fn)) {}
|
1409 |
+
|
1410 |
+
explicit Await(TypePtr elType) : elType_(std::move(elType)), type_(AwaitType::create(elType_)) { }
|
1411 |
+
|
1412 |
+
friend c10::intrusive_ptr<Await>;
|
1413 |
+
|
1414 |
+
public:
|
1415 |
+
Await(const Await&) = delete;
|
1416 |
+
Await(Await&&) = delete;
|
1417 |
+
Await& operator=(const Await&) = delete;
|
1418 |
+
Await& operator=(Await&&) = delete;
|
1419 |
+
|
1420 |
+
IValue wait() {
|
1421 |
+
if (!completed_) {
|
1422 |
+
TORCH_CHECK(fn_, "Incomplete Await: fn can't be None");
|
1423 |
+
value_ = fn_();
|
1424 |
+
completed_ = true;
|
1425 |
+
args_ = {};
|
1426 |
+
}
|
1427 |
+
return value_;
|
1428 |
+
}
|
1429 |
+
|
1430 |
+
IValue value() {
|
1431 |
+
TORCH_CHECK(completed_, "Await must be completed");
|
1432 |
+
return value_;
|
1433 |
+
}
|
1434 |
+
|
1435 |
+
void setFn(std::function<IValue()> fn) {
|
1436 |
+
fn_ = std::move(fn);
|
1437 |
+
}
|
1438 |
+
|
1439 |
+
bool completed() {
|
1440 |
+
return completed_;
|
1441 |
+
}
|
1442 |
+
|
1443 |
+
void markCompleted(IValue value) {
|
1444 |
+
value_ = std::move(value);
|
1445 |
+
completed_ = true;
|
1446 |
+
}
|
1447 |
+
|
1448 |
+
TORCH_API friend std::ostream& operator<<(
|
1449 |
+
std::ostream& out,
|
1450 |
+
const Await& v);
|
1451 |
+
|
1452 |
+
const TypePtr& elementType() const {
|
1453 |
+
return elType_;
|
1454 |
+
}
|
1455 |
+
|
1456 |
+
const TypePtr& type() const {
|
1457 |
+
return type_;
|
1458 |
+
}
|
1459 |
+
|
1460 |
+
void setArgs(std::vector<IValue> args) {
|
1461 |
+
args_ = std::move(args);
|
1462 |
+
}
|
1463 |
+
|
1464 |
+
std::vector<IValue>& args() {
|
1465 |
+
return args_;
|
1466 |
+
}
|
1467 |
+
|
1468 |
+
private:
|
1469 |
+
TypePtr elType_;
|
1470 |
+
TypePtr type_;
|
1471 |
+
std::vector<IValue> args_;
|
1472 |
+
std::function<IValue()> fn_;
|
1473 |
+
IValue value_;
|
1474 |
+
bool completed_{};
|
1475 |
+
};
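// NOTE [illustrative sketch, not part of the original header]: one possible way
// an Await could be constructed and forced, assuming an element type of Int.
// Hypothetical usage, shown only to clarify the lazy-evaluation contract:
//
//   auto aw = c10::make_intrusive<c10::ivalue::Await>(
//       c10::IntType::get(), []() { return c10::IValue(42); });
//   // The first wait() runs fn_ and caches the result; later calls to wait()
//   // or value() return the cached IValue without re-running fn_.
//   c10::IValue v = aw->wait();  // 42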
|
1476 |
+
|
1477 |
+
// Input is a list of Futures with the same target type.
|
1478 |
+
// Output is a Future to the List of completed Futures.
|
1479 |
+
TORCH_API intrusive_ptr<ivalue::Future> collectAll(
|
1480 |
+
const c10::List<c10::intrusive_ptr<ivalue::Future>>& srcs);
|
1481 |
+
// Input is a List of Futures with the same target type.
|
1482 |
+
// Output is a Future that will be updated with the value of the first input Future that completes.
|
1483 |
+
TORCH_API intrusive_ptr<ivalue::Future> collectAny(
|
1484 |
+
const c10::List<c10::intrusive_ptr<ivalue::Future>>& srcs);
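// Illustrative sketch (not part of the original header, assumes Futures with an
// Int element type): how collectAll might be driven. The returned Future
// completes only once every input Future has completed, and its value is the
// list of the (now completed) input Futures.
//
//   c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs(
//       c10::FutureType::create(c10::IntType::get()));
//   futs.push_back(c10::make_intrusive<c10::ivalue::Future>(c10::IntType::get()));
//   futs.push_back(c10::make_intrusive<c10::ivalue::Future>(c10::IntType::get()));
//   auto all = c10::collectAll(futs);
//   futs.get(0)->markCompleted(c10::IValue(1));
//   futs.get(1)->markCompleted(c10::IValue(2));  // `all` is now complete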
|
1485 |
+
|
1486 |
+
// User-defined object.
|
1487 |
+
struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target {
|
1488 |
+
public:
|
1489 |
+
// In general, a class type holds a shared_ptr to its owning CompilationUnit,
|
1490 |
+
// so that its type and methods do not get deallocated while the class exists.
|
1491 |
+
// However, the CompilationUnit holds ownership of the type's graphs, so
|
1492 |
+
// inserting a constant object into a Graph would create a reference cycle if
|
1493 |
+
// that constant object held a shared_ptr to its CU. For these objects we
|
1494 |
+
// instantiate them with non-owning references to their CU.
|
1495 |
+
Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) {
|
1496 |
+
slots_.resize(numSlots);
|
1497 |
+
}
|
1498 |
+
|
1499 |
+
Object(StrongTypePtr type, size_t numSlots)
|
1500 |
+
: type_(WeakOrStrongTypePtr(std::move(type))) {
|
1501 |
+
slots_.resize(numSlots);
|
1502 |
+
}
|
1503 |
+
|
1504 |
+
static c10::intrusive_ptr<Object> create(
|
1505 |
+
WeakOrStrongTypePtr type,
|
1506 |
+
size_t numSlots) {
|
1507 |
+
return c10::make_intrusive<Object>(std::move(type), numSlots);
|
1508 |
+
}
|
1509 |
+
|
1510 |
+
static c10::intrusive_ptr<Object> create(
|
1511 |
+
StrongTypePtr type,
|
1512 |
+
size_t numSlots) {
|
1513 |
+
return c10::make_intrusive<Object>(std::move(type), numSlots);
|
1514 |
+
}
|
1515 |
+
|
1516 |
+
static c10::intrusive_ptr<Object> create(ClassTypePtr classType, size_t numSlots);
|
1517 |
+
|
1518 |
+
/**
|
1519 |
+
* Slot API.
|
1520 |
+
*
|
1521 |
+
* Attributes are stored as a simple vector so that lookups are fast at
|
1522 |
+
* runtime. A "slot" is just an index into that vector, which can be computed
|
1523 |
+
* statically if you have access to the class type. Use this API if you are
|
1524 |
+
* writing compiler stuff.
|
1525 |
+
*/
|
1526 |
+
void setSlot(size_t slot, IValue v) {
|
1527 |
+
if (slot >= slots_.size()) {
|
1528 |
+
// for module types, it is possible that the members of the class have
|
1529 |
+
// expanded after the object was created. In this case, we expand
|
1530 |
+
// the slots to the right size
|
1531 |
+
resizeObject(slot);
|
1532 |
+
}
|
1533 |
+
slots_[slot] = std::move(v);
|
1534 |
+
}
|
1535 |
+
|
1536 |
+
const IValue& getSlot(size_t slot) const {
|
1537 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(slot < slots_.size());
|
1538 |
+
// NOTE: This lookup is fairly hot, so we use unchecked access to the
|
1539 |
+
// vector. Errors should still be detectable with ASan.
|
1540 |
+
return slots_[slot];
|
1541 |
+
}
|
1542 |
+
|
1543 |
+
void unsafeRemoveSlot(size_t slot) {
|
1544 |
+
TORCH_CHECK(slot < slots_.size());
|
1545 |
+
slots_.erase(slots_.begin() + static_cast<std::ptrdiff_t>(slot));
|
1546 |
+
}
|
1547 |
+
|
1548 |
+
/**
|
1549 |
+
* Attribute API.
|
1550 |
+
*
|
1551 |
+
* Wrappers around the slot stuff so that users can access attributes
|
1552 |
+
* directly. Use this API if you are a user.
|
1553 |
+
*
|
1554 |
+
* Note: Unlike in Python, TorchScript must make a distinction between
|
1555 |
+
* attributes (which are IValues) and methods (which are Methods). If you
|
1556 |
+
* want a method, use `obj.type()->getMethod()`
|
1557 |
+
*/
|
1558 |
+
IValue getAttr(const std::string& name) const;
|
1559 |
+
void setAttr(const std::string& name, IValue v);
|
1560 |
+
// Remove attribute by name, caller is responsible for
|
1561 |
+
// the safety of this operation
|
1562 |
+
// We didn't remove the attribute in the type because the type
|
1563 |
+
// might be shared by multiple objects.
|
1564 |
+
// Therefore after removing attribute, the object is in an inconsistent
|
1565 |
+
// state where it has more attribute types in its Type than
|
1566 |
+
// the attribute slots it has; the user needs to make sure the object
|
1567 |
+
// is kept consistent by removing the attribute from the type as well.
|
1568 |
+
void unsafeRemoveAttr(const std::string& name);
|
1569 |
+
|
1570 |
+
std::string name() const;
|
1571 |
+
|
1572 |
+
const std::vector<IValue>& slots() const {
|
1573 |
+
return slots_;
|
1574 |
+
}
|
1575 |
+
std::shared_ptr<ClassType> type() const;
|
1576 |
+
|
1577 |
+
std::shared_ptr<torch::jit::CompilationUnit> compilation_unit() {
|
1578 |
+
if (type_.holds_strong_ref()) {
|
1579 |
+
return type_.cu_.getStrongRefOrThrow();
|
1580 |
+
} else {
|
1581 |
+
auto weak_ptr = type_.cu_.getWeakRefOrThrow();
|
1582 |
+
return std::shared_ptr<torch::jit::CompilationUnit>(weak_ptr);
|
1583 |
+
}
|
1584 |
+
}
|
1585 |
+
|
1586 |
+
c10::intrusive_ptr<Object> copy_to_weak_compilation_ref() const;
|
1587 |
+
|
1588 |
+
void unsafe_make_weak_compilation_ref() {
|
1589 |
+
type_ = WeakOrStrongTypePtr(type_.asWeakTypePtr());
|
1590 |
+
}
|
1591 |
+
|
1592 |
+
c10::intrusive_ptr<Object> copy() const;
|
1593 |
+
|
1594 |
+
c10::intrusive_ptr<Object> deepcopy(
|
1595 |
+
c10::optional<at::Device> device = c10::nullopt) const;
|
1596 |
+
|
1597 |
+
c10::intrusive_ptr<Object> deepcopy(
|
1598 |
+
IValue::HashAliasedIValueMap& memo,
|
1599 |
+
c10::optional<at::Device> device = c10::nullopt) const;
|
1600 |
+
|
1601 |
+
bool is_weak_compilation_ref() const {
|
1602 |
+
return !type_.holds_strong_ref();
|
1603 |
+
}
|
1604 |
+
|
1605 |
+
bool is_empty_strong_compilation_ref() const {
|
1606 |
+
return type_.holds_empty_strong_ref();
|
1607 |
+
}
|
1608 |
+
|
1609 |
+
private:
|
1610 |
+
void resizeObject(size_t slot);
|
1611 |
+
WeakOrStrongTypePtr type_;
|
1612 |
+
std::vector<IValue> slots_;
|
1613 |
+
};
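// Illustrative sketch (hypothetical, not part of the original header): the slot
// API and the attribute API address the same storage. For a class type whose
// attributes are declared in the order {"x", "y"}, the following pairs are
// equivalent:
//
//   obj->setSlot(0, c10::IValue(1));   // same effect as obj->setAttr("x", IValue(1))
//   c10::IValue y = obj->getAttr("y"); // same value as obj->getSlot(1)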
|
1614 |
+
|
1615 |
+
// Virtual ivalue PyObjectHolder that holds a py::object; we make this virtual
|
1616 |
+
// because the py::object and refcounting logic should happen in libtorch_python
|
1617 |
+
// see concrete implementation in python_ivalue.h
|
1618 |
+
struct ivalue::PyObjectHolder : c10::intrusive_ptr_target {
|
1619 |
+
public:
|
1620 |
+
virtual PyObject* getPyObject() = 0;
|
1621 |
+
virtual c10::InferredType tryToInferType() = 0;
|
1622 |
+
virtual IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt) = 0;
|
1623 |
+
virtual std::string toStr() = 0;
|
1624 |
+
virtual std::vector<at::Tensor> extractTensors() = 0;
|
1625 |
+
|
1626 |
+
~PyObjectHolder() override = default;
|
1627 |
+
};
|
1628 |
+
|
1629 |
+
struct ivalue::EnumHolder : c10::intrusive_ptr_target {
|
1630 |
+
public:
|
1631 |
+
EnumHolder(std::shared_ptr<EnumType> type, std::string name, IValue value)
|
1632 |
+
: type_(std::move(type)),
|
1633 |
+
name_(std::move(name)),
|
1634 |
+
value_(std::move(value)) {}
|
1635 |
+
|
1636 |
+
bool is(const ivalue::EnumHolder& rhs) {
|
1637 |
+
return *this == rhs;
|
1638 |
+
}
|
1639 |
+
|
1640 |
+
friend bool operator==(
|
1641 |
+
const ivalue::EnumHolder& lhs,
|
1642 |
+
const ivalue::EnumHolder& rhs);
|
1643 |
+
|
1644 |
+
TORCH_API friend std::ostream& operator<<(
|
1645 |
+
std::ostream& out,
|
1646 |
+
const ivalue::EnumHolder& v);
|
1647 |
+
|
1648 |
+
TORCH_API const std::string& qualifiedClassName() const;
|
1649 |
+
|
1650 |
+
const std::string& unqualifiedClassName() const;
|
1651 |
+
|
1652 |
+
const std::string& name() const {
|
1653 |
+
return name_;
|
1654 |
+
}
|
1655 |
+
|
1656 |
+
const IValue& value() const {
|
1657 |
+
return value_;
|
1658 |
+
}
|
1659 |
+
|
1660 |
+
std::shared_ptr<EnumType> type() const {
|
1661 |
+
return type_;
|
1662 |
+
}
|
1663 |
+
|
1664 |
+
private:
|
1665 |
+
std::shared_ptr<EnumType> type_;
|
1666 |
+
std::string name_;
|
1667 |
+
IValue value_;
|
1668 |
+
};
|
1669 |
+
|
1670 |
+
#undef TORCH_FORALL_TAGS
|
1671 |
+
|
1672 |
+
namespace detail {
|
1673 |
+
|
1674 |
+
struct _guarded_unsigned_long_unique_dummy final {
|
1675 |
+
_guarded_unsigned_long_unique_dummy(int64_t){};
|
1676 |
+
};
|
1677 |
+
using _guarded_unsigned_long = std::conditional_t<
|
1678 |
+
std::is_same<unsigned long, uint32_t>::value ||
|
1679 |
+
std::is_same<unsigned long, uint64_t>::value,
|
1680 |
+
_guarded_unsigned_long_unique_dummy,
|
1681 |
+
unsigned long>;
|
1682 |
+
|
1683 |
+
} // namespace detail
|
1684 |
+
|
1685 |
+
inline ivalue::Object& IValue::toObjectRef() const {
|
1686 |
+
AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
|
1687 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), "Attempted to create null reference");
|
1688 |
+
return *static_cast<c10::ivalue::Object*>(payload.u.as_intrusive_ptr);
|
1689 |
+
}
|
1690 |
+
|
1691 |
+
// note: when adding a DEFINE_TO case here you should also add a
|
1692 |
+
// toX method to IValue. These named methods are much more discoverable
|
1693 |
+
// than the to templated function.
|
1694 |
+
|
1695 |
+
#define DEFINE_TO(T, method_name) \
|
1696 |
+
template <> \
|
1697 |
+
inline T IValue::to<T>()&& { \
|
1698 |
+
return static_cast<T>(std::move(*this).method_name()); \
|
1699 |
+
} \
|
1700 |
+
template <> \
|
1701 |
+
inline c10::detail::ivalue_to_const_ref_overload_return<T>::type IValue::to<T>() const& { \
|
1702 |
+
typedef c10::detail::ivalue_to_const_ref_overload_return<T>::type return_type; \
|
1703 |
+
return static_cast<return_type>(this->method_name()); \
|
1704 |
+
}
|
1705 |
+
|
1706 |
+
DEFINE_TO(at::Tensor, toTensor)
|
1707 |
+
DEFINE_TO(at::Storage, toStorage)
|
1708 |
+
DEFINE_TO(c10::Stream, toStream)
|
1709 |
+
DEFINE_TO(float, toDouble)
|
1710 |
+
DEFINE_TO(double, toDouble)
|
1711 |
+
DEFINE_TO(c10::complex<double>, toComplexDouble)
|
1712 |
+
DEFINE_TO(unsigned char, toInt)
|
1713 |
+
DEFINE_TO(signed char, toInt)
|
1714 |
+
DEFINE_TO(unsigned short, toInt)
|
1715 |
+
DEFINE_TO(short, toInt)
|
1716 |
+
DEFINE_TO(int, toInt)
|
1717 |
+
DEFINE_TO(uint32_t, toInt)
|
1718 |
+
DEFINE_TO(uint64_t, toInt)
|
1719 |
+
DEFINE_TO(detail::_guarded_unsigned_long, toInt)
|
1720 |
+
DEFINE_TO(int64_t, toInt)
|
1721 |
+
DEFINE_TO(bool, toBool)
|
1722 |
+
DEFINE_TO(c10::intrusive_ptr<caffe2::Blob>, toBlob);
|
1723 |
+
DEFINE_TO(c10::intrusive_ptr<ivalue::ConstantString>, toString)
|
1724 |
+
DEFINE_TO(c10::intrusive_ptr<ivalue::Object>, toObject)
|
1725 |
+
DEFINE_TO(at::Scalar, toScalar)
|
1726 |
+
DEFINE_TO(c10::List<int64_t>, toIntList)
|
1727 |
+
DEFINE_TO(c10::List<double>, toDoubleList)
|
1728 |
+
DEFINE_TO(c10::List<c10::complex<double>>, toComplexDoubleList)
|
1729 |
+
DEFINE_TO(c10::List<bool>, toBoolList)
|
1730 |
+
DEFINE_TO(c10::List<at::Tensor>, toTensorList)
|
1731 |
+
DEFINE_TO(c10::impl::GenericList, toList)
|
1732 |
+
DEFINE_TO(c10::impl::GenericDict, toGenericDict)
|
1733 |
+
DEFINE_TO(c10::intrusive_ptr<ivalue::Tuple>, toTuple)
|
1734 |
+
DEFINE_TO(std::string, toStringRef)
|
1735 |
+
DEFINE_TO(c10::string_view, toStringView)
|
1736 |
+
DEFINE_TO(c10::intrusive_ptr<ivalue::Future>, toFuture)
|
1737 |
+
DEFINE_TO(c10::intrusive_ptr<ivalue::Await>, toAwait)
|
1738 |
+
DEFINE_TO(c10::intrusive_ptr<c10::RRefInterface>, toRRef)
|
1739 |
+
DEFINE_TO(c10::intrusive_ptr<at::Quantizer>, toQuantizer)
|
1740 |
+
DEFINE_TO(IValue, toIValue)
|
1741 |
+
DEFINE_TO(c10::Device, toDevice)
|
1742 |
+
DEFINE_TO(at::ScalarType, toScalarType)
|
1743 |
+
DEFINE_TO(at::Layout, toLayout)
|
1744 |
+
DEFINE_TO(at::MemoryFormat, toMemoryFormat)
|
1745 |
+
DEFINE_TO(at::QScheme, toQScheme)
|
1746 |
+
DEFINE_TO(at::Dimname, toDimname)
|
1747 |
+
DEFINE_TO(at::Generator, toGenerator)
|
1748 |
+
DEFINE_TO(c10::SymInt, toSymInt)
|
1749 |
+
DEFINE_TO(c10::SymFloat, toSymFloat)
|
1750 |
+
DEFINE_TO(c10::SymBool, toSymBool)
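// Illustrative sketch (not part of the original header): with the DEFINE_TO
// cases above, IValue::to<T>() forwards to the corresponding named accessor:
//
//   c10::IValue i(int64_t(7));
//   int64_t a = i.to<int64_t>();                  // dispatches to toInt()
//   c10::IValue s(std::string("hi"));
//   const std::string& r = s.to<std::string>();   // const& overload, via toStringRef()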
|
1751 |
+
|
1752 |
+
template <class T>
|
1753 |
+
struct _fake_type {};
|
1754 |
+
|
1755 |
+
// generic_to<T> converts an IValue from a generic list or generic dict
|
1756 |
+
// to a concrete list/dict type like List<T>, Dict<...> or optional<T>.
|
1757 |
+
// Note that in the case of lists, this only works for IValue-based lists,
|
1758 |
+
// i.e. not for int64_t, double, ...
|
1759 |
+
// generic_to<T> is an implementation detail of IValue::to<T> and not
|
1760 |
+
// supposed to be called directly.
|
1761 |
+
// The _fake_type<T> parameter allows us to overload
|
1762 |
+
// based on the return type.
|
1763 |
+
template <class Elem>
|
1764 |
+
// TODO this is deprecated but we don't throw a warning because a lot of ops in
|
1765 |
+
// native_functions.yaml still return std::vector.
|
1766 |
+
// C10_DEPRECATED_MESSAGE("IValues based on std::vector<T> are potentially slow
|
1767 |
+
// and deprecated. Please use torch::List<T> instead.")
|
1768 |
+
std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>>) {
|
1769 |
+
// We need to do a deep copy of the vector because there might be other
|
1770 |
+
// references to this same IValue that also use the list. We can't just
|
1771 |
+
// move the elements out.
|
1772 |
+
auto list = std::move(ivalue).to<List<Elem>>();
|
1773 |
+
std::vector<Elem> result;
|
1774 |
+
result.reserve(list.size());
|
1775 |
+
for (Elem v : list) {
|
1776 |
+
result.push_back(std::move(v));
|
1777 |
+
}
|
1778 |
+
return result;
|
1779 |
+
}
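// Illustrative sketch (not part of the original header): converting a list
// IValue into a std::vector deep-copies the elements, leaving the original
// list (and any other IValues sharing it) untouched:
//
//   c10::IValue iv(c10::List<int64_t>({1, 2, 3}));
//   std::vector<int64_t> v = iv.to<std::vector<int64_t>>();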
|
1780 |
+
|
1781 |
+
template <typename T>
|
1782 |
+
c10::intrusive_ptr<T> IValue::toCustomClass() && {
|
1783 |
+
static_assert(
|
1784 |
+
std::is_base_of<torch::CustomClassHolder, T>::value == true,
|
1785 |
+
"toCustomClass requires that template parameter T must inherit "
|
1786 |
+
"from torch::CustomClassHolder");
|
1787 |
+
auto obj = toObject();
|
1788 |
+
TORCH_CHECK(
|
1789 |
+
obj->slots().size() == 1,
|
1790 |
+
"Tried to cast IValue to custom class but it did "
|
1791 |
+
"not contain a custom class!");
|
1792 |
+
const auto* expected_type = c10::getCustomClassType<c10::intrusive_ptr<T>>().get();
|
1793 |
+
ivalue::checkCustomClassType(expected_type, type().get());
|
1794 |
+
auto userObj =
|
1795 |
+
c10::static_intrusive_pointer_cast<T>(obj->getSlot(0).toCapsule());
|
1796 |
+
return userObj;
|
1797 |
+
}
|
1798 |
+
|
1799 |
+
template <typename T>
|
1800 |
+
c10::intrusive_ptr<T> IValue::toCustomClass() const& {
|
1801 |
+
static_assert(
|
1802 |
+
std::is_base_of<torch::CustomClassHolder, T>::value == true,
|
1803 |
+
"toCustomClass requires that template parameter T must inherit "
|
1804 |
+
"from torch::CustomClassHolder");
|
1805 |
+
auto obj = toObject();
|
1806 |
+
TORCH_CHECK(
|
1807 |
+
obj->slots().size() == 1,
|
1808 |
+
"Tried to cast IValue to custom class but it did "
|
1809 |
+
"not contain a custom class!");
|
1810 |
+
const auto* expected_type = c10::getCustomClassType<c10::intrusive_ptr<T>>().get();
|
1811 |
+
ivalue::checkCustomClassType(expected_type, type().get());
|
1812 |
+
auto userObj =
|
1813 |
+
c10::static_intrusive_pointer_cast<T>(obj->getSlot(0).toCapsule());
|
1814 |
+
return userObj;
|
1815 |
+
}
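// Illustrative sketch (hypothetical class name, not part of the original
// header): for a custom class MyStack registered through torch::class_, an
// IValue holding an instance can be unwrapped as
//
//   c10::intrusive_ptr<MyStack> p = iv.toCustomClass<MyStack>();
//
// which checks that the stored object's type matches the registered type
// before casting the capsule held in slot 0.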
|
1816 |
+
|
1817 |
+
template <typename T>
|
1818 |
+
T generic_to(IValue ivalue, _fake_type<T>) {
|
1819 |
+
using ElemType = typename std::remove_pointer<T>::type::element_type;
|
1820 |
+
return std::move(ivalue).toCustomClass<ElemType>();
|
1821 |
+
}
|
1822 |
+
|
1823 |
+
template <typename T>
|
1824 |
+
tagged_capsule<T> generic_to(IValue ivalue, _fake_type<tagged_capsule<T>>) {
|
1825 |
+
return tagged_capsule<T>{std::move(ivalue)};
|
1826 |
+
}
|
1827 |
+
|
1828 |
+
template <typename Elem>
|
1829 |
+
c10::List<Elem> generic_to(IValue ivalue, _fake_type<c10::List<Elem>>) {
|
1830 |
+
return impl::toTypedList<Elem>(std::move(ivalue).toList());
|
1831 |
+
}
|
1832 |
+
|
1833 |
+
template <typename T>
|
1834 |
+
static T createVectorLikeFromList(const c10::detail::ListImpl* impl) {
|
1835 |
+
T result;
|
1836 |
+
result.reserve(impl->list.size());
|
1837 |
+
for (const auto & i : impl->list) {
|
1838 |
+
result.push_back(i.to<typename T::value_type>());
|
1839 |
+
}
|
1840 |
+
return result;
|
1841 |
+
}
|
1842 |
+
|
1843 |
+
template <typename T>
|
1844 |
+
static std::vector<T> createVectorFromList(const c10::detail::ListImpl* impl) {
|
1845 |
+
return createVectorLikeFromList<std::vector<T>>(impl);
|
1846 |
+
}
|
1847 |
+
|
1848 |
+
template <typename T>
|
1849 |
+
std::vector<T> createVectorFromList(const c10::List<T>& impl) {
|
1850 |
+
std::vector<T> result;
|
1851 |
+
result.reserve(impl.size());
|
1852 |
+
for (size_t i = 0, N = impl.size(); i < N; ++i) {
|
1853 |
+
result.push_back(impl[i]);
|
1854 |
+
}
|
1855 |
+
return result;
|
1856 |
+
}
|
1857 |
+
|
1858 |
+
template <typename T>
|
1859 |
+
OptionalArray<T> generic_to(IValue ivalue, _fake_type<OptionalArray<T>>) {
|
1860 |
+
if (ivalue.isNone()) {
|
1861 |
+
return {};
|
1862 |
+
}
|
1863 |
+
return createVectorFromList<T>(
|
1864 |
+
std::move(ivalue).to<c10::List<T>>()
|
1865 |
+
);
|
1866 |
+
}
|
1867 |
+
|
1868 |
+
namespace detail {
|
1869 |
+
template <typename Elem, size_t... I>
|
1870 |
+
std::array<Elem, sizeof...(I)> generic_to_array(
|
1871 |
+
IValue ivalue,
|
1872 |
+
_fake_type<std::array<Elem, sizeof...(I)>>,
|
1873 |
+
std::index_sequence<I...>) {
|
1874 |
+
// We need to do a deep copy of the array because there might be other
|
1875 |
+
// references to this same IValue that also use the list. We can't just
|
1876 |
+
// move the elements out.
|
1877 |
+
auto list = std::move(ivalue).to<List<Elem>>();
|
1878 |
+
TORCH_CHECK(
|
1879 |
+
list.size() == sizeof...(I),
|
1880 |
+
"Tried to convert a List with ",
|
1881 |
+
list.size(),
|
1882 |
+
" elements to a fixed-size array of size ",
|
1883 |
+
sizeof...(I));
|
1884 |
+
return {list[I]...};
|
1885 |
+
}
|
1886 |
+
} // namespace detail
|
1887 |
+
|
1888 |
+
template <typename Elem, size_t N>
|
1889 |
+
std::array<Elem, N> generic_to(
|
1890 |
+
IValue ivalue,
|
1891 |
+
_fake_type<std::array<Elem, N>> ft) {
|
1892 |
+
return detail::generic_to_array(ivalue, ft, std::make_index_sequence<N>());
|
1893 |
+
}
|
1894 |
+
|
1895 |
+
template <typename Key, typename Value>
|
1896 |
+
c10::Dict<Key, Value> generic_to(
|
1897 |
+
IValue ivalue,
|
1898 |
+
_fake_type<c10::Dict<Key, Value>>) {
|
1899 |
+
return impl::toTypedDict<Key, Value>(std::move(ivalue).toGenericDict());
|
1900 |
+
}
|
1901 |
+
|
1902 |
+
template <typename K, typename V>
|
1903 |
+
C10_DEPRECATED_MESSAGE(
|
1904 |
+
"IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")
|
1905 |
+
std::unordered_map<K, V> generic_to(
|
1906 |
+
IValue ivalue,
|
1907 |
+
_fake_type<std::unordered_map<K, V>>) {
|
1908 |
+
std::unordered_map<K, V> specialized_dict;
|
1909 |
+
|
1910 |
+
for (const auto& item : std::move(ivalue).toGenericDict()) {
|
1911 |
+
specialized_dict[item.key().template to<K>()] = item.value().template to<V>();
|
1912 |
+
}
|
1913 |
+
|
1914 |
+
return specialized_dict;
|
1915 |
+
}
|
1916 |
+
|
1917 |
+
template <typename T>
|
1918 |
+
c10::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) {
|
1919 |
+
if (ivalue.isNone()) {
|
1920 |
+
return c10::nullopt;
|
1921 |
+
}
|
1922 |
+
return std::move(ivalue).to<T>();
|
1923 |
+
}
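// Illustrative sketch (not part of the original header): None maps to
// c10::nullopt, anything else is converted to the wrapped type:
//
//   c10::IValue none;                                              // None
//   auto a = none.to<c10::optional<int64_t>>();                    // c10::nullopt
//   auto b = c10::IValue(int64_t(3)).to<c10::optional<int64_t>>(); // 3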
|
1924 |
+
|
1925 |
+
namespace detail {
|
1926 |
+
template <typename Tuple, std::size_t... INDEX>
|
1927 |
+
Tuple generic_to_tuple_impl(
|
1928 |
+
const ivalue::TupleElements& t,
|
1929 |
+
std::index_sequence<INDEX...>) {
|
1930 |
+
return std::make_tuple(
|
1931 |
+
t[INDEX].to<typename std::tuple_element<INDEX, Tuple>::type>()...);
|
1932 |
+
}
|
1933 |
+
} // namespace detail
|
1934 |
+
|
1935 |
+
template <
|
1936 |
+
typename... Args,
|
1937 |
+
typename Indices = std::make_index_sequence<sizeof...(Args)>,
|
1938 |
+
std::enable_if_t<
|
1939 |
+
!std::disjunction<
|
1940 |
+
std::is_lvalue_reference<Args>...,
|
1941 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
1942 |
+
std::nullptr_t> = nullptr>
|
1943 |
+
std::tuple<Args...> generic_to(const IValue& ivalue, _fake_type<std::tuple<Args...>>) {
|
1944 |
+
const auto& vals = ivalue.toTupleRef().elements();
|
1945 |
+
TORCH_CHECK(vals.size() == sizeof...(Args));
|
1946 |
+
return detail::generic_to_tuple_impl<std::tuple<Args...>>(vals, Indices{});
|
1947 |
+
}
|
1948 |
+
|
1949 |
+
template <typename T>
|
1950 |
+
inline T IValue::to() && {
|
1951 |
+
return generic_to(std::move(*this), _fake_type<T>{});
|
1952 |
+
}
|
1953 |
+
|
1954 |
+
template <>
|
1955 |
+
inline c10::optional<c10::string_view> IValue::to() && {
|
1956 |
+
// In the default implementation, the IValue is destroyed with std::move.
|
1957 |
+
// But if the unboxed type is optional<string_view> we cannot destroy
|
1958 |
+
// the IValue.
|
1959 |
+
return generic_to(*this, _fake_type<c10::optional<c10::string_view>>{});
|
1960 |
+
}
|
1961 |
+
|
1962 |
+
template <typename T>
|
1963 |
+
inline typename c10::detail::ivalue_to_const_ref_overload_return<T>::type IValue::to() const& {
|
1964 |
+
return generic_to(*this, _fake_type<T>{});
|
1965 |
+
}
|
1966 |
+
|
1967 |
+
inline c10::List<int64_t> IValue::toIntList() && {
|
1968 |
+
AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
|
1969 |
+
return c10::List<int64_t>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
1970 |
+
}
|
1971 |
+
inline c10::List<int64_t> IValue::toIntList() const& {
|
1972 |
+
AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
|
1973 |
+
return c10::List<int64_t>(toIntrusivePtr<c10::detail::ListImpl>());
|
1974 |
+
}
|
1975 |
+
inline std::vector<int64_t> IValue::toIntVector() const {
|
1976 |
+
AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
|
1977 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
1978 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
1979 |
+
"called toIntVector on null intrusive_ptr IValue");
|
1980 |
+
return createVectorFromList<int64_t>(
|
1981 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
1982 |
+
}
|
1983 |
+
inline std::vector<c10::SymInt> IValue::toSymIntVector() const {
|
1984 |
+
AT_ASSERT(isSymIntList() || isIntList(), "Expected SymIntList or IntList but got ", tagKind());
|
1985 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
1986 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
1987 |
+
"called toSymIntVector on null intrusive_ptr IValue");
|
1988 |
+
return createVectorFromList<c10::SymInt>(
|
1989 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
1990 |
+
}
|
1991 |
+
inline at::DimVector IValue::toDimVector() const {
|
1992 |
+
AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
|
1993 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
1994 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
1995 |
+
"called toDimVector on null intrusive_ptr IValue");
|
1996 |
+
return createVectorLikeFromList<at::DimVector>(
|
1997 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
1998 |
+
}
|
1999 |
+
inline c10::List<double> IValue::toDoubleList() && {
|
2000 |
+
AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
|
2001 |
+
return c10::List<double>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
2002 |
+
}
|
2003 |
+
inline c10::List<double> IValue::toDoubleList() const& {
|
2004 |
+
AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
|
2005 |
+
return c10::List<double>(toIntrusivePtr<c10::detail::ListImpl>());
|
2006 |
+
}
|
2007 |
+
inline std::vector<double> IValue::toDoubleVector() const {
|
2008 |
+
AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
|
2009 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2010 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2011 |
+
"called toDoubleVector on null intrusive_ptr IValue");
|
2012 |
+
return createVectorFromList<double>(
|
2013 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
2014 |
+
}
|
2015 |
+
inline c10::List<c10::complex<double>> IValue::toComplexDoubleList() && {
|
2016 |
+
AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind());
|
2017 |
+
return c10::List<c10::complex<double>>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
2018 |
+
}
|
2019 |
+
inline c10::List<c10::complex<double>> IValue::toComplexDoubleList() const& {
|
2020 |
+
AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind());
|
2021 |
+
return c10::List<c10::complex<double>>(toIntrusivePtr<c10::detail::ListImpl>());
|
2022 |
+
}
|
2023 |
+
inline std::vector<c10::complex<double>> IValue::toComplexDoubleVector() const {
|
2024 |
+
AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind());
|
2025 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2026 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2027 |
+
"called toComplexDoubleVector on null intrusive_ptr IValue");
|
2028 |
+
return createVectorFromList<c10::complex<double>>(
|
2029 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
2030 |
+
}
|
2031 |
+
inline c10::List<bool> IValue::toBoolList() && {
|
2032 |
+
AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind());
|
2033 |
+
return c10::List<bool>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
2034 |
+
}
|
2035 |
+
inline c10::List<bool> IValue::toBoolList() const& {
|
2036 |
+
AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind());
|
2037 |
+
return c10::List<bool>(toIntrusivePtr<c10::detail::ListImpl>());
|
2038 |
+
}
|
2039 |
+
inline c10::List<at::Tensor> IValue::toTensorList() && {
|
2040 |
+
AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
|
2041 |
+
return c10::List<at::Tensor>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
2042 |
+
}
|
2043 |
+
inline c10::List<at::Tensor> IValue::toTensorList() const& {
|
2044 |
+
AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
|
2045 |
+
return c10::List<at::Tensor>(toIntrusivePtr<c10::detail::ListImpl>());
|
2046 |
+
}
|
2047 |
+
inline std::vector<at::Tensor> IValue::toTensorVector() const {
|
2048 |
+
AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
|
2049 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2050 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2051 |
+
"called toTensorVector on null intrusive_ptr IValue");
|
2052 |
+
return createVectorFromList<at::Tensor>(
|
2053 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
2054 |
+
}
|
2055 |
+
inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() && {
|
2056 |
+
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
|
2057 |
+
return c10::List<c10::optional<at::Tensor>>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
2058 |
+
}
|
2059 |
+
inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() const& {
|
2060 |
+
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
|
2061 |
+
return c10::List<c10::optional<at::Tensor>>(toIntrusivePtr<c10::detail::ListImpl>());
|
2062 |
+
}
|
2063 |
+
inline std::vector<c10::optional<at::Tensor>> IValue::toOptionalTensorVector() const {
|
2064 |
+
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
|
2065 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2066 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2067 |
+
"called toOptionalTensorVector on null intrusive_ptr IValue");
|
2068 |
+
return createVectorFromList<c10::optional<at::Tensor>>(
|
2069 |
+
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
|
2070 |
+
}
|
2071 |
+
inline c10::List<IValue> IValue::toList() && {
|
2072 |
+
AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
|
2073 |
+
return c10::List<IValue>(moveToIntrusivePtr<c10::detail::ListImpl>());
|
2074 |
+
}
|
2075 |
+
inline c10::List<IValue> IValue::toList() const& {
|
2076 |
+
AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
|
2077 |
+
return c10::List<IValue>(toIntrusivePtr<c10::detail::ListImpl>());
|
2078 |
+
}
|
2079 |
+
inline c10::ArrayRef<IValue> IValue::toListRef() const {
|
2080 |
+
AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
|
2081 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2082 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2083 |
+
"called toListRef on null intrusive_ptr IValue");
|
2084 |
+
return static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr)
|
2085 |
+
->list;
|
2086 |
+
}
|
2087 |
+
inline c10::Dict<IValue, IValue> IValue::toGenericDict() && {
|
2088 |
+
AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind());
|
2089 |
+
return c10::Dict<IValue, IValue>(moveToIntrusivePtr<c10::detail::DictImpl>());
|
2090 |
+
}
|
2091 |
+
inline c10::Dict<IValue, IValue> IValue::toGenericDict() const& {
|
2092 |
+
AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind());
|
2093 |
+
return c10::Dict<IValue, IValue>(toIntrusivePtr<c10::detail::DictImpl>());
|
2094 |
+
}
|
2095 |
+
inline c10::intrusive_ptr<ivalue::Tuple> IValue::toTuple() && {
|
2096 |
+
AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind());
|
2097 |
+
return moveToIntrusivePtr<ivalue::Tuple>();
|
2098 |
+
}
|
2099 |
+
inline c10::intrusive_ptr<ivalue::Tuple> IValue::toTuple() const& {
|
2100 |
+
AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind());
|
2101 |
+
return toIntrusivePtr<ivalue::Tuple>();
|
2102 |
+
}
|
2103 |
+
inline ivalue::Tuple& IValue::toTupleRef() const {
|
2104 |
+
AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind());
|
2105 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2106 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2107 |
+
"called toTupleRef on null intrusive_ptr IValue");
|
2108 |
+
return *static_cast<c10::ivalue::Tuple*>(
|
2109 |
+
payload.u.as_intrusive_ptr);
|
2110 |
+
}
|
2111 |
+
|
2112 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::Tuple> v)
|
2113 |
+
: tag(Tag::Tuple) {
|
2114 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2115 |
+
}
|
2116 |
+
template <
|
2117 |
+
typename... Args,
|
2118 |
+
std::enable_if_t<
|
2119 |
+
!std::disjunction<
|
2120 |
+
std::is_lvalue_reference<Args>...,
|
2121 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
2122 |
+
std::nullptr_t>>
|
2123 |
+
inline IValue::IValue(const std::tuple<Args...>& t)
|
2124 |
+
: IValue(c10::guts::apply(c10::ivalue::Tuple::create<const Args&...>, t)) {
|
2125 |
+
}
|
2126 |
+
|
2127 |
+
template <
|
2128 |
+
typename... Args,
|
2129 |
+
std::enable_if_t<
|
2130 |
+
!std::disjunction<
|
2131 |
+
std::is_lvalue_reference<Args>...,
|
2132 |
+
std::negation<std::is_constructible<IValue, Args>>...>::value,
|
2133 |
+
std::nullptr_t>>
|
2134 |
+
inline IValue::IValue(std::tuple<Args...>&& t)
|
2135 |
+
: IValue(c10::guts::apply(c10::ivalue::Tuple::create<Args&&...>, std::move(t))) {
|
2136 |
+
}
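// Illustrative sketch (not part of the original header): together with the
// tuple overload of generic_to above, these constructors allow a round trip
// between std::tuple and IValue:
//
//   c10::IValue tv(std::make_tuple(int64_t(1), std::string("a")));
//   auto t = tv.to<std::tuple<int64_t, std::string>>();  // {1, "a"}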
|
2137 |
+
|
2138 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::ConstantString> v)
|
2139 |
+
: tag(Tag::String) {
|
2140 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2141 |
+
}
|
2142 |
+
inline IValue::IValue(std::string v)
|
2143 |
+
: IValue(ivalue::ConstantString::create(std::move(v))) {}
|
2144 |
+
|
2145 |
+
inline IValue::IValue(c10::impl::GenericList v)
|
2146 |
+
: tag(Tag::GenericList) {
|
2147 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release());
|
2148 |
+
}
|
2149 |
+
|
2150 |
+
template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
|
2151 |
+
inline IValue::IValue(c10::List<T>&& v) : IValue(impl::toList<T>(std::move(v))) {}
|
2152 |
+
template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
|
2153 |
+
inline IValue::IValue(const c10::List<T>& v) : IValue(impl::toList<T>(v)) {}
|
2154 |
+
template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
|
2155 |
+
inline IValue::IValue(at::ArrayRef<T> v) : IValue(c10::List<T>()) {
|
2156 |
+
auto list = to<c10::List<T>>();
|
2157 |
+
list.reserve(v.size());
|
2158 |
+
for (const auto& e : v) {
|
2159 |
+
list.push_back(e);
|
2160 |
+
}
|
2161 |
+
}
|
2162 |
+
template <class T, IValue::enable_if_symint<T>>
|
2163 |
+
inline IValue::IValue(at::ArrayRef<T> v) : IValue() {
|
2164 |
+
auto vi = c10::asIntArrayRefSlowOpt(v);
|
2165 |
+
if (vi.has_value()) {
|
2166 |
+
// This list is entirely integers; ensure it is typed as
|
2167 |
+
// an IntList so toIntList works
|
2168 |
+
*this = IValue(*vi);
|
2169 |
+
} else {
|
2170 |
+
// This list has SymInts; type it as a SymInt
|
2171 |
+
*this = IValue(impl::toList<c10::SymInt>(c10::List<c10::SymInt>()));
|
2172 |
+
auto list = to<c10::List<c10::SymInt>>();
|
2173 |
+
list.reserve(v.size());
|
2174 |
+
for (const auto& e : v) {
|
2175 |
+
list.push_back(e);
|
2176 |
+
}
|
2177 |
+
}
|
2178 |
+
}
|
2179 |
+
template <class T, IValue::enable_if_symint<T>>
|
2180 |
+
inline IValue::IValue(at::OptionalArrayRef<T> mb_v) : IValue() {
|
2181 |
+
if (!mb_v.has_value()) return;
|
2182 |
+
*this = IValue(*mb_v);
|
2183 |
+
}
|
2184 |
+
template <class T, IValue::enable_if_symint<T>>
|
2185 |
+
inline IValue::IValue(const std::vector<T>& v) : IValue() {
|
2186 |
+
*this = IValue(at::ArrayRef<T>(v));
|
2187 |
+
}
|
2188 |
+
template <class T, IValue::enable_if_symint<T>>
|
2189 |
+
inline IValue::IValue(std::vector<T>&& v) : IValue() {
|
2190 |
+
auto vi = c10::asIntArrayRefSlowOpt(v);
|
2191 |
+
if (vi.has_value()) {
|
2192 |
+
// This list is entirely integers; ensure it is typed as
|
2193 |
+
// an IntList so toIntList works
|
2194 |
+
*this = IValue(*vi);
|
2195 |
+
} else {
|
2196 |
+
// This list has SymInts; type it as a SymInt
|
2197 |
+
*this = IValue(impl::toList<c10::SymInt>(c10::List<c10::SymInt>()));
|
2198 |
+
auto list = to<c10::List<c10::SymInt>>();
|
2199 |
+
list.reserve(v.size());
|
2200 |
+
for (auto& e : v) {
|
2201 |
+
list.push_back(std::move(e));
|
2202 |
+
}
|
2203 |
+
}
|
2204 |
+
}
|
2205 |
+
template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
|
2206 |
+
inline IValue::IValue(const std::vector<T>& v) : IValue(c10::List<T>()) {
|
2207 |
+
auto list = to<c10::List<T>>();
|
2208 |
+
list.reserve(v.size());
|
2209 |
+
for (const auto& e : v) {
|
2210 |
+
list.push_back(e);
|
2211 |
+
}
|
2212 |
+
}
|
2213 |
+
|
2214 |
+
template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
|
2215 |
+
inline IValue::IValue(std::vector<T>&& v) : IValue(c10::List<T>()) {
|
2216 |
+
auto list = to<c10::List<T>>();
|
2217 |
+
list.reserve(v.size());
|
2218 |
+
if constexpr (std::is_same_v<T, bool>) {
|
2219 |
+
for (auto e : v) {
|
2220 |
+
list.push_back(e);
|
2221 |
+
}
|
2222 |
+
} else {
|
2223 |
+
for (auto& e : v) {
|
2224 |
+
list.push_back(std::move(e));
|
2225 |
+
}
|
2226 |
+
}
|
2227 |
+
}
|
2228 |
+
|
2229 |
+
template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
|
2230 |
+
inline IValue::IValue(c10::OptionalArrayRef<T> v) : IValue() {
|
2231 |
+
if (v.has_value()) {
|
2232 |
+
*this = IValue(std::move(*v));
|
2233 |
+
}
|
2234 |
+
}
|
2235 |
+
|
2236 |
+
template <class T, size_t N>
|
2237 |
+
inline IValue::IValue(std::array<T, N> v) : IValue(c10::List<T>()) {
|
2238 |
+
auto list = to<c10::List<T>>();
|
2239 |
+
list.reserve(v.size());
|
2240 |
+
for (auto& e : v) {
|
2241 |
+
list.push_back(std::move(e));
|
2242 |
+
}
|
2243 |
+
}
|
2244 |
+
|
2245 |
+
template <class T, IValue::enable_if_ilist_is_ivalue_constructible<T>>
|
2246 |
+
inline IValue::IValue(c10::IListRef<T> v) : IValue() {
|
2247 |
+
constexpr bool boxed_type_constructs_ivalue =
|
2248 |
+
std::is_constructible<IValue, typename c10::IListRef<T>::boxed_type>::value;
|
2249 |
+
// First, we try to use the boxed value.
|
2250 |
+
// If we fail (either it's not in the boxed state, or its boxed type
|
2251 |
+
// cannot construct an IValue), we fall back to copying the list.
|
2252 |
+
if (boxed_type_constructs_ivalue && v.isBoxed()) {
|
2253 |
+
*this = IValue(impl::toList(v.toBoxed()));
|
2254 |
+
} else {
|
2255 |
+
c10::List<T> list;
|
2256 |
+
list.reserve(v.size());
|
2257 |
+
for (const auto& t : v) {
|
2258 |
+
list.push_back(t);
|
2259 |
+
}
|
2260 |
+
*this = IValue(impl::toList(std::move(list)));
|
2261 |
+
}
|
2262 |
+
}
|
2263 |
+
|
2264 |
+
inline IValue::IValue(c10::impl::GenericDict v)
|
2265 |
+
: tag(Tag::GenericDict) {
|
2266 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release());
|
2267 |
+
}
|
2268 |
+
template <class Key, class Value>
|
2269 |
+
inline IValue::IValue(c10::Dict<Key, Value> v)
|
2270 |
+
: IValue(impl::toGenericDict(std::move(v))) {}
|
2271 |
+
|
2272 |
+
template <class Key, class Value>
|
2273 |
+
inline IValue::IValue(std::unordered_map<Key, Value> v)
|
2274 |
+
: IValue(Dict<Key, Value>()) {
|
2275 |
+
auto dict = to<c10::Dict<Key, Value>>();
|
2276 |
+
dict.reserve(v.size());
|
2277 |
+
for (auto& e : v) {
|
2278 |
+
dict.insert(std::move(e.first), std::move(e.second));
|
2279 |
+
}
|
2280 |
+
}
|
2281 |
+
|
2282 |
+
template <class T, IValue::enable_if_ivalue_constructible<T>>
|
2283 |
+
inline IValue::IValue(c10::optional<T> v) : IValue() {
|
2284 |
+
if (v.has_value()) {
|
2285 |
+
*this = IValue(std::move(*v));
|
2286 |
+
}
|
2287 |
+
}
|
2288 |
+
|
2289 |
+
inline IValue::IValue(c10::nullopt_t) : IValue() {}
|
2290 |
+
|
2291 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::Object> v)
|
2292 |
+
: tag(Tag::Object) {
|
2293 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2294 |
+
}
|
2295 |
+
|
2296 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::PyObjectHolder> v)
|
2297 |
+
: tag(Tag::PyObject) {
|
2298 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2299 |
+
}
|
2300 |
+
|
2301 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::EnumHolder> v)
|
2302 |
+
: tag(Tag::Enum) {
|
2303 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2304 |
+
}
|
2305 |
+
|
2306 |
+
inline IValue IValue::make_capsule(
|
2307 |
+
intrusive_ptr<torch::CustomClassHolder> blob) {
|
2308 |
+
IValue iv;
|
2309 |
+
iv.tag = Tag::Capsule;
|
2310 |
+
iv.payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
|
2311 |
+
return iv;
|
2312 |
+
}
|
2313 |
+
|
2314 |
+
template <
|
2315 |
+
typename T,
|
2316 |
+
std::enable_if_t<std::is_base_of<torch::CustomClassHolder, T>::value, int>>
|
2317 |
+
IValue::IValue(c10::intrusive_ptr<T> custom_class) : tag(Tag::Object) {
|
2318 |
+
auto classType = []() {
|
2319 |
+
try {
|
2320 |
+
return c10::getCustomClassType<c10::intrusive_ptr<T>>();
|
2321 |
+
} catch (const c10::Error&) {
|
2322 |
+
throw c10::Error(
|
2323 |
+
"Trying to instantiate a class that isn't a registered custom class: " +
|
2324 |
+
std::string(c10::util::get_fully_qualified_type_name<T>()),
|
2325 |
+
"");
|
2326 |
+
}
|
2327 |
+
}();
|
2328 |
+
auto ivalue_obj = c10::ivalue::Object::create(std::move(classType), /* numSlots */1);
|
2329 |
+
ivalue_obj->setSlot(0, IValue::make_capsule(std::move(custom_class)));
|
2330 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(ivalue_obj.release());
|
2331 |
+
|
2332 |
+
}
|
2333 |
+
|
2334 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::Future> v)
|
2335 |
+
: tag(Tag::Future) {
|
2336 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2337 |
+
}
|
2338 |
+
|
2339 |
+
inline IValue::IValue(c10::intrusive_ptr<ivalue::Await> v)
|
2340 |
+
: tag(Tag::Await) {
|
2341 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2342 |
+
}
|
2343 |
+
|
2344 |
+
inline IValue::IValue(c10::intrusive_ptr<c10::RRefInterface> v)
|
2345 |
+
: tag(Tag::RRef) {
|
2346 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2347 |
+
}
|
2348 |
+
|
2349 |
+
inline IValue::IValue(c10::intrusive_ptr<at::Quantizer> v)
|
2350 |
+
: tag(Tag::Quantizer) {
|
2351 |
+
payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
|
2352 |
+
}
|
2353 |
+
|
2354 |
+
template <typename T>
|
2355 |
+
inline IValue::IValue(c10::complex<T> c)
|
2356 |
+
: tag(Tag::ComplexDouble) {
|
2357 |
+
auto v = c10::make_intrusive<ivalue::ComplexHolder>(c);
|
2358 |
+
payload.u.as_intrusive_ptr = v.release();
|
2359 |
+
}
|
2360 |
+
|
2361 |
+
inline const std::string& IValue::toStringRef() const {
|
2362 |
+
AT_ASSERT(isString(), "Expected String but got ", tagKind());
|
2363 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2364 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2365 |
+
"called toStringRef on null intrusive_ptr IValue");
|
2366 |
+
return static_cast<const c10::ivalue::ConstantString*>(
|
2367 |
+
payload.u.as_intrusive_ptr)
|
2368 |
+
->string();
|
2369 |
+
}
|
2370 |
+
inline c10::optional<std::reference_wrapper<const std::string>> IValue::
|
2371 |
+
toOptionalStringRef() const {
|
2372 |
+
if (isNone()) {
|
2373 |
+
return c10::nullopt;
|
2374 |
+
}
|
2375 |
+
AT_ASSERT(isString(), "Expected optional<string> but got ", tagKind());
|
2376 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2377 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2378 |
+
"called toOptionalStringRef on null intrusive_ptr IValue");
|
2379 |
+
return std::reference_wrapper<const std::string>(
|
2380 |
+
static_cast<const c10::ivalue::ConstantString*>(payload.u.as_intrusive_ptr)
|
2381 |
+
->string());
|
2382 |
+
}
|
2383 |
+
|
2384 |
+
inline c10::string_view IValue::toStringView() const {
|
2385 |
+
AT_ASSERT(isString(), "Expected String but got ", tagKind());
|
2386 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
2387 |
+
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
|
2388 |
+
"called toStringView on null intrusive_ptr IValue");
|
2389 |
+
return static_cast<const c10::ivalue::ConstantString*>(
|
2390 |
+
payload.u.as_intrusive_ptr)
|
2391 |
+
->string_view();
|
2392 |
+
}
|
2393 |
+
|
2394 |
+
inline PyObject* IValue::toPyObject() const {
|
2395 |
+
return toPyObjectHolder()->getPyObject();
|
2396 |
+
}
|
2397 |
+
|
2398 |
+
template <typename T>
|
2399 |
+
inline optional<T> IValue::toOptional() {
|
2400 |
+
if (this->isNone()) {
|
2401 |
+
return nullopt;
|
2402 |
+
}
|
2403 |
+
return this->to<T>();
|
2404 |
+
}
|
2405 |
+
|
2406 |
+
template <typename T>
|
2407 |
+
inline optional<T> IValue::toOptional() const {
|
2408 |
+
if (this->isNone()) {
|
2409 |
+
return nullopt;
|
2410 |
+
}
|
2411 |
+
return this->to<T>();
|
2412 |
+
}
|
2413 |
+
|
2414 |
+
inline bool IValue::isCustomClass() const {
|
2415 |
+
return torch::isCustomClass(*this);
|
2416 |
+
}
|
2417 |
+
|
2418 |
+
inline bool IValue::isSameIdentity(const IValue& rhs) const {
|
2419 |
+
// We choose to not use memcmp for payload check due to potential random
|
2420 |
+
// padding characters on union type
|
2421 |
+
|
2422 |
+
// Semantics:
|
2423 |
+
// 1. Immutable primitive values of the same type (Int, Double, None, Bool,
|
2424 |
+
// Str) return value equality
|
2425 |
+
// 2. If it is a tensor type, we need to take undefined tensor into account
|
2426 |
+
// 3. Undefined_tensor is None and vice versa should be true
|
2427 |
+
// 4. If it is a reference type (i.e. isIntrusivePtr()), then is True when
|
2428 |
+
// the pointed-to object is the same.
|
2429 |
+
// 5. False for all other comparisons.
|
2430 |
+
if (this->isNone() && rhs.isNone()) {
|
2431 |
+
return true;
|
2432 |
+
} else if (this->isBool() && rhs.isBool()) {
|
2433 |
+
// for bool type, do equality check
|
2434 |
+
return this->toBool() == rhs.toBool();
|
2435 |
+
} else if (this->isTensor() && rhs.isTensor()) {
|
2436 |
+
return this->payload.as_tensor.is_same(rhs.payload.as_tensor);
|
2437 |
+
} else if (this->isTensor() && rhs.isNone()) {
|
2438 |
+
// special case: undefined tensor and None are the same identity
|
2439 |
+
return !this->payload.as_tensor.defined();
|
2440 |
+
} else if (this->isNone() && rhs.isTensor()) {
|
2441 |
+
// special case: undefined tensor and None are the same identity
|
2442 |
+
return !rhs.payload.as_tensor.defined();
|
2443 |
+
} else if (this->isInt() && rhs.isInt()) {
|
2444 |
+
return this->toInt() == rhs.toInt();
|
2445 |
+
} else if (this->isDouble() && rhs.isDouble()) {
|
2446 |
+
return this->toDouble() == rhs.toDouble();
|
2447 |
+
} else if (this->isString() && rhs.isString()) {
|
2448 |
+
return this->toStringRef() == rhs.toStringRef();
|
2449 |
+
} else {
|
2450 |
+
// for objects held in the IValue, do a shallow compare on the pointer address to
|
2451 |
+
// verify identity
|
2452 |
+
return this->isIntrusivePtr() && rhs.isIntrusivePtr() &&
|
2453 |
+
this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr;
|
2454 |
+
}
|
2455 |
+
}
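// Illustrative sketch (not part of the original header) of the semantics
// listed above:
//
//   at::Tensor undef;                                        // undefined tensor
//   IValue(undef).isSameIdentity(IValue());                  // true: undefined ~ None
//   IValue(int64_t(1)).isSameIdentity(IValue(int64_t(1)));   // true: value equality
//   // Two distinct (but equal) lists compare false: identity, not deep equality.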
|
2456 |
+
|
2457 |
+
namespace ivalue {
|
2458 |
+
namespace detail {
|
2459 |
+
|
2460 |
+
template <typename T>
|
2461 |
+
IValue from_(T&& x, std::true_type) {
|
2462 |
+
return IValue(std::forward<T>(x));
|
2463 |
+
}
|
2464 |
+
template <typename T>
|
2465 |
+
IValue from_(c10::intrusive_ptr<T> x, std::false_type) {
|
2466 |
+
return IValue(std::move(x));
|
2467 |
+
}
|
2468 |
+
template <typename T>
|
2469 |
+
IValue from_(T&& /*x*/, std::false_type) {
|
2470 |
+
static_assert(
|
2471 |
+
guts::false_t<T>::value,
|
2472 |
+
"You are calling from with a type that it doesn't support, and isn't a potential custom class (ie: is an intrusive_ptr)");
|
2473 |
+
return IValue();
|
2474 |
+
}
|
2475 |
+
} // namespace detail
|
2476 |
+
|
2477 |
+
template <typename T>
|
2478 |
+
IValue from(T&& x) {
|
2479 |
+
return detail::from_(
|
2480 |
+
std::forward<T>(x), typename std::is_constructible<IValue, T>::type{});
|
2481 |
+
}
|
2482 |
+
|
2483 |
+
} // namespace ivalue
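// Illustrative sketch (not part of the original header): ivalue::from wraps
// anything IValue is directly constructible from, and intrusive_ptrs to
// registered custom classes; other types fail to compile via the static_assert
// above.
//
//   c10::IValue a = c10::ivalue::from(int64_t(5));
//   c10::IValue b = c10::ivalue::from(std::string("hello"));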
|
2484 |
+
|
2485 |
+
|
2486 |
+
template <>
|
2487 |
+
struct MaybeOwnedTraits<IValue> {
|
2488 |
+
using owned_type = IValue;
|
2489 |
+
using borrow_type = IValue;
|
2490 |
+
|
2491 |
+
static borrow_type createBorrow(const owned_type& from) {
|
2492 |
+
if (!from.isPtrType()) {
|
2493 |
+
return from;
|
2494 |
+
}
|
2495 |
+
if (from.isTensor()) {
|
2496 |
+
return IValue(MaybeOwnedTraits<at::Tensor>::createBorrow(from.toTensor()));
|
2497 |
+
} else {
|
2498 |
+
return IValue(from.payload, from.tag);
|
2499 |
+
}
|
2500 |
+
}
|
2501 |
+
|
2502 |
+
static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
|
2503 |
+
lhs.clearToNone();
|
2504 |
+
if (!rhs.isPtrType()) {
|
2505 |
+
lhs = rhs;
|
2506 |
+
} else if (rhs.isTensor()) {
|
2507 |
+
lhs = IValue(MaybeOwnedTraits<at::Tensor>::createBorrow(rhs.toTensor()));
|
2508 |
+
} else {
|
2509 |
+
lhs = IValue(rhs.payload, rhs.tag);
|
2510 |
+
}
|
2511 |
+
}
|
2512 |
+
|
2513 |
+
static void destroyBorrow(borrow_type& toDestroy) {
|
2514 |
+
toDestroy.clearToNone();
|
2515 |
+
}
|
2516 |
+
|
2517 |
+
static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
|
2518 |
+
return borrow;
|
2519 |
+
}
|
2520 |
+
|
2521 |
+
static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
|
2522 |
+
return &borrow;
|
2523 |
+
}
|
2524 |
+
|
2525 |
+
static bool debugBorrowIsValid(const borrow_type&) {
|
2526 |
+
return true;
|
2527 |
+
}
|
2528 |
+
};
|
2529 |
+
|
2530 |
+
template <>
|
2531 |
+
struct IValue::TagType<c10::Type> {
|
2532 |
+
static TORCH_API c10::TypePtr get(const IValue&);
|
2533 |
+
};
|
2534 |
+
|
2535 |
+
template <>
|
2536 |
+
struct IValue::TagType<c10::DynamicType> {
|
2537 |
+
static TORCH_API c10::TypePtr get(const IValue&);
|
2538 |
+
};
|
2539 |
+
|
2540 |
+
template <typename T>
|
2541 |
+
TypePtr IValue::type() const {
|
2542 |
+
return IValue::TagType<T>::get(*this);
|
2543 |
+
}
|
2544 |
+
|
2545 |
+
} // namespace c10
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h
ADDED
@@ -0,0 +1,36 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <string>
|
4 |
+
|
5 |
+
namespace at {
|
6 |
+
class Tensor;
|
7 |
+
} // namespace at
|
8 |
+
|
9 |
+
namespace c10 {
|
10 |
+
struct IValue;
|
11 |
+
namespace detail {
|
12 |
+
// Determine the return type of `IValue::to() const &`. It's a const
|
13 |
+
// reference when possible and a copy otherwise. It is in this
|
14 |
+
// separate header so that List can use it as well.
|
15 |
+
template<typename T>
|
16 |
+
struct ivalue_to_const_ref_overload_return {
|
17 |
+
using type = T;
|
18 |
+
};
|
19 |
+
|
20 |
+
template<>
|
21 |
+
struct ivalue_to_const_ref_overload_return<at::Tensor> {
|
22 |
+
using type = const at::Tensor&;
|
23 |
+
};
|
24 |
+
|
25 |
+
template<>
|
26 |
+
struct ivalue_to_const_ref_overload_return<std::string> {
|
27 |
+
using type = const std::string&;
|
28 |
+
};
|
29 |
+
|
30 |
+
template<>
|
31 |
+
struct ivalue_to_const_ref_overload_return<IValue> {
|
32 |
+
using type = const IValue&;
|
33 |
+
};
|
34 |
+
|
35 |
+
} // namespace detail
|
36 |
+
} // namespace c10
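// Illustrative check (not part of the original header): the trait maps most
// types to themselves and a handful of heavy types to const references, e.g.
//
//   static_assert(std::is_same<
//       c10::detail::ivalue_to_const_ref_overload_return<int64_t>::type,
//       int64_t>::value, "");
//   static_assert(std::is_same<
//       c10::detail::ivalue_to_const_ref_overload_return<at::Tensor>::type,
//       const at::Tensor&>::value, "");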
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/qualified_name.h
ADDED
@@ -0,0 +1,161 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/util/ArrayRef.h>
|
4 |
+
#include <c10/util/Exception.h>
|
5 |
+
#include <c10/util/StringUtil.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
#include <string>
|
8 |
+
|
9 |
+
namespace c10 {
|
10 |
+
|
11 |
+
// Represents a name of the form "foo.bar.baz"
|
12 |
+
struct QualifiedName {
|
13 |
+
QualifiedName() = default;
|
14 |
+
|
15 |
+
// `name` can be a dotted string, like "foo.bar.baz", or just a bare name.
|
16 |
+
/* implicit */ QualifiedName(const std::string& name) {
|
17 |
+
TORCH_CHECK(!name.empty());
|
18 |
+
// split the string into its atoms.
|
19 |
+
size_t startSearchFrom = 0;
|
20 |
+
size_t pos = name.find(delimiter_, startSearchFrom);
|
21 |
+
|
22 |
+
while (pos != std::string::npos) {
|
23 |
+
auto atom = name.substr(startSearchFrom, pos - startSearchFrom);
|
24 |
+
TORCH_INTERNAL_ASSERT(
|
25 |
+
!atom.empty(), "Invalid name for qualified name: '", name, "'");
|
26 |
+
atoms_.push_back(std::move(atom));
|
27 |
+
startSearchFrom = pos + 1;
|
28 |
+
pos = name.find(delimiter_, startSearchFrom);
|
29 |
+
}
|
30 |
+
|
31 |
+
auto finalAtom = name.substr(startSearchFrom);
|
32 |
+
TORCH_INTERNAL_ASSERT(
|
33 |
+
!finalAtom.empty(), "Invalid name for qualified name: '", name, "'");
|
34 |
+
atoms_.emplace_back(std::move(finalAtom));
|
35 |
+
|
36 |
+
cacheAccessors();
|
37 |
+
}
|
38 |
+
|
39 |
+
explicit QualifiedName(std::vector<std::string> atoms) : atoms_(std::move(atoms)) {
|
40 |
+
for (const auto& atom : atoms_) {
|
41 |
+
TORCH_CHECK(!atom.empty(), "Atom cannot be empty");
|
42 |
+
TORCH_CHECK(
|
43 |
+
atom.find(delimiter_) == std::string::npos,
|
44 |
+
"Delimiter not allowed in atom");
|
45 |
+
}
|
46 |
+
|
47 |
+
cacheAccessors();
|
48 |
+
}
|
49 |
+
// Unnecessary copy. Ideally we'd use something like std::string_view.
|
50 |
+
/* implicit */ QualifiedName(const char* name)
|
51 |
+
: QualifiedName(std::string(name)) {}
|
52 |
+
|
53 |
+
// `name` must be a bare name (no dots!)
|
54 |
+
explicit QualifiedName(const QualifiedName& prefix, std::string name) {
|
55 |
+
TORCH_INTERNAL_ASSERT(!name.empty());
|
56 |
+
TORCH_INTERNAL_ASSERT(name.find(delimiter_) == std::string::npos);
|
57 |
+
atoms_.insert(atoms_.begin(), prefix.atoms_.begin(), prefix.atoms_.end());
|
58 |
+
atoms_.push_back(std::move(name));
|
59 |
+
|
60 |
+
cacheAccessors();
|
61 |
+
}
|
62 |
+
|
63 |
+
// Is `this` a prefix of `other`?
|
64 |
+
// For example, "foo.bar" is a prefix of "foo.bar.baz"
|
65 |
+
bool isPrefixOf(const QualifiedName& other) const {
|
66 |
+
const auto& thisAtoms = atoms_;
|
67 |
+
const auto& otherAtoms = other.atoms_;
|
68 |
+
|
69 |
+
if (thisAtoms.size() > otherAtoms.size()) {
|
70 |
+
// Can't be a prefix if it's bigger
|
71 |
+
return false;
|
72 |
+
}
|
73 |
+
for (const auto i : c10::irange(thisAtoms.size())) {
|
74 |
+
if (thisAtoms[i] != otherAtoms[i]) {
|
75 |
+
return false;
|
76 |
+
}
|
77 |
+
}
|
78 |
+
return true;
|
79 |
+
}
|
80 |
+
|
81 |
+
// The fully qualified name, like "foo.bar.baz"
|
82 |
+
const std::string& qualifiedName() const {
|
83 |
+
return qualifiedName_;
|
84 |
+
}
|
85 |
+
|
86 |
+
// The leading qualifier, like "foo.bar"
|
87 |
+
const std::string& prefix() const {
|
88 |
+
return prefix_;
|
89 |
+
}
|
90 |
+
|
91 |
+
// The base name, like "baz"
|
92 |
+
const std::string& name() const {
|
93 |
+
return name_;
|
94 |
+
}
|
95 |
+
|
96 |
+
const std::vector<std::string>& atoms() const {
|
97 |
+
return atoms_;
|
98 |
+
}
|
99 |
+
|
100 |
+
bool operator==(const QualifiedName& other) const {
|
101 |
+
return this->qualifiedName_ == other.qualifiedName_;
|
102 |
+
}
|
103 |
+
|
104 |
+
bool operator!=(const QualifiedName& other) const {
|
105 |
+
return !(*this == other);
|
106 |
+
}
|
107 |
+
|
108 |
+
private:
|
109 |
+
static constexpr char delimiter_ = '.';
|
110 |
+
|
111 |
+
// Helper for cacheAccessors() below.
|
112 |
+
template<typename T>
|
113 |
+
std::string join(char delimiter, const T& v) {
|
114 |
+
std::string out;
|
115 |
+
size_t reserve = 0;
|
116 |
+
for (const auto& e : v) {
|
117 |
+
reserve += e.size() + 1;
|
118 |
+
}
|
119 |
+
out.reserve(reserve);
|
120 |
+
for (const auto i : c10::irange(v.size())) {
|
121 |
+
if (i != 0) {
|
122 |
+
out.push_back(delimiter);
|
123 |
+
}
|
124 |
+
out.append(v[i]);
|
125 |
+
}
|
126 |
+
return out;
|
127 |
+
}
|
128 |
+
|
129 |
+
void cacheAccessors() {
|
130 |
+
qualifiedName_ = join(delimiter_, atoms_);
|
131 |
+
if (atoms_.size() > 1) {
|
132 |
+
ArrayRef<std::string> view(atoms_);
|
133 |
+
const auto prefixView = view.slice(0, view.size() - 1);
|
134 |
+
prefix_ = join(delimiter_, prefixView);
|
135 |
+
}
|
136 |
+
|
137 |
+
if (!atoms_.empty()) {
|
138 |
+
name_ = atoms_.back();
|
139 |
+
}
|
140 |
+
}
|
141 |
+
|
142 |
+
// The actual list of names, like "{foo, bar, baz}"
|
143 |
+
std::vector<std::string> atoms_;
|
144 |
+
|
145 |
+
/*
|
146 |
+
* Cached accessors, derived from `atoms_`.
|
147 |
+
*/
|
148 |
+
std::string qualifiedName_;
|
149 |
+
std::string prefix_;
|
150 |
+
std::string name_;
|
151 |
+
};
|
152 |
+
} // namespace c10
|
153 |
+
|
154 |
+
namespace std {
|
155 |
+
template <>
|
156 |
+
struct hash<c10::QualifiedName> {
|
157 |
+
size_t operator()(const c10::QualifiedName& n) const noexcept {
|
158 |
+
return std::hash<std::string>()(n.qualifiedName());
|
159 |
+
}
|
160 |
+
};
|
161 |
+
} // namespace std
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/type_factory.h
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <type_traits>
|
4 |
+
#include <unordered_map>
|
5 |
+
|
6 |
+
#include <ATen/core/dynamic_type.h>
|
7 |
+
#include <ATen/core/jit_type_base.h>
|
8 |
+
#include <c10/macros/Macros.h>
|
9 |
+
|
10 |
+
namespace c10 {
|
11 |
+
|
12 |
+
template <typename T>
|
13 |
+
struct TORCH_API TypeFactoryBase {};
|
14 |
+
|
15 |
+
template <>
|
16 |
+
struct TORCH_API TypeFactoryBase<c10::DynamicType> {
|
17 |
+
template <typename T, typename... Args>
|
18 |
+
static c10::DynamicTypePtr create(TypePtr ty, Args&&... args) {
|
19 |
+
return std::make_shared<c10::DynamicType>(
|
20 |
+
c10::DynamicTypeTrait<T>::tagValue(),
|
21 |
+
c10::DynamicType::Arguments(c10::ArrayRef<c10::TypePtr>(
|
22 |
+
{std::move(ty), std::forward<Args>(args)...})));
|
23 |
+
}
|
24 |
+
template <typename T>
|
25 |
+
static c10::DynamicTypePtr create(const std::vector<c10::TypePtr>& types) {
|
26 |
+
return std::make_shared<c10::DynamicType>(
|
27 |
+
c10::DynamicTypeTrait<T>::tagValue(),
|
28 |
+
c10::DynamicType::Arguments(types));
|
29 |
+
}
|
30 |
+
static c10::DynamicTypePtr createNamedTuple(
|
31 |
+
const std::string& name,
|
32 |
+
const std::vector<c10::string_view>& fields,
|
33 |
+
const std::vector<c10::TypePtr>& types) {
|
34 |
+
return std::make_shared<c10::DynamicType>(
|
35 |
+
c10::DynamicType::Tag::Tuple,
|
36 |
+
name,
|
37 |
+
c10::DynamicType::Arguments(fields, types));
|
38 |
+
}
|
39 |
+
template <typename T>
|
40 |
+
C10_ERASE static c10::DynamicTypePtr createNamed(const std::string& name) {
|
41 |
+
return std::make_shared<c10::DynamicType>(
|
42 |
+
c10::DynamicTypeTrait<T>::tagValue(),
|
43 |
+
name,
|
44 |
+
c10::DynamicType::Arguments{});
|
45 |
+
}
|
46 |
+
template <typename T>
|
47 |
+
C10_ERASE static c10::DynamicTypePtr get() {
|
48 |
+
return DynamicTypeTrait<T>::getBaseType();
|
49 |
+
}
|
50 |
+
static const std::unordered_map<std::string, c10::TypePtr>& basePythonTypes();
|
51 |
+
};
|
52 |
+
|
53 |
+
using DynamicTypeFactory = TypeFactoryBase<c10::DynamicType>;
|
54 |
+
|
55 |
+
// Helper functions for constructing DynamicTypes inline.
|
56 |
+
template <
|
57 |
+
typename T,
|
58 |
+
std::enable_if_t<DynamicTypeTrait<T>::isBaseType, int> = 0>
|
59 |
+
C10_ERASE DynamicTypePtr dynT() {
|
60 |
+
return DynamicTypeFactory::get<T>();
|
61 |
+
}
|
62 |
+
|
63 |
+
template <
|
64 |
+
typename T,
|
65 |
+
typename... Args,
|
66 |
+
std::enable_if_t<!DynamicTypeTrait<T>::isBaseType, int> = 0>
|
67 |
+
C10_ERASE DynamicTypePtr dynT(Args&&... args) {
|
68 |
+
return DynamicTypeFactory::create<T>(std::forward<Args>(args)...);
|
69 |
+
}
|
70 |
+
|
71 |
+
template <>
|
72 |
+
struct TORCH_API TypeFactoryBase<c10::Type> {
|
73 |
+
template <typename T, typename... Args>
|
74 |
+
static c10::TypePtr create(TypePtr ty, Args&&... args) {
|
75 |
+
return T::create(std::move(ty), std::forward<Args>(args)...);
|
76 |
+
}
|
77 |
+
template <typename T>
|
78 |
+
static c10::TypePtr create(std::vector<c10::TypePtr> types) {
|
79 |
+
return T::create(std::move(types));
|
80 |
+
}
|
81 |
+
static c10::TypePtr createNamedTuple(
|
82 |
+
const std::string& name,
|
83 |
+
const std::vector<c10::string_view>& fields,
|
84 |
+
const std::vector<c10::TypePtr>& types);
|
85 |
+
template <typename T>
|
86 |
+
C10_ERASE static c10::TypePtr createNamed(const std::string& name) {
|
87 |
+
return T::create(name);
|
88 |
+
}
|
89 |
+
static const std::unordered_map<std::string, c10::TypePtr>& basePythonTypes();
|
90 |
+
template <typename T>
|
91 |
+
C10_ERASE static c10::TypePtr get() {
|
92 |
+
return T::get();
|
93 |
+
}
|
94 |
+
};
|
95 |
+
|
96 |
+
using DefaultTypeFactory = TypeFactoryBase<c10::Type>;
|
97 |
+
|
98 |
+
using PlatformType =
|
99 |
+
#ifdef C10_MOBILE
|
100 |
+
c10::DynamicType
|
101 |
+
#else
|
102 |
+
c10::Type
|
103 |
+
#endif
|
104 |
+
;
|
105 |
+
|
106 |
+
using TypeFactory = TypeFactoryBase<PlatformType>;
|
107 |
+
|
108 |
+
} // namespace c10
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <memory>
|
4 |
+
#include <type_traits>
|
5 |
+
|
6 |
+
#include <c10/util/Exception.h>
|
7 |
+
#include <c10/util/MaybeOwned.h>
|
8 |
+
|
9 |
+
namespace c10 {
|
10 |
+
|
11 |
+
// Compatibility wrapper around a raw pointer so that existing code
|
12 |
+
// written to deal with a shared_ptr can keep working.
|
13 |
+
template <typename T>
|
14 |
+
class SingletonTypePtr {
|
15 |
+
public:
|
16 |
+
/* implicit */ SingletonTypePtr(T* p) : repr_(p) {}
|
17 |
+
|
18 |
+
// We need this to satisfy Pybind11, but it shouldn't be hit.
|
19 |
+
explicit SingletonTypePtr(std::shared_ptr<T>) { TORCH_CHECK(false); }
|
20 |
+
|
21 |
+
using element_type = typename std::shared_ptr<T>::element_type;
|
22 |
+
|
23 |
+
template <typename U = T, std::enable_if_t<!std::is_same<std::remove_const_t<U>, void>::value, bool> = true>
|
24 |
+
T& operator*() const {
|
25 |
+
return *repr_;
|
26 |
+
}
|
27 |
+
|
28 |
+
T* get() const {
|
29 |
+
return repr_;
|
30 |
+
}
|
31 |
+
|
32 |
+
T* operator->() const {
|
33 |
+
return repr_;
|
34 |
+
}
|
35 |
+
|
36 |
+
operator bool() const {
|
37 |
+
return repr_ != nullptr;
|
38 |
+
}
|
39 |
+
|
40 |
+
private:
|
41 |
+
T* repr_{nullptr};
|
42 |
+
};
|
43 |
+
|
44 |
+
template <typename T, typename U>
|
45 |
+
bool operator==(SingletonTypePtr<T> lhs, SingletonTypePtr<U> rhs) {
|
46 |
+
return (void*)lhs.get() == (void*)rhs.get();
|
47 |
+
}
|
48 |
+
|
49 |
+
template <typename T, typename U>
|
50 |
+
bool operator!=(SingletonTypePtr<T> lhs, SingletonTypePtr<U> rhs) {
|
51 |
+
return !(lhs == rhs);
|
52 |
+
}
|
53 |
+
|
54 |
+
} // namespace c10
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/typeid.h
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
#include <c10/util/typeid.h>
|
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h
ADDED
@@ -0,0 +1,1096 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
4 |
+
// See Note [Do not compile initializers with AVX]
|
5 |
+
|
6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
8 |
+
#include <c10/util/irange.h>
|
9 |
+
|
10 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
11 |
+
#include <sleef.h>
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#pragma GCC diagnostic push
|
15 |
+
#pragma GCC diagnostic ignored "-Wignored-qualifiers"
|
16 |
+
|
17 |
+
namespace at::vec {
|
18 |
+
// See Note [CPU_CAPABILITY namespace]
|
19 |
+
inline namespace CPU_CAPABILITY {
|
20 |
+
|
21 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
22 |
+
|
23 |
+
// bfloat16 conversion
|
24 |
+
static inline void cvtbf16_fp32(const __m128i& a, __m256& o) {
|
25 |
+
o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16));
|
26 |
+
}
|
27 |
+
|
28 |
+
static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
|
29 |
+
__m128i lo = _mm256_extractf128_si256(a, 0);
|
30 |
+
__m128i hi = _mm256_extractf128_si256(a, 1);
|
31 |
+
cvtbf16_fp32(lo, o1);
|
32 |
+
cvtbf16_fp32(hi, o2);
|
33 |
+
}
|
34 |
+
static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) {
|
35 |
+
__m256i lo = _mm256_castps_si256(a);
|
36 |
+
__m256i hi = _mm256_castps_si256(b);
|
37 |
+
__m256i nan = _mm256_set1_epi32(0xffff);
|
38 |
+
__m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q));
|
39 |
+
__m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q));
|
40 |
+
__m256i ones = _mm256_set1_epi32(0x1);
|
41 |
+
__m256i vec_bias = _mm256_set1_epi32(0x7fff);
|
42 |
+
// uint32_t lsb = (input >> 16) & 1;
|
43 |
+
auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones);
|
44 |
+
auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones);
|
45 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
46 |
+
t_lo = _mm256_add_epi32(t_lo, vec_bias);
|
47 |
+
t_hi = _mm256_add_epi32(t_hi, vec_bias);
|
48 |
+
// input += rounding_bias;
|
49 |
+
t_lo = _mm256_add_epi32(t_lo, lo);
|
50 |
+
t_hi = _mm256_add_epi32(t_hi, hi);
|
51 |
+
// input = input >> 16;
|
52 |
+
t_lo = _mm256_srli_epi32(t_lo, 16);
|
53 |
+
t_hi = _mm256_srli_epi32(t_hi, 16);
|
54 |
+
// Check NaN before converting back to bf16
|
55 |
+
t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo);
|
56 |
+
t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi);
|
57 |
+
|
58 |
+
t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
|
59 |
+
return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00
|
60 |
+
}
|
61 |
+
|
62 |
+
static inline __m256i merge_compare_result(const __m256& a, const __m256& b) {
|
63 |
+
__m256i lo = _mm256_castps_si256(a);
|
64 |
+
__m256i hi = _mm256_castps_si256(b);
|
65 |
+
lo = _mm256_srli_epi32(lo, 16);
|
66 |
+
hi = _mm256_srli_epi32(hi, 16);
|
67 |
+
auto out = _mm256_packus_epi32(lo, hi);
|
68 |
+
return _mm256_permute4x64_epi64(out, 0xd8);
|
69 |
+
}
|
70 |
+
|
71 |
+
// float16 conversion
|
72 |
+
static inline void cvtfp16_fp32(const __m128i& a, __m256& o) {
|
73 |
+
o = _mm256_cvtph_ps(a);
|
74 |
+
}
|
75 |
+
|
76 |
+
static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
|
77 |
+
__m128i lo = _mm256_extractf128_si256(a, 0);
|
78 |
+
__m128i hi = _mm256_extractf128_si256(a, 1);
|
79 |
+
cvtfp16_fp32(lo, o1);
|
80 |
+
cvtfp16_fp32(hi, o2);
|
81 |
+
}
|
82 |
+
|
83 |
+
static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) {
|
84 |
+
__m128i lo = _mm256_cvtps_ph(
|
85 |
+
a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
86 |
+
__m128i hi = _mm256_cvtps_ph(
|
87 |
+
b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
88 |
+
return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
|
89 |
+
}
|
90 |
+
|
91 |
+
// dtype conversion between float16/bfloat16 and float32
|
92 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
93 |
+
inline void cvt_to_fp32(const __m128i& a, __m256& o);
|
94 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m128i& a, __m256& o) {
|
95 |
+
cvtbf16_fp32(a, o);
|
96 |
+
};
|
97 |
+
template <> inline void cvt_to_fp32<Half>(const __m128i& a, __m256& o) {
|
98 |
+
cvtfp16_fp32(a, o);
|
99 |
+
}
|
100 |
+
|
101 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
102 |
+
inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2);
|
103 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m256& o1, __m256& o2) {
|
104 |
+
cvtbf16_fp32(a, o1, o2);
|
105 |
+
}
|
106 |
+
template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m256& o1, __m256& o2) {
|
107 |
+
cvtfp16_fp32(a, o1, o2);
|
108 |
+
}
|
109 |
+
|
110 |
+
template <typename T, bool is_compare_op = false,
|
111 |
+
typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
112 |
+
inline __m256i cvt_from_fp32(const __m256& a, const __m256& b);
|
113 |
+
template <> inline __m256i cvt_from_fp32<BFloat16, false>(const __m256& a, const __m256& b) {
|
114 |
+
return cvtfp32_bf16(a, b);
|
115 |
+
}
|
116 |
+
template <> inline __m256i cvt_from_fp32<BFloat16, true>(const __m256& a, const __m256& b) {
|
117 |
+
return merge_compare_result(a, b);
|
118 |
+
}
|
119 |
+
template <> inline __m256i cvt_from_fp32<Half, false>(const __m256& a, const __m256& b) {
|
120 |
+
return cvtfp32_fp16(a, b);
|
121 |
+
}
|
122 |
+
template <> inline __m256i cvt_from_fp32<Half, true>(const __m256& a, const __m256& b) {
|
123 |
+
return cvtfp32_fp16(a, b);
|
124 |
+
}
|
125 |
+
|
126 |
+
template <typename T>
|
127 |
+
class Vectorized16 {
|
128 |
+
static_assert(
|
129 |
+
is_reduced_floating_point_v<T>,
|
130 |
+
"Support only float16 and bfloat16.");
|
131 |
+
protected:
|
132 |
+
__m256i values;
|
133 |
+
public:
|
134 |
+
using value_type = uint16_t;
|
135 |
+
using size_type = int;
|
136 |
+
static constexpr size_type size() {
|
137 |
+
return 16;
|
138 |
+
}
|
139 |
+
Vectorized16() {}
|
140 |
+
Vectorized16(__m256i v) : values(v) {}
|
141 |
+
Vectorized16(T val) {
|
142 |
+
value_type uw = val.x;
|
143 |
+
values = _mm256_set1_epi16(uw);
|
144 |
+
}
|
145 |
+
Vectorized16(T val1, T val2, T val3, T val4,
|
146 |
+
T val5, T val6, T val7, T val8,
|
147 |
+
T val9, T val10, T val11, T val12,
|
148 |
+
T val13, T val14, T val15, T val16) {
|
149 |
+
values = _mm256_setr_epi16(
|
150 |
+
val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x,
|
151 |
+
val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x);
|
152 |
+
}
|
153 |
+
operator __m256i() const {
|
154 |
+
return values;
|
155 |
+
}
|
156 |
+
T& operator[](int idx) = delete;
|
157 |
+
const T& operator[](int idx) const = delete;
|
158 |
+
int zero_mask() const {
|
159 |
+
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
|
160 |
+
__m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0));
|
161 |
+
return _mm256_movemask_epi8(cmp);
|
162 |
+
}
|
163 |
+
static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
|
164 |
+
if (count == size())
|
165 |
+
return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
|
166 |
+
|
167 |
+
__at_align__ int16_t tmp_values[size()];
|
168 |
+
std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
|
169 |
+
return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(tmp_values));
|
170 |
+
}
|
171 |
+
void store(void* ptr, int count = size()) const {
|
172 |
+
if (count == size()) {
|
173 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
|
174 |
+
} else if (count > 0) {
|
175 |
+
__at_align__ int16_t tmp_values[size()];
|
176 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
|
177 |
+
std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
|
178 |
+
}
|
179 |
+
}
|
180 |
+
template <int64_t mask>
|
181 |
+
static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
|
182 |
+
__at_align__ int16_t tmp_values[size()];
|
183 |
+
a.store(tmp_values);
|
184 |
+
if (mask & 0x01)
|
185 |
+
tmp_values[0] = _mm256_extract_epi16(b.values, 0);
|
186 |
+
if (mask & 0x02)
|
187 |
+
tmp_values[1] = _mm256_extract_epi16(b.values, 1);
|
188 |
+
if (mask & 0x04)
|
189 |
+
tmp_values[2] = _mm256_extract_epi16(b.values, 2);
|
190 |
+
if (mask & 0x08)
|
191 |
+
tmp_values[3] = _mm256_extract_epi16(b.values, 3);
|
192 |
+
if (mask & 0x10)
|
193 |
+
tmp_values[4] = _mm256_extract_epi16(b.values, 4);
|
194 |
+
if (mask & 0x20)
|
195 |
+
tmp_values[5] = _mm256_extract_epi16(b.values, 5);
|
196 |
+
if (mask & 0x40)
|
197 |
+
tmp_values[6] = _mm256_extract_epi16(b.values, 6);
|
198 |
+
if (mask & 0x80)
|
199 |
+
tmp_values[7] = _mm256_extract_epi16(b.values, 7);
|
200 |
+
if (mask & 0x100)
|
201 |
+
tmp_values[8] = _mm256_extract_epi16(b.values, 8);
|
202 |
+
if (mask & 0x200)
|
203 |
+
tmp_values[9] = _mm256_extract_epi16(b.values, 9);
|
204 |
+
if (mask & 0x400)
|
205 |
+
tmp_values[10] = _mm256_extract_epi16(b.values, 10);
|
206 |
+
if (mask & 0x800)
|
207 |
+
tmp_values[11] = _mm256_extract_epi16(b.values, 11);
|
208 |
+
if (mask & 0x1000)
|
209 |
+
tmp_values[12] = _mm256_extract_epi16(b.values, 12);
|
210 |
+
if (mask & 0x2000)
|
211 |
+
tmp_values[13] = _mm256_extract_epi16(b.values, 13);
|
212 |
+
if (mask & 0x4000)
|
213 |
+
tmp_values[14] = _mm256_extract_epi16(b.values, 14);
|
214 |
+
if (mask & 0x8000)
|
215 |
+
tmp_values[15] = _mm256_extract_epi16(b.values, 15);
|
216 |
+
return loadu(tmp_values);
|
217 |
+
}
|
218 |
+
static Vectorized<T> blendv(const Vectorized<T>& a,
|
219 |
+
const Vectorized<T>& b, const Vectorized<T>& mask) {
|
220 |
+
return _mm256_blendv_epi8(a.values, b.values, mask.values);
|
221 |
+
}
|
222 |
+
template<typename step_t>
|
223 |
+
static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
|
224 |
+
return Vectorized<T>(
|
225 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
226 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
227 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
228 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
|
229 |
+
}
|
230 |
+
static Vectorized<T> set(const Vectorized<T>& a,
|
231 |
+
const Vectorized<T>& b, int64_t count = size()) {
|
232 |
+
switch (count) {
|
233 |
+
case 0:
|
234 |
+
return a;
|
235 |
+
case 1:
|
236 |
+
return blend<1>(a, b);
|
237 |
+
case 2:
|
238 |
+
return blend<3>(a, b);
|
239 |
+
case 3:
|
240 |
+
return blend<7>(a, b);
|
241 |
+
case 4:
|
242 |
+
return blend<15>(a, b);
|
243 |
+
case 5:
|
244 |
+
return blend<31>(a, b);
|
245 |
+
case 6:
|
246 |
+
return blend<63>(a, b);
|
247 |
+
case 7:
|
248 |
+
return blend<127>(a, b);
|
249 |
+
case 8:
|
250 |
+
return blend<255>(a, b);
|
251 |
+
case 9:
|
252 |
+
return blend<511>(a, b);
|
253 |
+
case 10:
|
254 |
+
return blend<1023>(a, b);
|
255 |
+
case 11:
|
256 |
+
return blend<2047>(a, b);
|
257 |
+
case 12:
|
258 |
+
return blend<4095>(a, b);
|
259 |
+
case 13:
|
260 |
+
return blend<8191>(a, b);
|
261 |
+
case 14:
|
262 |
+
return blend<16383>(a, b);
|
263 |
+
case 15:
|
264 |
+
return blend<32767>(a, b);
|
265 |
+
}
|
266 |
+
return b;
|
267 |
+
}
|
268 |
+
Vectorized<T> map(const __m256 (*const vop)(__m256)) const {
|
269 |
+
__m256 lo, hi;
|
270 |
+
cvt_to_fp32<T>(values, lo, hi);
|
271 |
+
const auto o1 = vop(lo);
|
272 |
+
const auto o2 = vop(hi);
|
273 |
+
return cvt_from_fp32<T>(o1, o2);
|
274 |
+
}
|
275 |
+
Vectorized<T> isnan() const {
|
276 |
+
__m256 lo, hi;
|
277 |
+
cvt_to_fp32<T>(values, lo, hi);
|
278 |
+
lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
|
279 |
+
hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
|
280 |
+
return merge_compare_result(lo, hi);
|
281 |
+
}
|
282 |
+
Vectorized<T> abs() const {
|
283 |
+
return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values);
|
284 |
+
}
|
285 |
+
Vectorized<T> angle() const {
|
286 |
+
__m256 lo, hi;
|
287 |
+
cvt_to_fp32<T>(values, lo, hi);
|
288 |
+
auto angle_lambda = [](__m256 values_2) {
|
289 |
+
const auto zero_vec = _mm256_set1_ps(0.f);
|
290 |
+
const auto nan_vec = _mm256_set1_ps(NAN);
|
291 |
+
const auto not_nan_mask = _mm256_cmp_ps(values_2, values_2, _CMP_EQ_OQ);
|
292 |
+
const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
|
293 |
+
const auto pi = _mm256_set1_ps(c10::pi<float>);
|
294 |
+
|
295 |
+
const auto neg_mask = _mm256_cmp_ps(values_2, zero_vec, _CMP_LT_OQ);
|
296 |
+
auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
|
297 |
+
angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
|
298 |
+
return angle;
|
299 |
+
};
|
300 |
+
auto o1 = angle_lambda(lo);
|
301 |
+
auto o2 = angle_lambda(hi);
|
302 |
+
return cvt_from_fp32<T>(o1, o2);
|
303 |
+
}
|
304 |
+
Vectorized<T> real() const {
|
305 |
+
return *this;
|
306 |
+
}
|
307 |
+
Vectorized<T> imag() const {
|
308 |
+
return _mm256_set1_epi16(0);
|
309 |
+
}
|
310 |
+
Vectorized<T> conj() const {
|
311 |
+
return *this;
|
312 |
+
}
|
313 |
+
Vectorized<T> acos() const {
|
314 |
+
return map(Sleef_acosf8_u10);
|
315 |
+
}
|
316 |
+
Vectorized<T> acosh() const {
|
317 |
+
return map(Sleef_acoshf8_u10);
|
318 |
+
}
|
319 |
+
Vectorized<T> asin() const {
|
320 |
+
return map(Sleef_asinf8_u10);
|
321 |
+
}
|
322 |
+
Vectorized<T> atan() const {
|
323 |
+
return map(Sleef_atanf8_u10);
|
324 |
+
}
|
325 |
+
Vectorized<T> atanh() const {
|
326 |
+
return map(Sleef_atanhf8_u10);
|
327 |
+
}
|
328 |
+
Vectorized<T> atan2(const Vectorized<T> &b) const {
|
329 |
+
__m256 lo, hi;
|
330 |
+
__m256 b1, b2;
|
331 |
+
cvt_to_fp32<T>(values, lo, hi);
|
332 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
333 |
+
auto o1 = Sleef_atan2f8_u10(lo, b1);
|
334 |
+
auto o2 = Sleef_atan2f8_u10(hi, b2);
|
335 |
+
return cvt_from_fp32<T>(o1, o2);
|
336 |
+
}
|
337 |
+
Vectorized<T> copysign(const Vectorized<T> &sign) const {
|
338 |
+
// copy sign bit (0x8000) from sign and remaining bits from values
|
339 |
+
__m256i mask_value = _mm256_set1_epi32(~0x80008000);
|
340 |
+
__m256i mask_signbit = _mm256_set1_epi32(0x80008000);
|
341 |
+
return Vectorized<T>(
|
342 |
+
_mm256_or_si256(
|
343 |
+
_mm256_and_si256(values, mask_value),
|
344 |
+
_mm256_and_si256(sign, mask_signbit)));
|
345 |
+
}
|
346 |
+
Vectorized<T> erf() const {
|
347 |
+
return map(Sleef_erff8_u10);
|
348 |
+
}
|
349 |
+
Vectorized<T> erfc() const {
|
350 |
+
return map(Sleef_erfcf8_u15);
|
351 |
+
}
|
352 |
+
Vectorized<T> erfinv() const {
|
353 |
+
__m256 lo, hi;
|
354 |
+
cvt_to_fp32<T>(values, lo, hi);
|
355 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
356 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
357 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
358 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
359 |
+
tmp1[i] = calc_erfinv(tmp1[i]);
|
360 |
+
tmp2[i] = calc_erfinv(tmp2[i]);
|
361 |
+
}
|
362 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
363 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
364 |
+
return cvt_from_fp32<T>(o1, o2);
|
365 |
+
}
|
366 |
+
Vectorized<T> exp() const {
|
367 |
+
return map(Sleef_expf8_u10);
|
368 |
+
}
|
369 |
+
Vectorized<T> exp2() const {
|
370 |
+
return map(Sleef_exp2f8_u10);
|
371 |
+
}
|
372 |
+
Vectorized<T> expm1() const {
|
373 |
+
return map(Sleef_expm1f8_u10);
|
374 |
+
}
|
375 |
+
Vectorized<T> exp_u20() const {
|
376 |
+
return exp();
|
377 |
+
}
|
378 |
+
Vectorized<T> fmod(const Vectorized<T> & q) const {
|
379 |
+
__m256 x_lo, x_hi;
|
380 |
+
cvt_to_fp32<T>(values, x_lo, x_hi);
|
381 |
+
__m256 q_lo, q_hi;
|
382 |
+
cvt_to_fp32<T>(q.values, q_lo, q_hi);
|
383 |
+
auto o1 = Sleef_fmodf8(x_lo, q_lo);
|
384 |
+
auto o2 = Sleef_fmodf8(x_hi, q_hi);
|
385 |
+
return cvt_from_fp32<T>(o1, o2);
|
386 |
+
}
|
387 |
+
Vectorized<T> hypot(const Vectorized<T> &b) const {
|
388 |
+
__m256 lo, hi;
|
389 |
+
__m256 b1, b2;
|
390 |
+
cvt_to_fp32<T>(values, lo, hi);
|
391 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
392 |
+
auto o1 = Sleef_hypotf8_u05(lo, b1);
|
393 |
+
auto o2 = Sleef_hypotf8_u05(hi, b2);
|
394 |
+
return cvt_from_fp32<T>(o1, o2);
|
395 |
+
}
|
396 |
+
Vectorized<T> i0() const {
|
397 |
+
__m256 lo, hi;
|
398 |
+
cvt_to_fp32<T>(values, lo, hi);
|
399 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
400 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
401 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
402 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
403 |
+
tmp1[i] = calc_i0(tmp1[i]);
|
404 |
+
tmp2[i] = calc_i0(tmp2[i]);
|
405 |
+
}
|
406 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
407 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
408 |
+
return cvt_from_fp32<T>(o1, o2);
|
409 |
+
}
|
410 |
+
Vectorized<T> i0e() const {
|
411 |
+
__m256 lo, hi;
|
412 |
+
cvt_to_fp32<T>(values, lo, hi);
|
413 |
+
constexpr auto sz = size();
|
414 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
415 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
416 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
417 |
+
|
418 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
419 |
+
tmp1[i] = calc_i0e(tmp1[i]);
|
420 |
+
tmp2[i] = calc_i0e(tmp2[i]);
|
421 |
+
}
|
422 |
+
const auto o1 = _mm256_loadu_ps(tmp1);
|
423 |
+
const auto o2 = _mm256_loadu_ps(tmp2);
|
424 |
+
return cvt_from_fp32<T>(o1, o2);
|
425 |
+
}
|
426 |
+
Vectorized<T> digamma() const {
|
427 |
+
__m256 lo, hi;
|
428 |
+
cvt_to_fp32<T>(values, lo, hi);
|
429 |
+
constexpr auto sz = size();
|
430 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
431 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
432 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
433 |
+
|
434 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
435 |
+
tmp1[i] = calc_digamma(tmp1[i]);
|
436 |
+
tmp2[i] = calc_digamma(tmp2[i]);
|
437 |
+
}
|
438 |
+
const auto o1 = _mm256_loadu_ps(tmp1);
|
439 |
+
const auto o2 = _mm256_loadu_ps(tmp2);
|
440 |
+
return cvt_from_fp32<T>(o1, o2);
|
441 |
+
}
|
442 |
+
Vectorized<T> igamma(const Vectorized<T> &x) const {
|
443 |
+
__m256 lo, hi;
|
444 |
+
__m256 xlo, xhi;
|
445 |
+
cvt_to_fp32<T>(values, lo, hi);
|
446 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
447 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
448 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
449 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
450 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
451 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
452 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
453 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
454 |
+
tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
|
455 |
+
tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
|
456 |
+
}
|
457 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
458 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
459 |
+
return cvt_from_fp32<T>(o1, o2);
|
460 |
+
}
|
461 |
+
|
462 |
+
Vectorized<T> igammac(const Vectorized<T> &x) const {
|
463 |
+
__m256 lo, hi;
|
464 |
+
__m256 xlo, xhi;
|
465 |
+
cvt_to_fp32<T>(values, lo, hi);
|
466 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
467 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
468 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
469 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
470 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
471 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
472 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
473 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
474 |
+
tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
|
475 |
+
tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
|
476 |
+
}
|
477 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
478 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
479 |
+
return cvt_from_fp32<T>(o1, o2);
|
480 |
+
}
|
481 |
+
Vectorized<T> log() const {
|
482 |
+
return map(Sleef_logf8_u10);
|
483 |
+
}
|
484 |
+
Vectorized<T> log2() const {
|
485 |
+
return map(Sleef_log2f8_u10);
|
486 |
+
}
|
487 |
+
Vectorized<T> log10() const {
|
488 |
+
return map(Sleef_log10f8_u10);
|
489 |
+
}
|
490 |
+
Vectorized<T> log1p() const {
|
491 |
+
return map(Sleef_log1pf8_u10);
|
492 |
+
}
|
493 |
+
Vectorized<T> sin() const {
|
494 |
+
return map(Sleef_sinf8_u10);
|
495 |
+
}
|
496 |
+
Vectorized<T> sinh() const {
|
497 |
+
return map(Sleef_sinhf8_u10);
|
498 |
+
}
|
499 |
+
Vectorized<T> cos() const {
|
500 |
+
return map(Sleef_cosf8_u10);
|
501 |
+
}
|
502 |
+
Vectorized<T> cosh() const {
|
503 |
+
return map(Sleef_coshf8_u10);
|
504 |
+
}
|
505 |
+
Vectorized<T> ceil() const {
|
506 |
+
__m256 lo, hi;
|
507 |
+
cvt_to_fp32<T>(values, lo, hi);
|
508 |
+
auto o1 = _mm256_ceil_ps(lo);
|
509 |
+
auto o2 = _mm256_ceil_ps(hi);
|
510 |
+
return cvt_from_fp32<T>(o1, o2);
|
511 |
+
}
|
512 |
+
Vectorized<T> floor() const {
|
513 |
+
__m256 lo, hi;
|
514 |
+
cvt_to_fp32<T>(values, lo, hi);
|
515 |
+
auto o1 = _mm256_floor_ps(lo);
|
516 |
+
auto o2 = _mm256_floor_ps(hi);
|
517 |
+
return cvt_from_fp32<T>(o1, o2);
|
518 |
+
}
|
519 |
+
Vectorized<T> neg() const {
|
520 |
+
return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000));
|
521 |
+
}
|
522 |
+
Vectorized<T> round() const {
|
523 |
+
__m256 lo, hi;
|
524 |
+
cvt_to_fp32<T>(values, lo, hi);
|
525 |
+
auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
526 |
+
auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
527 |
+
return cvt_from_fp32<T>(o1, o2);
|
528 |
+
}
|
529 |
+
Vectorized<T> tan() const {
|
530 |
+
return map(Sleef_tanf8_u10);
|
531 |
+
}
|
532 |
+
Vectorized<T> tanh() const {
|
533 |
+
return map(Sleef_tanhf8_u10);
|
534 |
+
}
|
535 |
+
Vectorized<T> trunc() const {
|
536 |
+
__m256 lo, hi;
|
537 |
+
cvt_to_fp32<T>(values, lo, hi);
|
538 |
+
auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
539 |
+
auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
540 |
+
return cvt_from_fp32<T>(o1, o2);
|
541 |
+
}
|
542 |
+
Vectorized<T> lgamma() const {
|
543 |
+
return map(Sleef_lgammaf8_u10);
|
544 |
+
}
|
545 |
+
Vectorized<T> sqrt() const {
|
546 |
+
__m256 lo, hi;
|
547 |
+
cvt_to_fp32<T>(values, lo, hi);
|
548 |
+
auto o1 = _mm256_sqrt_ps(lo);
|
549 |
+
auto o2 = _mm256_sqrt_ps(hi);
|
550 |
+
return cvt_from_fp32<T>(o1, o2);
|
551 |
+
}
|
552 |
+
Vectorized<T> reciprocal() const {
|
553 |
+
__m256 lo, hi;
|
554 |
+
cvt_to_fp32<T>(values, lo, hi);
|
555 |
+
auto ones = _mm256_set1_ps(1);
|
556 |
+
auto o1 = _mm256_div_ps(ones, lo);
|
557 |
+
auto o2 = _mm256_div_ps(ones, hi);
|
558 |
+
return cvt_from_fp32<T>(o1, o2);
|
559 |
+
}
|
560 |
+
Vectorized<T> rsqrt() const {
|
561 |
+
__m256 lo, hi;
|
562 |
+
cvt_to_fp32<T>(values, lo, hi);
|
563 |
+
auto ones = _mm256_set1_ps(1);
|
564 |
+
auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo));
|
565 |
+
auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi));
|
566 |
+
return cvt_from_fp32<T>(o1, o2);
|
567 |
+
}
|
568 |
+
Vectorized<T> pow(const Vectorized<T> &b) const {
|
569 |
+
__m256 lo, hi;
|
570 |
+
__m256 b1, b2;
|
571 |
+
cvt_to_fp32<T>(values, lo, hi);
|
572 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
573 |
+
auto o1 = Sleef_powf8_u10(lo, b1);
|
574 |
+
auto o2 = Sleef_powf8_u10(hi, b2);
|
575 |
+
return cvt_from_fp32<T>(o1, o2);
|
576 |
+
}
|
577 |
+
private:
|
578 |
+
template<typename Op>
|
579 |
+
Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
|
580 |
+
__m256 a_lo, a_hi;
|
581 |
+
__m256 b_lo, b_hi;
|
582 |
+
cvt_to_fp32<T>(values, a_lo, a_hi);
|
583 |
+
cvt_to_fp32<T>(b.values, b_lo, b_hi);
|
584 |
+
auto o1 = op(a_lo, b_lo);
|
585 |
+
auto o2 = op(a_hi, b_hi);
|
586 |
+
return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
|
587 |
+
}
|
588 |
+
|
589 |
+
public:
|
590 |
+
Vectorized<T> inline operator>(const Vectorized<T>& other) const {
|
591 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); });
|
592 |
+
}
|
593 |
+
Vectorized<T> inline operator<(const Vectorized<T>& other) const {
|
594 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); });
|
595 |
+
}
|
596 |
+
Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
|
597 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); });
|
598 |
+
}
|
599 |
+
Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
|
600 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); });
|
601 |
+
}
|
602 |
+
Vectorized<T> inline operator==(const Vectorized<T>& other) const {
|
603 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); });
|
604 |
+
}
|
605 |
+
Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
|
606 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); });
|
607 |
+
}
|
608 |
+
};
|
609 |
+
|
610 |
+
template<typename T, typename Op>
|
611 |
+
static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
612 |
+
__m256 a_lo, a_hi;
|
613 |
+
__m256 b_lo, b_hi;
|
614 |
+
cvt_to_fp32<T>(__m256i(a), a_lo, a_hi);
|
615 |
+
cvt_to_fp32<T>(__m256i(b), b_lo, b_hi);
|
616 |
+
auto o1 = op(a_lo, b_lo);
|
617 |
+
auto o2 = op(a_hi, b_hi);
|
618 |
+
return cvt_from_fp32<T>(o1, o2);
|
619 |
+
}
|
620 |
+
|
621 |
+
template <>
|
622 |
+
class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
|
623 |
+
public:
|
624 |
+
using Vectorized16::Vectorized16;
|
625 |
+
|
626 |
+
Vectorized<BFloat16> frac() const;
|
627 |
+
|
628 |
+
Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
|
629 |
+
Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
|
630 |
+
Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
|
631 |
+
Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
|
632 |
+
Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
|
633 |
+
Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
|
634 |
+
};
|
635 |
+
|
636 |
+
Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
637 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
|
638 |
+
}
|
639 |
+
Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
640 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
|
641 |
+
}
|
642 |
+
Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
643 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
|
644 |
+
}
|
645 |
+
Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
646 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
|
647 |
+
}
|
648 |
+
Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
649 |
+
return _mm256_and_si256(a, b);
|
650 |
+
}
|
651 |
+
Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
652 |
+
return _mm256_or_si256(a, b);
|
653 |
+
}
|
654 |
+
Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
655 |
+
return _mm256_xor_si256(a, b);
|
656 |
+
}
|
657 |
+
|
658 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
|
659 |
+
return (*this == other) & Vectorized<BFloat16>(1.0f);
|
660 |
+
}
|
661 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
|
662 |
+
return (*this != other) & Vectorized<BFloat16>(1.0f);
|
663 |
+
}
|
664 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
|
665 |
+
return (*this > other) & Vectorized<BFloat16>(1.0f);
|
666 |
+
}
|
667 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
|
668 |
+
return (*this >= other) & Vectorized<BFloat16>(1.0f);
|
669 |
+
}
|
670 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
|
671 |
+
return (*this < other) & Vectorized<BFloat16>(1.0f);
|
672 |
+
}
|
673 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
|
674 |
+
return (*this <= other) & Vectorized<BFloat16>(1.0f);
|
675 |
+
}
|
676 |
+
|
677 |
+
// frac. Implement this here so we can use subtraction
|
678 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
|
679 |
+
return *this - this->trunc();
|
680 |
+
}
|
681 |
+
|
682 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
683 |
+
// either input is a NaN.
|
684 |
+
template <>
|
685 |
+
Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
686 |
+
__m256 a_lo, a_hi;
|
687 |
+
__m256 b_lo, b_hi;
|
688 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
689 |
+
cvtbf16_fp32(__m256i(b), b_lo, b_hi);
|
690 |
+
auto max_lo = _mm256_max_ps(a_lo, b_lo);
|
691 |
+
auto max_hi = _mm256_max_ps(a_hi, b_hi);
|
692 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
693 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
694 |
+
// Exploit the fact that all-ones is a NaN.
|
695 |
+
auto o1 = _mm256_or_ps(max_lo, nan_lo);
|
696 |
+
auto o2 = _mm256_or_ps(max_hi, nan_hi);
|
697 |
+
return cvtfp32_bf16(o1, o2);
|
698 |
+
}
|
699 |
+
|
700 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
701 |
+
// either input is a NaN.
|
702 |
+
template <>
|
703 |
+
Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
704 |
+
__m256 a_lo, a_hi;
|
705 |
+
__m256 b_lo, b_hi;
|
706 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
707 |
+
cvtbf16_fp32(__m256i(b), b_lo, b_hi);
|
708 |
+
auto min_lo = _mm256_min_ps(a_lo, b_lo);
|
709 |
+
auto min_hi = _mm256_min_ps(a_hi, b_hi);
|
710 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
711 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
712 |
+
// Exploit the fact that all-ones is a NaN.
|
713 |
+
auto o1 = _mm256_or_ps(min_lo, nan_lo);
|
714 |
+
auto o2 = _mm256_or_ps(min_hi, nan_hi);
|
715 |
+
return cvtfp32_bf16(o1, o2);
|
716 |
+
}
|
717 |
+
|
718 |
+
template <>
|
719 |
+
Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
|
720 |
+
const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
|
721 |
+
__m256 a_lo, a_hi;
|
722 |
+
__m256 min_lo, min_hi;
|
723 |
+
__m256 max_lo, max_hi;
|
724 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
725 |
+
cvtbf16_fp32(__m256i(min), min_lo, min_hi);
|
726 |
+
cvtbf16_fp32(__m256i(max), max_lo, max_hi);
|
727 |
+
auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
|
728 |
+
auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
|
729 |
+
return cvtfp32_bf16(o1, o2);
|
730 |
+
}
|
731 |
+
|
732 |
+
template <>
|
733 |
+
Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
|
734 |
+
__m256 a_lo, a_hi;
|
735 |
+
__m256 max_lo, max_hi;
|
736 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
737 |
+
cvtbf16_fp32(__m256i(max), max_lo, max_hi);
|
738 |
+
auto o1 = _mm256_min_ps(max_lo, a_lo);
|
739 |
+
auto o2 = _mm256_min_ps(max_hi, a_hi);
|
740 |
+
return cvtfp32_bf16(o1, o2);
|
741 |
+
}
|
742 |
+
|
743 |
+
template <>
|
744 |
+
Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
|
745 |
+
__m256 a_lo, a_hi;
|
746 |
+
__m256 min_lo, min_hi;
|
747 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
748 |
+
cvtbf16_fp32(__m256i(min), min_lo, min_hi);
|
749 |
+
auto o1 = _mm256_max_ps(min_lo, a_lo);
|
750 |
+
auto o2 = _mm256_max_ps(min_hi, a_hi);
|
751 |
+
return cvtfp32_bf16(o1, o2);
|
752 |
+
}
|
753 |
+
|
754 |
+
template <>
|
755 |
+
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
756 |
+
int64_t i;
|
757 |
+
#pragma unroll
|
758 |
+
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
759 |
+
auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
|
760 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
|
761 |
+
}
|
762 |
+
#pragma unroll
|
763 |
+
for (; i < n; i++) {
|
764 |
+
dst[i] = src[i];
|
765 |
+
}
|
766 |
+
}
|
767 |
+
|
768 |
+
template <>
|
769 |
+
inline void convert(const float* src, BFloat16* dst, int64_t n) {
|
770 |
+
int64_t i;
|
771 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
772 |
+
__m256 a = _mm256_loadu_ps(&src[i]);
|
773 |
+
__m256 b = _mm256_loadu_ps(&src[i + 8]);
|
774 |
+
|
775 |
+
__m256i bf = cvtfp32_bf16(a, b);
|
776 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
|
777 |
+
}
|
778 |
+
for (; i < n; i++) {
|
779 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
780 |
+
}
|
781 |
+
}
|
782 |
+
|
783 |
+
template <>
|
784 |
+
inline void convert(const double* src, BFloat16* dst, int64_t n) {
|
785 |
+
auto load_float = [](const double *src) -> __m256 {
|
786 |
+
// Load one float vector from an array of doubles
|
787 |
+
__m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
|
788 |
+
__m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
|
789 |
+
return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
|
790 |
+
};
|
791 |
+
|
792 |
+
int64_t i;
|
793 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
794 |
+
__m256 a = load_float(&src[i]);
|
795 |
+
__m256 b = load_float(&src[i + 8]);
|
796 |
+
|
797 |
+
__m256i bf = cvtfp32_bf16(a, b);
|
798 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
|
799 |
+
}
|
800 |
+
for (; i < n; i++) {
|
801 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
802 |
+
}
|
803 |
+
}
|
804 |
+
|
805 |
+
template <>
|
806 |
+
Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
|
807 |
+
const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
|
808 |
+
__m256 a_lo, a_hi;
|
809 |
+
__m256 b_lo, b_hi;
|
810 |
+
__m256 c_lo, c_hi;
|
811 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
812 |
+
cvtbf16_fp32(__m256i(b), b_lo, b_hi);
|
813 |
+
cvtbf16_fp32(__m256i(c), c_lo, c_hi);
|
814 |
+
auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
|
815 |
+
auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
|
816 |
+
return cvtfp32_bf16(o1, o2);
|
817 |
+
}
|
818 |
+
|
819 |
+
template <>
|
820 |
+
class Vectorized<Half>: public Vectorized16<Half> {
|
821 |
+
public:
|
822 |
+
using Vectorized16::Vectorized16;
|
823 |
+
|
824 |
+
Vectorized<Half> frac() const;
|
825 |
+
|
826 |
+
Vectorized<Half> eq(const Vectorized<Half>& other) const;
|
827 |
+
Vectorized<Half> ne(const Vectorized<Half>& other) const;
|
828 |
+
Vectorized<Half> gt(const Vectorized<Half>& other) const;
|
829 |
+
Vectorized<Half> ge(const Vectorized<Half>& other) const;
|
830 |
+
Vectorized<Half> lt(const Vectorized<Half>& other) const;
|
831 |
+
Vectorized<Half> le(const Vectorized<Half>& other) const;
|
832 |
+
};
|
833 |
+
|
834 |
+
Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
835 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
|
836 |
+
}
|
837 |
+
Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
838 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
|
839 |
+
}
|
840 |
+
Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
841 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
|
842 |
+
}
|
843 |
+
Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
844 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
|
845 |
+
}
|
846 |
+
Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
847 |
+
return _mm256_and_si256(a, b);
|
848 |
+
}
|
849 |
+
Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
850 |
+
return _mm256_or_si256(a, b);
|
851 |
+
}
|
852 |
+
Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
853 |
+
return _mm256_xor_si256(a, b);
|
854 |
+
}
|
855 |
+
|
856 |
+
inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
|
857 |
+
return (*this == other) & Vectorized<Half>(1.0f);
|
858 |
+
}
|
859 |
+
inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
|
860 |
+
return (*this != other) & Vectorized<Half>(1.0f);
|
861 |
+
}
|
862 |
+
inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
|
863 |
+
return (*this > other) & Vectorized<Half>(1.0f);
|
864 |
+
}
|
865 |
+
inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
|
866 |
+
return (*this >= other) & Vectorized<Half>(1.0f);
|
867 |
+
}
|
868 |
+
inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
|
869 |
+
return (*this < other) & Vectorized<Half>(1.0f);
|
870 |
+
}
|
871 |
+
inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
|
872 |
+
return (*this <= other) & Vectorized<Half>(1.0f);
|
873 |
+
}
|
874 |
+
|
875 |
+
// frac. Implement this here so we can use subtraction
|
876 |
+
inline Vectorized<Half> Vectorized<Half>::frac() const {
|
877 |
+
return *this - this->trunc();
|
878 |
+
}
|
879 |
+
|
880 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
881 |
+
// either input is a NaN.
|
882 |
+
template <>
|
883 |
+
Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
884 |
+
__m256 a_lo, a_hi;
|
885 |
+
__m256 b_lo, b_hi;
|
886 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
887 |
+
cvtfp16_fp32(__m256i(b), b_lo, b_hi);
|
888 |
+
auto max_lo = _mm256_max_ps(a_lo, b_lo);
|
889 |
+
auto max_hi = _mm256_max_ps(a_hi, b_hi);
|
890 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
891 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
892 |
+
// Exploit the fact that all-ones is a NaN.
|
893 |
+
auto o1 = _mm256_or_ps(max_lo, nan_lo);
|
894 |
+
auto o2 = _mm256_or_ps(max_hi, nan_hi);
|
895 |
+
  return cvtfp32_fp16(o1, o2);
}

// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  __m256 a_lo, a_hi;
  __m256 b_lo, b_hi;
  cvtfp16_fp32(__m256i(a), a_lo, a_hi);
  cvtfp16_fp32(__m256i(b), b_lo, b_hi);
  auto min_lo = _mm256_min_ps(a_lo, b_lo);
  auto min_hi = _mm256_min_ps(a_hi, b_hi);
  auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
  auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
  // Exploit the fact that all-ones is a NaN.
  auto o1 = _mm256_or_ps(min_lo, nan_lo);
  auto o2 = _mm256_or_ps(min_hi, nan_hi);
  return cvtfp32_fp16(o1, o2);
}

template <>
Vectorized<Half> inline clamp(const Vectorized<Half>& a,
    const Vectorized<Half>& min, const Vectorized<Half>& max) {
  __m256 a_lo, a_hi;
  __m256 min_lo, min_hi;
  __m256 max_lo, max_hi;
  cvtfp16_fp32(__m256i(a), a_lo, a_hi);
  cvtfp16_fp32(__m256i(min), min_lo, min_hi);
  cvtfp16_fp32(__m256i(max), max_lo, max_hi);
  auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
  auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
  return cvtfp32_fp16(o1, o2);
}

template <>
Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
  __m256 a_lo, a_hi;
  __m256 max_lo, max_hi;
  cvtfp16_fp32(__m256i(a), a_lo, a_hi);
  cvtfp16_fp32(__m256i(max), max_lo, max_hi);
  auto o1 = _mm256_min_ps(max_lo, a_lo);
  auto o2 = _mm256_min_ps(max_hi, a_hi);
  return cvtfp32_fp16(o1, o2);
}

template <>
Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
  __m256 a_lo, a_hi;
  __m256 min_lo, min_hi;
  cvtfp16_fp32(__m256i(a), a_lo, a_hi);
  cvtfp16_fp32(__m256i(min), min_lo, min_hi);
  auto o1 = _mm256_max_ps(min_lo, a_lo);
  auto o2 = _mm256_max_ps(min_hi, a_hi);
  return cvtfp32_fp16(o1, o2);
}

template <>
inline void convert(const Half* src, Half* dst, int64_t n) {
  int64_t i;
#pragma unroll
  for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
    auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
    _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
  }
#pragma unroll
  for (; i < n; i++) {
    dst[i] = src[i];
  }
}

template <>
inline void convert(const float* src, Half* dst, int64_t n) {
  int64_t i;
  for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
    __m256 a = _mm256_loadu_ps(&src[i]);
    __m256 b = _mm256_loadu_ps(&src[i + 8]);

    __m256i c = cvtfp32_fp16(a, b);
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
  }
  for (; i < n; i++) {
    dst[i] = c10::convert<Half>(src[i]);
  }
}

template <>
inline void convert(const double* src, Half* dst, int64_t n) {
  auto load_float = [](const double *src) -> __m256 {
    // Load one float vector from an array of doubles
    __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
    __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
    return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
  };

  int64_t i;
  for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
    __m256 a = load_float(&src[i]);
    __m256 b = load_float(&src[i + 8]);

    __m256i c = cvtfp32_fp16(a, b);
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
  }
  for (; i < n; i++) {
    dst[i] = c10::convert<Half>(src[i]);
  }
}

template <>
Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
    const Vectorized<Half>& b, const Vectorized<Half>& c) {
  __m256 a_lo, a_hi;
  __m256 b_lo, b_hi;
  __m256 c_lo, c_hi;
  cvtfp16_fp32(__m256i(a), a_lo, a_hi);
  cvtfp16_fp32(__m256i(b), b_lo, b_hi);
  cvtfp16_fp32(__m256i(c), c_lo, c_hi);
  auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
  auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
  return cvtfp32_fp16(o1, o2);
}

#define CONVERT_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  __m256 o1, o2; \
  cvt_to_fp32<type>(__m256i(a), o1, o2); \
  return std::make_tuple(o1, o2); \
} \
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  return cvt_from_fp32<type>(__m256(a), __m256(b)); \
}
CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
CONVERT_VECTORIZED_INIT(Half, half);

#else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)

#define CONVERT_NON_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr2); \
  convert(arr2, arr, K); \
  return std::make_tuple( \
      Vectorized<float>::loadu(arr), \
      Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
} \
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr); \
  b.store(arr + Vectorized<float>::size()); \
  convert(arr, arr2, K); \
  return Vectorized<type>::loadu(arr2); \
}
CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
CONVERT_NON_VECTORIZED_INIT(Half, half);

#endif // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)

#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
#define LOAD_FP32_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  auto values = _mm_loadu_si128(reinterpret_cast<const __m128i*>(data)); \
  __m256 out_values; \
  cvt_to_fp32<type>(values, out_values); \
  out = out_values; \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  auto vec = Vectorized<type>::loadu(data); \
  __m256 out1_values, out2_values; \
  cvt_to_fp32<type>(vec, out1_values, out2_values); \
  out1 = out1_values; \
  out2 = out2_values; \
}
LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
LOAD_FP32_VECTORIZED_INIT(Half, fp16);

#else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  __at_align__ float values[Vectorized<float>::size()]; \
  for (const auto k : c10::irange(Vectorized<float>::size())) { \
    values[k] = data[k]; \
  } \
  out = Vectorized<float>::loadu(values); \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  load_fp32_from_##name(data, out1); \
  data += Vectorized<float>::size(); \
  load_fp32_from_##name(data, out2); \
}
LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);

#endif
}} // namespace at::vec::CPU_CAPABILITY

#pragma GCC diagnostic pop
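The Half specializations above all follow the same widen-compute-narrow pattern: unpack fp16 lanes to fp32, do the arithmetic in fp32, then pack back to fp16. The snippet below is a minimal standalone sketch of that pattern using the F16C intrinsics directly; it assumes an x86-64 compiler with AVX and F16C enabled (e.g. -mavx -mf16c) and is only an illustration, not the header's own cvtfp16_fp32/cvtfp32_fp16 helpers.

// Minimal illustration of the widen-compute-narrow pattern used above:
// eight fp16 values are widened to fp32, clamped to [0, 1], and narrowed back.
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  // Eight fp16 values stored as raw 16-bit patterns (1.5f repeated here).
  uint16_t half_in[8];
  __m128i packed = _mm256_cvtps_ph(_mm256_set1_ps(1.5f), _MM_FROUND_TO_NEAREST_INT);
  _mm_storeu_si128(reinterpret_cast<__m128i*>(half_in), packed);

  // Widen fp16 -> fp32, clamp in fp32, narrow fp32 -> fp16.
  __m256 wide = _mm256_cvtph_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(half_in)));
  wide = _mm256_min_ps(_mm256_set1_ps(1.0f), _mm256_max_ps(_mm256_set1_ps(0.0f), wide));
  __m128i narrowed = _mm256_cvtps_ph(wide, _MM_FROUND_TO_NEAREST_INT);

  uint16_t half_out[8];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(half_out), narrowed);
  std::printf("first clamped fp16 bit pattern: 0x%04x\n",
              static_cast<unsigned>(half_out[0]));  // 0x3c00 == 1.0 in fp16
  return 0;
}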
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h
ADDED
@@ -0,0 +1,431 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
4 |
+
// See Note [Do not compile initializers with AVX]
|
5 |
+
|
6 |
+
#include <c10/util/complex.h>
|
7 |
+
#include <c10/util/irange.h>
|
8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
10 |
+
|
11 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
12 |
+
#include <sleef.h>
|
13 |
+
#endif
|
14 |
+
|
15 |
+
namespace at::vec {
|
16 |
+
// See Note [CPU_CAPABILITY namespace]
|
17 |
+
inline namespace CPU_CAPABILITY {
|
18 |
+
|
19 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
20 |
+
|
21 |
+
template <> class Vectorized<c10::complex<double>> {
|
22 |
+
private:
|
23 |
+
__m256d values;
|
24 |
+
public:
|
25 |
+
using value_type = c10::complex<double>;
|
26 |
+
using size_type = int;
|
27 |
+
static constexpr size_type size() {
|
28 |
+
return 2;
|
29 |
+
}
|
30 |
+
Vectorized() {}
|
31 |
+
Vectorized(__m256d v) : values(v) {}
|
32 |
+
Vectorized(c10::complex<double> val) {
|
33 |
+
double real_value = val.real();
|
34 |
+
double imag_value = val.imag();
|
35 |
+
values = _mm256_setr_pd(real_value, imag_value,
|
36 |
+
real_value, imag_value);
|
37 |
+
}
|
38 |
+
Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
|
39 |
+
values = _mm256_setr_pd(val1.real(), val1.imag(),
|
40 |
+
val2.real(), val2.imag());
|
41 |
+
}
|
42 |
+
operator __m256d() const {
|
43 |
+
return values;
|
44 |
+
}
|
45 |
+
template <int64_t mask>
|
46 |
+
static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
47 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
48 |
+
static_assert (mask > -1 && mask < 4, "Unexpected mask value");
|
49 |
+
switch (mask) {
|
50 |
+
case 0:
|
51 |
+
return a;
|
52 |
+
case 1:
|
53 |
+
return _mm256_blend_pd(a.values, b.values, 0x03);
|
54 |
+
case 2:
|
55 |
+
return _mm256_blend_pd(a.values, b.values, 0x0c);
|
56 |
+
case 3: break;
|
57 |
+
}
|
58 |
+
return b;
|
59 |
+
}
|
60 |
+
static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
|
61 |
+
const Vectorized<c10::complex<double>>& mask) {
|
62 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
63 |
+
auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
|
64 |
+
return _mm256_blendv_pd(a.values, b.values, mask_);
|
65 |
+
|
66 |
+
}
|
67 |
+
template<typename step_t>
|
68 |
+
static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
|
69 |
+
return Vectorized<c10::complex<double>>(base,
|
70 |
+
base + step);
|
71 |
+
}
|
72 |
+
static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
|
73 |
+
int64_t count = size()) {
|
74 |
+
switch (count) {
|
75 |
+
case 0:
|
76 |
+
return a;
|
77 |
+
case 1:
|
78 |
+
return blend<1>(a, b);
|
79 |
+
}
|
80 |
+
return b;
|
81 |
+
}
|
82 |
+
static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
|
83 |
+
if (count == size())
|
84 |
+
return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
|
85 |
+
|
86 |
+
__at_align__ double tmp_values[2*size()];
|
87 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
88 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
89 |
+
// instructions while a loop would be compiled to one instruction.
|
90 |
+
for (const auto i : c10::irange(2*size())) {
|
91 |
+
tmp_values[i] = 0.0;
|
92 |
+
}
|
93 |
+
std::memcpy(
|
94 |
+
tmp_values,
|
95 |
+
reinterpret_cast<const double*>(ptr),
|
96 |
+
count * sizeof(c10::complex<double>));
|
97 |
+
return _mm256_load_pd(tmp_values);
|
98 |
+
}
|
99 |
+
void store(void* ptr, int count = size()) const {
|
100 |
+
if (count == size()) {
|
101 |
+
_mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
|
102 |
+
} else if (count > 0) {
|
103 |
+
double tmp_values[2*size()];
|
104 |
+
_mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
|
105 |
+
std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
|
106 |
+
}
|
107 |
+
}
|
108 |
+
const c10::complex<double>& operator[](int idx) const = delete;
|
109 |
+
c10::complex<double>& operator[](int idx) = delete;
|
110 |
+
Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
|
111 |
+
__at_align__ c10::complex<double> tmp[size()];
|
112 |
+
store(tmp);
|
113 |
+
for (const auto i : c10::irange(size())) {
|
114 |
+
tmp[i] = f(tmp[i]);
|
115 |
+
}
|
116 |
+
return loadu(tmp);
|
117 |
+
}
|
118 |
+
__m256d abs_2_() const {
|
119 |
+
auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
|
120 |
+
return _mm256_hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
|
121 |
+
}
|
122 |
+
__m256d abs_() const {
|
123 |
+
auto real = _mm256_movedup_pd(values); // real real
|
124 |
+
// movehdup_pd does not exist...
|
125 |
+
auto imag = _mm256_permute_pd(values, 0xf); // imag imag
|
126 |
+
return Sleef_hypotd4_u05(real, imag); // abs abs
|
127 |
+
}
|
128 |
+
Vectorized<c10::complex<double>> abs() const {
|
129 |
+
const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
130 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
|
131 |
+
return _mm256_and_pd(abs_(), real_mask); // abs 0
|
132 |
+
}
|
133 |
+
__m256d angle_() const {
|
134 |
+
//angle = atan2(b/a)
|
135 |
+
auto b_a = _mm256_permute_pd(values, 0x05); // b a
|
136 |
+
return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
|
137 |
+
}
|
138 |
+
Vectorized<c10::complex<double>> angle() const {
|
139 |
+
const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
140 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
|
141 |
+
auto angle = _mm256_permute_pd(angle_(), 0x05); // angle 90-angle
|
142 |
+
return _mm256_and_pd(angle, real_mask); // angle 0
|
143 |
+
}
|
144 |
+
Vectorized<c10::complex<double>> sgn() const {
|
145 |
+
auto abs = abs_();
|
146 |
+
auto zero = _mm256_setzero_pd();
|
147 |
+
auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ);
|
148 |
+
auto div = values / abs;
|
149 |
+
return _mm256_blendv_pd(div, zero, mask);
|
150 |
+
}
|
151 |
+
__m256d real_() const {
|
152 |
+
const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
153 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
|
154 |
+
return _mm256_and_pd(values, real_mask);
|
155 |
+
}
|
156 |
+
Vectorized<c10::complex<double>> real() const {
|
157 |
+
return real_();
|
158 |
+
}
|
159 |
+
__m256d imag_() const {
|
160 |
+
const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
|
161 |
+
0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
|
162 |
+
return _mm256_and_pd(values, imag_mask);
|
163 |
+
}
|
164 |
+
Vectorized<c10::complex<double>> imag() const {
|
165 |
+
return _mm256_permute_pd(imag_(), 0x05); //b a
|
166 |
+
}
|
167 |
+
__m256d conj_() const {
|
168 |
+
const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
|
169 |
+
return _mm256_xor_pd(values, sign_mask); // a -b
|
170 |
+
}
|
171 |
+
Vectorized<c10::complex<double>> conj() const {
|
172 |
+
return conj_();
|
173 |
+
}
|
174 |
+
Vectorized<c10::complex<double>> log() const {
|
175 |
+
// Most trigonomic ops use the log() op to improve complex number performance.
|
176 |
+
return map(std::log);
|
177 |
+
}
|
178 |
+
Vectorized<c10::complex<double>> log2() const {
|
179 |
+
const __m256d log2_ = _mm256_set1_pd(std::log(2));
|
180 |
+
return _mm256_div_pd(log(), log2_);
|
181 |
+
}
|
182 |
+
Vectorized<c10::complex<double>> log10() const {
|
183 |
+
const __m256d log10_ = _mm256_set1_pd(std::log(10));
|
184 |
+
return _mm256_div_pd(log(), log10_);
|
185 |
+
}
|
186 |
+
Vectorized<c10::complex<double>> log1p() const {
|
187 |
+
return map(std::log1p);
|
188 |
+
}
|
189 |
+
Vectorized<c10::complex<double>> asin() const {
|
190 |
+
// asin(x)
|
191 |
+
// = -i*ln(iz + sqrt(1 -z^2))
|
192 |
+
// = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
|
193 |
+
// = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
|
194 |
+
const __m256d one = _mm256_set1_pd(1);
|
195 |
+
|
196 |
+
auto conj = conj_();
|
197 |
+
auto b_a = _mm256_permute_pd(conj, 0x05); //-b a
|
198 |
+
auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab
|
199 |
+
auto im = _mm256_add_pd(ab, ab); //-2ab -2ab
|
200 |
+
|
201 |
+
auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
|
202 |
+
auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a
|
203 |
+
re = _mm256_sub_pd(one, re);
|
204 |
+
|
205 |
+
auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im)
|
206 |
+
auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt())
|
207 |
+
return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln()
|
208 |
+
}
|
209 |
+
Vectorized<c10::complex<double>> acos() const {
|
210 |
+
// acos(x) = pi/2 - asin(x)
|
211 |
+
constexpr auto pi_2d = c10::pi<double> / 2;
|
212 |
+
const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0);
|
213 |
+
return _mm256_sub_pd(pi_2, asin());
|
214 |
+
}
|
215 |
+
Vectorized<c10::complex<double>> atan() const;
|
216 |
+
Vectorized<c10::complex<double>> atanh() const {
|
217 |
+
return map(std::atanh);
|
218 |
+
}
|
219 |
+
Vectorized<c10::complex<double>> exp() const {
|
220 |
+
//exp(a + bi)
|
221 |
+
// = exp(a)*(cos(b) + sin(b)i)
|
222 |
+
auto exp = Sleef_expd4_u10(values); //exp(a) exp(b)
|
223 |
+
exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a)
|
224 |
+
|
225 |
+
auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
|
226 |
+
auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05),
|
227 |
+
sin_cos.x, 0x0A); //cos(b) sin(b)
|
228 |
+
return _mm256_mul_pd(exp, cos_sin);
|
229 |
+
}
|
230 |
+
Vectorized<c10::complex<double>> exp2() const {
|
231 |
+
// Use identity 2**x = exp(log(2) * x)
|
232 |
+
const __m256d ln_2 = _mm256_set1_pd(c10::ln_2<double>);
|
233 |
+
Vectorized<c10::complex<double>> scaled_values = _mm256_mul_pd(values, ln_2);
|
234 |
+
return scaled_values.exp();
|
235 |
+
}
|
236 |
+
Vectorized<c10::complex<double>> expm1() const {
|
237 |
+
return map(std::expm1);
|
238 |
+
}
|
239 |
+
Vectorized<c10::complex<double>> sin() const {
|
240 |
+
return map(std::sin);
|
241 |
+
}
|
242 |
+
Vectorized<c10::complex<double>> sinh() const {
|
243 |
+
return map(std::sinh);
|
244 |
+
}
|
245 |
+
Vectorized<c10::complex<double>> cos() const {
|
246 |
+
return map(std::cos);
|
247 |
+
}
|
248 |
+
Vectorized<c10::complex<double>> cosh() const {
|
249 |
+
return map(std::cosh);
|
250 |
+
}
|
251 |
+
Vectorized<c10::complex<double>> ceil() const {
|
252 |
+
return _mm256_ceil_pd(values);
|
253 |
+
}
|
254 |
+
Vectorized<c10::complex<double>> floor() const {
|
255 |
+
return _mm256_floor_pd(values);
|
256 |
+
}
|
257 |
+
Vectorized<c10::complex<double>> neg() const {
|
258 |
+
auto zero = _mm256_setzero_pd();
|
259 |
+
return _mm256_sub_pd(zero, values);
|
260 |
+
}
|
261 |
+
Vectorized<c10::complex<double>> round() const {
|
262 |
+
return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
263 |
+
}
|
264 |
+
Vectorized<c10::complex<double>> tan() const {
|
265 |
+
return map(std::tan);
|
266 |
+
}
|
267 |
+
Vectorized<c10::complex<double>> tanh() const {
|
268 |
+
return map(std::tanh);
|
269 |
+
}
|
270 |
+
Vectorized<c10::complex<double>> trunc() const {
|
271 |
+
return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
272 |
+
}
|
273 |
+
Vectorized<c10::complex<double>> sqrt() const {
|
274 |
+
return map(std::sqrt);
|
275 |
+
}
|
276 |
+
Vectorized<c10::complex<double>> reciprocal() const;
|
277 |
+
Vectorized<c10::complex<double>> rsqrt() const {
|
278 |
+
return sqrt().reciprocal();
|
279 |
+
}
|
280 |
+
Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
|
281 |
+
__at_align__ c10::complex<double> x_tmp[size()];
|
282 |
+
__at_align__ c10::complex<double> y_tmp[size()];
|
283 |
+
store(x_tmp);
|
284 |
+
exp.store(y_tmp);
|
285 |
+
for (const auto i : c10::irange(size())) {
|
286 |
+
x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
|
287 |
+
}
|
288 |
+
return loadu(x_tmp);
|
289 |
+
}
|
290 |
+
// Comparison using the _CMP_**_OQ predicate.
|
291 |
+
// `O`: get false if an operand is NaN
|
292 |
+
// `Q`: do not raise if an operand is NaN
|
293 |
+
Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
|
294 |
+
return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
|
295 |
+
}
|
296 |
+
Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
|
297 |
+
return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
|
298 |
+
}
|
299 |
+
Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>&) const {
|
300 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
301 |
+
}
|
302 |
+
Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>&) const {
|
303 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
304 |
+
}
|
305 |
+
Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>&) const {
|
306 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
307 |
+
}
|
308 |
+
Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>&) const {
|
309 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
310 |
+
}
|
311 |
+
|
312 |
+
Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
|
313 |
+
Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
|
314 |
+
};
|
315 |
+
|
316 |
+
template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
|
317 |
+
return _mm256_add_pd(a, b);
|
318 |
+
}
|
319 |
+
|
320 |
+
template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
|
321 |
+
return _mm256_sub_pd(a, b);
|
322 |
+
}
|
323 |
+
|
324 |
+
template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
|
325 |
+
//(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
|
326 |
+
const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
|
327 |
+
auto ac_bd = _mm256_mul_pd(a, b); //ac bd
|
328 |
+
|
329 |
+
auto d_c = _mm256_permute_pd(b, 0x05); //d c
|
330 |
+
d_c = _mm256_xor_pd(sign_mask, d_c); //d -c
|
331 |
+
auto ad_bc = _mm256_mul_pd(a, d_c); //ad -bc
|
332 |
+
|
333 |
+
auto ret = _mm256_hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc
|
334 |
+
return ret;
|
335 |
+
}
|
336 |
+
|
337 |
+
template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
|
338 |
+
//re + im*i = (a + bi) / (c + di)
|
339 |
+
auto mask = _mm256_set1_pd(-0.f);
|
340 |
+
auto fabs_cd = _mm256_andnot_pd(mask, b); // |c| |d|
|
341 |
+
auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05); // |d| |c|
|
342 |
+
auto scale = _mm256_div_pd(_mm256_set1_pd(1.0f), _mm256_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
|
343 |
+
auto a2 = _mm256_mul_pd(a, scale); // a/sc b/sc
|
344 |
+
auto b2 = _mm256_mul_pd(b, scale); // c/sc d/sc
|
345 |
+
auto acbd2 = _mm256_mul_pd(a2, b2);
|
346 |
+
|
347 |
+
const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
|
348 |
+
auto dc2 = _mm256_permute_pd(b2, 0x05); // d/sc c/sc
|
349 |
+
dc2 = _mm256_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
|
350 |
+
auto adbc2 = _mm256_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
|
351 |
+
auto res2 = _mm256_hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
|
352 |
+
|
353 |
+
// get the denominator
|
354 |
+
auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
|
355 |
+
res2 = _mm256_div_pd(res2, denom2);
|
356 |
+
return res2;
|
357 |
+
}
|
358 |
+
|
359 |
+
// reciprocal. Implement this here so we can use multiplication.
|
360 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
|
361 |
+
//re + im*i = (a + bi) / (c + di)
|
362 |
+
//re = (ac + bd)/abs_2() = c/abs_2()
|
363 |
+
//im = (bc - ad)/abs_2() = d/abs_2()
|
364 |
+
const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
|
365 |
+
auto c_d = _mm256_xor_pd(sign_mask, values); //c -d
|
366 |
+
return _mm256_div_pd(c_d, abs_2_());
|
367 |
+
}
|
368 |
+
|
369 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
|
370 |
+
// atan(x) = i/2 * ln((i + z)/(i - z))
|
371 |
+
const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0);
|
372 |
+
const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5);
|
373 |
+
|
374 |
+
auto sum = Vectorized(_mm256_add_pd(i, values)); // a 1+b
|
375 |
+
auto sub = Vectorized(_mm256_sub_pd(i, values)); // -a 1-b
|
376 |
+
auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
|
377 |
+
return i_half*ln; // i/2*ln()
|
378 |
+
}
|
379 |
+
|
380 |
+
template <>
|
381 |
+
Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
382 |
+
auto abs_a = a.abs_2_();
|
383 |
+
auto abs_b = b.abs_2_();
|
384 |
+
auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ);
|
385 |
+
auto max = _mm256_blendv_pd(a, b, mask);
|
386 |
+
// Exploit the fact that all-ones is a NaN.
|
387 |
+
auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
|
388 |
+
return _mm256_or_pd(max, isnan);
|
389 |
+
}
|
390 |
+
|
391 |
+
template <>
|
392 |
+
Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
393 |
+
auto abs_a = a.abs_2_();
|
394 |
+
auto abs_b = b.abs_2_();
|
395 |
+
auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ);
|
396 |
+
auto min = _mm256_blendv_pd(a, b, mask);
|
397 |
+
// Exploit the fact that all-ones is a NaN.
|
398 |
+
auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
|
399 |
+
return _mm256_or_pd(min, isnan);
|
400 |
+
}
|
401 |
+
|
402 |
+
template <>
|
403 |
+
Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
404 |
+
return _mm256_and_pd(a, b);
|
405 |
+
}
|
406 |
+
|
407 |
+
template <>
|
408 |
+
Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
409 |
+
return _mm256_or_pd(a, b);
|
410 |
+
}
|
411 |
+
|
412 |
+
template <>
|
413 |
+
Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
414 |
+
return _mm256_xor_pd(a, b);
|
415 |
+
}
|
416 |
+
|
417 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
|
418 |
+
auto eq = (*this == other); // compares real and imag individually
|
419 |
+
// If both real numbers and imag numbers are equal, then the complex numbers are equal
|
420 |
+
return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
|
421 |
+
}
|
422 |
+
|
423 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
|
424 |
+
auto ne = (*this != other); // compares real and imag individually
|
425 |
+
// If either real numbers or imag numbers are not equal, then the complex numbers are not equal
|
426 |
+
return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
|
427 |
+
}
|
428 |
+
|
429 |
+
#endif
|
430 |
+
|
431 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
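The vectorized operator* in the file above evaluates the textbook identity (a + bi)(c + di) = (ac - bd) + (ad + bc)i lane by lane. The scalar sketch below only cross-checks that identity against std::complex<double>; it is an illustration, not part of the header.

// Scalar cross-check of the identity used by the vectorized operator*:
// (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
#include <cassert>
#include <cmath>
#include <complex>

int main() {
  double a = 3.0, b = -2.0, c = 0.5, d = 4.0;

  double re = a * c - b * d;  // ac - bd
  double im = a * d + b * c;  // ad + bc

  std::complex<double> reference =
      std::complex<double>(a, b) * std::complex<double>(c, d);
  assert(std::abs(reference.real() - re) < 1e-12);
  assert(std::abs(reference.imag() - im) < 1e-12);
  return 0;
}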
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h
ADDED
@@ -0,0 +1,468 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
4 |
+
// See Note [Do not compile initializers with AVX]
|
5 |
+
|
6 |
+
#include <c10/util/complex.h>
|
7 |
+
#include <c10/util/irange.h>
|
8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
10 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
11 |
+
#include <sleef.h>
|
12 |
+
#endif
|
13 |
+
|
14 |
+
namespace at::vec {
|
15 |
+
// See Note [CPU_CAPABILITY namespace]
|
16 |
+
inline namespace CPU_CAPABILITY {
|
17 |
+
|
18 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
19 |
+
|
20 |
+
template <> class Vectorized<c10::complex<float>> {
|
21 |
+
private:
|
22 |
+
__m256 values;
|
23 |
+
public:
|
24 |
+
using value_type = c10::complex<float>;
|
25 |
+
using size_type = int;
|
26 |
+
static constexpr size_type size() {
|
27 |
+
return 4;
|
28 |
+
}
|
29 |
+
Vectorized() {}
|
30 |
+
Vectorized(__m256 v) : values(v) {}
|
31 |
+
Vectorized(c10::complex<float> val) {
|
32 |
+
float real_value = val.real();
|
33 |
+
float imag_value = val.imag();
|
34 |
+
values = _mm256_setr_ps(real_value, imag_value,
|
35 |
+
real_value, imag_value,
|
36 |
+
real_value, imag_value,
|
37 |
+
real_value, imag_value
|
38 |
+
);
|
39 |
+
}
|
40 |
+
Vectorized(c10::complex<float> val1, c10::complex<float> val2, c10::complex<float> val3, c10::complex<float> val4) {
|
41 |
+
values = _mm256_setr_ps(val1.real(), val1.imag(),
|
42 |
+
val2.real(), val2.imag(),
|
43 |
+
val3.real(), val3.imag(),
|
44 |
+
val4.real(), val4.imag()
|
45 |
+
);
|
46 |
+
}
|
47 |
+
operator __m256() const {
|
48 |
+
return values;
|
49 |
+
}
|
50 |
+
template <int64_t mask>
|
51 |
+
static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
52 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
53 |
+
static_assert(mask > -1 && mask < 16, "Unexpected mask range");
|
54 |
+
switch (mask) {
|
55 |
+
case 0:
|
56 |
+
return a;
|
57 |
+
case 1:
|
58 |
+
return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011
|
59 |
+
case 2:
|
60 |
+
return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100
|
61 |
+
case 3:
|
62 |
+
return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111
|
63 |
+
case 4:
|
64 |
+
return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000
|
65 |
+
case 5:
|
66 |
+
return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011
|
67 |
+
case 6:
|
68 |
+
return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100
|
69 |
+
case 7:
|
70 |
+
return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111
|
71 |
+
case 8:
|
72 |
+
return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000
|
73 |
+
case 9:
|
74 |
+
return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011
|
75 |
+
case 10:
|
76 |
+
return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100
|
77 |
+
case 11:
|
78 |
+
return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111
|
79 |
+
case 12:
|
80 |
+
return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000
|
81 |
+
case 13:
|
82 |
+
return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011
|
83 |
+
case 14:
|
84 |
+
return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100
|
85 |
+
default: break;
|
86 |
+
}
|
87 |
+
return b;
|
88 |
+
}
|
89 |
+
static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
|
90 |
+
const Vectorized<c10::complex<float>>& mask) {
|
91 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
92 |
+
auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values);
|
93 |
+
return _mm256_blendv_ps(a.values, b.values, mask_);
|
94 |
+
|
95 |
+
}
|
96 |
+
template<typename step_t>
|
97 |
+
static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0., step_t step = static_cast<step_t>(1)) {
|
98 |
+
return Vectorized<c10::complex<float>>(base,
|
99 |
+
base + step,
|
100 |
+
base + c10::complex<float>(2)*step,
|
101 |
+
base + c10::complex<float>(3)*step);
|
102 |
+
}
|
103 |
+
static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
|
104 |
+
int64_t count = size()) {
|
105 |
+
switch (count) {
|
106 |
+
case 0:
|
107 |
+
return a;
|
108 |
+
case 1:
|
109 |
+
return blend<1>(a, b);
|
110 |
+
case 2:
|
111 |
+
return blend<3>(a, b);
|
112 |
+
case 3:
|
113 |
+
return blend<7>(a, b);
|
114 |
+
}
|
115 |
+
return b;
|
116 |
+
}
|
117 |
+
static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
|
118 |
+
if (count == size())
|
119 |
+
return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
|
120 |
+
|
121 |
+
__at_align__ float tmp_values[2*size()];
|
122 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
123 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
124 |
+
// instructions while a loop would be compiled to one instruction.
|
125 |
+
for (const auto i : c10::irange(2*size())) {
|
126 |
+
tmp_values[i] = 0.0;
|
127 |
+
}
|
128 |
+
std::memcpy(
|
129 |
+
tmp_values,
|
130 |
+
reinterpret_cast<const float*>(ptr),
|
131 |
+
count * sizeof(c10::complex<float>));
|
132 |
+
return _mm256_load_ps(tmp_values);
|
133 |
+
}
|
134 |
+
void store(void* ptr, int count = size()) const {
|
135 |
+
if (count == size()) {
|
136 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
|
137 |
+
} else if (count > 0) {
|
138 |
+
float tmp_values[2*size()];
|
139 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
|
140 |
+
std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
|
141 |
+
}
|
142 |
+
}
|
143 |
+
const c10::complex<float>& operator[](int idx) const = delete;
|
144 |
+
c10::complex<float>& operator[](int idx) = delete;
|
145 |
+
Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
|
146 |
+
__at_align__ c10::complex<float> tmp[size()];
|
147 |
+
store(tmp);
|
148 |
+
for (const auto i : c10::irange(size())) {
|
149 |
+
tmp[i] = f(tmp[i]);
|
150 |
+
}
|
151 |
+
return loadu(tmp);
|
152 |
+
}
|
153 |
+
__m256 abs_2_() const {
|
154 |
+
auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
|
155 |
+
auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
|
156 |
+
return _mm256_permute_ps(ret, 0xD8);
|
157 |
+
}
|
158 |
+
__m256 abs_() const {
|
159 |
+
auto real = _mm256_moveldup_ps(values); // real real
|
160 |
+
auto imag = _mm256_movehdup_ps(values); // imag imag
|
161 |
+
return Sleef_hypotf8_u05(real, imag); // abs abs
|
162 |
+
}
|
163 |
+
Vectorized<c10::complex<float>> abs() const {
|
164 |
+
const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
165 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
166 |
+
return _mm256_and_ps(abs_(), real_mask); // abs 0
|
167 |
+
}
|
168 |
+
__m256 angle_() const {
|
169 |
+
//angle = atan2(b/a)
|
170 |
+
auto b_a = _mm256_permute_ps(values, 0xB1); // b a
|
171 |
+
return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
|
172 |
+
}
|
173 |
+
Vectorized<c10::complex<float>> angle() const {
|
174 |
+
const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
175 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
176 |
+
auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle
|
177 |
+
return _mm256_and_ps(angle, real_mask); // angle 0
|
178 |
+
}
|
179 |
+
Vectorized<c10::complex<float>> sgn() const {
|
180 |
+
auto abs = abs_();
|
181 |
+
auto zero = _mm256_setzero_ps();
|
182 |
+
auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ);
|
183 |
+
auto div = values / abs;
|
184 |
+
return _mm256_blendv_ps(div, zero, mask);
|
185 |
+
}
|
186 |
+
__m256 real_() const {
|
187 |
+
const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
188 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
189 |
+
return _mm256_and_ps(values, real_mask);
|
190 |
+
}
|
191 |
+
Vectorized<c10::complex<float>> real() const {
|
192 |
+
return real_();
|
193 |
+
}
|
194 |
+
__m256 imag_() const {
|
195 |
+
const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
|
196 |
+
0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
|
197 |
+
return _mm256_and_ps(values, imag_mask);
|
198 |
+
}
|
199 |
+
Vectorized<c10::complex<float>> imag() const {
|
200 |
+
return _mm256_permute_ps(imag_(), 0xB1); //b a
|
201 |
+
}
|
202 |
+
__m256 conj_() const {
|
203 |
+
const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
204 |
+
return _mm256_xor_ps(values, sign_mask); // a -b
|
205 |
+
}
|
206 |
+
Vectorized<c10::complex<float>> conj() const {
|
207 |
+
return conj_();
|
208 |
+
}
|
209 |
+
Vectorized<c10::complex<float>> log() const {
|
210 |
+
// Most trigonomic ops use the log() op to improve complex number performance.
|
211 |
+
return map(std::log);
|
212 |
+
}
|
213 |
+
Vectorized<c10::complex<float>> log2() const {
|
214 |
+
const __m256 log2_ = _mm256_set1_ps(std::log(2));
|
215 |
+
return _mm256_div_ps(log(), log2_);
|
216 |
+
}
|
217 |
+
Vectorized<c10::complex<float>> log10() const {
|
218 |
+
const __m256 log10_ = _mm256_set1_ps(std::log(10));
|
219 |
+
return _mm256_div_ps(log(), log10_);
|
220 |
+
}
|
221 |
+
Vectorized<c10::complex<float>> log1p() const {
|
222 |
+
return map(std::log1p);
|
223 |
+
}
|
224 |
+
Vectorized<c10::complex<float>> asin() const {
|
225 |
+
// asin(x)
|
226 |
+
// = -i*ln(iz + sqrt(1 -z^2))
|
227 |
+
// = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
|
228 |
+
// = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
|
229 |
+
const __m256 one = _mm256_set1_ps(1);
|
230 |
+
|
231 |
+
auto conj = conj_();
|
232 |
+
auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a
|
233 |
+
auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab
|
234 |
+
auto im = _mm256_add_ps(ab, ab); //-2ab -2ab
|
235 |
+
|
236 |
+
auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
|
237 |
+
auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
|
238 |
+
re = _mm256_permute_ps(re, 0xD8);
|
239 |
+
re = _mm256_sub_ps(one, re);
|
240 |
+
|
241 |
+
auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im)
|
242 |
+
auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt())
|
243 |
+
return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
|
244 |
+
}
|
245 |
+
Vectorized<c10::complex<float>> acos() const {
|
246 |
+
return map(std::acos);
|
247 |
+
}
|
248 |
+
Vectorized<c10::complex<float>> atan() const;
|
249 |
+
Vectorized<c10::complex<float>> atanh() const {
|
250 |
+
return map(std::atanh);
|
251 |
+
}
|
252 |
+
Vectorized<c10::complex<float>> exp() const {
|
253 |
+
//exp(a + bi)
|
254 |
+
// = exp(a)*(cos(b) + sin(b)i)
|
255 |
+
auto exp = Sleef_expf8_u10(values); //exp(a) exp(b)
|
256 |
+
exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a)
|
257 |
+
|
258 |
+
auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
|
259 |
+
auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1),
|
260 |
+
sin_cos.x, 0xAA); //cos(b) sin(b)
|
261 |
+
return _mm256_mul_ps(exp, cos_sin);
|
262 |
+
}
|
263 |
+
Vectorized<c10::complex<float>> exp2() const {
|
264 |
+
// Use identity 2**x = exp(log(2) * x)
|
265 |
+
const __m256 ln_2 = _mm256_set1_ps(c10::ln_2<float>);
|
266 |
+
Vectorized<c10::complex<float>> scaled_values = _mm256_mul_ps(values, ln_2);
|
267 |
+
return scaled_values.exp();
|
268 |
+
}
|
269 |
+
Vectorized<c10::complex<float>> expm1() const {
|
270 |
+
return map(std::expm1);
|
271 |
+
}
|
272 |
+
Vectorized<c10::complex<float>> sin() const {
|
273 |
+
return map(std::sin);
|
274 |
+
}
|
275 |
+
Vectorized<c10::complex<float>> sinh() const {
|
276 |
+
return map(std::sinh);
|
277 |
+
}
|
278 |
+
Vectorized<c10::complex<float>> cos() const {
|
279 |
+
return map(std::cos);
|
280 |
+
}
|
281 |
+
Vectorized<c10::complex<float>> cosh() const {
|
282 |
+
return map(std::cosh);
|
283 |
+
}
|
284 |
+
Vectorized<c10::complex<float>> ceil() const {
|
285 |
+
return _mm256_ceil_ps(values);
|
286 |
+
}
|
287 |
+
Vectorized<c10::complex<float>> floor() const {
|
288 |
+
return _mm256_floor_ps(values);
|
289 |
+
}
|
290 |
+
Vectorized<c10::complex<float>> neg() const {
|
291 |
+
auto zero = _mm256_setzero_ps();
|
292 |
+
return _mm256_sub_ps(zero, values);
|
293 |
+
}
|
294 |
+
Vectorized<c10::complex<float>> round() const {
|
295 |
+
return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
296 |
+
}
|
297 |
+
Vectorized<c10::complex<float>> tan() const {
|
298 |
+
return map(std::tan);
|
299 |
+
}
|
300 |
+
Vectorized<c10::complex<float>> tanh() const {
|
301 |
+
return map(std::tanh);
|
302 |
+
}
|
303 |
+
Vectorized<c10::complex<float>> trunc() const {
|
304 |
+
return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
305 |
+
}
|
306 |
+
Vectorized<c10::complex<float>> sqrt() const {
|
307 |
+
return map(std::sqrt);
|
308 |
+
}
|
309 |
+
Vectorized<c10::complex<float>> reciprocal() const;
|
310 |
+
Vectorized<c10::complex<float>> rsqrt() const {
|
311 |
+
return sqrt().reciprocal();
|
312 |
+
}
|
313 |
+
Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
|
314 |
+
__at_align__ c10::complex<float> x_tmp[size()];
|
315 |
+
__at_align__ c10::complex<float> y_tmp[size()];
|
316 |
+
store(x_tmp);
|
317 |
+
exp.store(y_tmp);
|
318 |
+
for (const auto i : c10::irange(size())) {
|
319 |
+
x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
|
320 |
+
}
|
321 |
+
return loadu(x_tmp);
|
322 |
+
}
|
323 |
+
// Comparison using the _CMP_**_OQ predicate.
|
324 |
+
// `O`: get false if an operand is NaN
|
325 |
+
// `Q`: do not raise if an operand is NaN
|
326 |
+
Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
|
327 |
+
return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
|
328 |
+
}
|
329 |
+
Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
|
330 |
+
return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
|
331 |
+
}
|
332 |
+
Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& /*other*/) const {
|
333 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
334 |
+
}
|
335 |
+
Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& /*other*/) const {
|
336 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
337 |
+
}
|
338 |
+
Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& /*other*/) const {
|
339 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
340 |
+
}
|
341 |
+
Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& /*other*/) const {
|
342 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
343 |
+
}
|
344 |
+
|
345 |
+
Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
|
346 |
+
Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
|
347 |
+
};
|
348 |
+
|
349 |
+
template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
|
350 |
+
return _mm256_add_ps(a, b);
|
351 |
+
}
|
352 |
+
|
353 |
+
template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
|
354 |
+
return _mm256_sub_ps(a, b);
|
355 |
+
}
|
356 |
+
|
357 |
+
template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
|
358 |
+
//(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
|
359 |
+
const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
360 |
+
auto ac_bd = _mm256_mul_ps(a, b); //ac bd
|
361 |
+
|
362 |
+
auto d_c = _mm256_permute_ps(b, 0xB1); //d c
|
363 |
+
d_c = _mm256_xor_ps(sign_mask, d_c); //d -c
|
364 |
+
auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc
|
365 |
+
|
366 |
+
auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
|
367 |
+
ret = _mm256_permute_ps(ret, 0xD8);
|
368 |
+
return ret;
|
369 |
+
}
|
370 |
+
|
371 |
+
template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
|
372 |
+
//re + im*i = (a + bi) / (c + di)
|
373 |
+
auto mask = _mm256_set1_ps(-0.f);
|
374 |
+
auto fabs_cd = _mm256_andnot_ps(mask, b); // |c| |d|
|
375 |
+
auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1); // |d| |c|
|
376 |
+
auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
|
377 |
+
auto a2 = _mm256_mul_ps(a, scale); // a/sc b/sc
|
378 |
+
auto b2 = _mm256_mul_ps(b, scale); // c/sc d/sc
|
379 |
+
auto acbd2 = _mm256_mul_ps(a2, b2);
|
380 |
+
|
381 |
+
const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
|
382 |
+
auto dc2 = _mm256_permute_ps(b2, 0xB1); // d/sc c/sc
|
383 |
+
dc2 = _mm256_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc
|
384 |
+
auto adbc2 = _mm256_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
|
385 |
+
auto res2 = _mm256_hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
|
386 |
+
res2 = _mm256_permute_ps(res2, 0xD8);
|
387 |
+
|
388 |
+
// get the denominator
|
389 |
+
auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
|
390 |
+
res2 = _mm256_div_ps(res2, denom2);
|
391 |
+
return res2;
|
392 |
+
}
|
393 |
+
|
394 |
+
// reciprocal. Implement this here so we can use multiplication.
|
395 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
|
396 |
+
//re + im*i = (a + bi) / (c + di)
|
397 |
+
//re = (ac + bd)/abs_2() = c/abs_2()
|
398 |
+
//im = (bc - ad)/abs_2() = d/abs_2()
|
399 |
+
const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
400 |
+
auto c_d = _mm256_xor_ps(sign_mask, values); //c -d
|
401 |
+
return _mm256_div_ps(c_d, abs_2_());
|
402 |
+
}
|
403 |
+
|
404 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
|
405 |
+
// atan(x) = i/2 * ln((i + z)/(i - z))
|
406 |
+
const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
|
407 |
+
const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
|
408 |
+
|
409 |
+
auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b
|
410 |
+
auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b
|
411 |
+
auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
|
412 |
+
return i_half*ln; // i/2*ln()
|
413 |
+
}
|
414 |
+
|
415 |
+
template <>
|
416 |
+
Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
417 |
+
auto abs_a = a.abs_2_();
|
418 |
+
auto abs_b = b.abs_2_();
|
419 |
+
auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
|
420 |
+
auto max = _mm256_blendv_ps(a, b, mask);
|
421 |
+
// Exploit the fact that all-ones is a NaN.
|
422 |
+
auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
|
423 |
+
return _mm256_or_ps(max, isnan);
|
424 |
+
}
|
425 |
+
|
426 |
+
template <>
|
427 |
+
Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
428 |
+
auto abs_a = a.abs_2_();
|
429 |
+
auto abs_b = b.abs_2_();
|
430 |
+
auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
|
431 |
+
auto min = _mm256_blendv_ps(a, b, mask);
|
432 |
+
// Exploit the fact that all-ones is a NaN.
|
433 |
+
auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
|
434 |
+
return _mm256_or_ps(min, isnan);
|
435 |
+
}
|
436 |
+
|
437 |
+
template <>
|
438 |
+
Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
439 |
+
return _mm256_and_ps(a, b);
|
440 |
+
}
|
441 |
+
|
442 |
+
template <>
|
443 |
+
Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
444 |
+
return _mm256_or_ps(a, b);
|
445 |
+
}
|
446 |
+
|
447 |
+
template <>
|
448 |
+
Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
449 |
+
return _mm256_xor_ps(a, b);
|
450 |
+
}
|
451 |
+
|
452 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
|
453 |
+
const Vectorized<c10::complex<float>>& other) const {
|
454 |
+
auto eq = (*this == other); // compares real and imag individually
|
455 |
+
// If both real numbers and imag numbers are equal, then the complex numbers are equal
|
456 |
+
return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
|
457 |
+
}
|
458 |
+
|
459 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
|
460 |
+
const Vectorized<c10::complex<float>>& other) const {
|
461 |
+
auto ne = (*this != other); // compares real and imag individually
|
462 |
+
// If either real numbers or imag numbers are not equal, then the complex numbers are not equal
|
463 |
+
return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
|
464 |
+
}
|
465 |
+
|
466 |
+
#endif
|
467 |
+
|
468 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
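The float operator/ in the file above rescales both operands by max(|c|, |d|) before forming the quotient so that c^2 + d^2 cannot overflow. The scalar sketch below shows the same rescaling in plain C++; scaled_div is an illustrative helper, not a function from the header.

// Scalar sketch of the scaling used by the vectorized complex division above:
// both operands are divided by max(|c|, |d|) so c^2 + d^2 stays in range.
#include <algorithm>
#include <cmath>
#include <complex>
#include <cstdio>

static std::complex<float> scaled_div(std::complex<float> x, std::complex<float> y) {
  float sc = std::max(std::fabs(y.real()), std::fabs(y.imag()));
  float a = x.real() / sc, b = x.imag() / sc;  // a/sc, b/sc
  float c = y.real() / sc, d = y.imag() / sc;  // c/sc, d/sc
  float denom = c * c + d * d;                 // (c^2 + d^2) / sc^2
  return {(a * c + b * d) / denom, (b * c - a * d) / denom};
}

int main() {
  // A naive c*c + d*d here would overflow float; the scaled form does not.
  std::complex<float> x(1.0f, 2.0f), y(3.0e30f, 4.0e30f);
  std::complex<float> q = scaled_div(x, y);
  std::printf("%g %g\n", q.real(), q.imag());
  return 0;
}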
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h
ADDED
@@ -0,0 +1,73 @@
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
#include <ATen/cpu/vec/vec_base.h>
#include <c10/util/irange.h>

namespace at {
namespace vec {
// See Note [CPU_CAPABILITY namespace]
inline namespace CPU_CAPABILITY {

inline std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(
    const Vectorized<BFloat16>& a) {
  constexpr int64_t K = Vectorized<BFloat16>::size();
  __at_align__ float arr[K];
  __at_align__ BFloat16 arr2[K];
  a.store(arr2);
  convert(arr2, arr, K);
  return std::make_tuple(
      Vectorized<float>::loadu(arr),
      Vectorized<float>::loadu(arr + Vectorized<float>::size()));
}

inline Vectorized<BFloat16> convert_float_bfloat16(
    const Vectorized<float>& a,
    const Vectorized<float>& b) {
  constexpr int64_t K = Vectorized<BFloat16>::size();
  __at_align__ float arr[K];
  __at_align__ BFloat16 arr2[K];
  a.store(arr);
  b.store(arr + Vectorized<float>::size());
  convert(arr, arr2, K);
  return Vectorized<BFloat16>::loadu(arr2);
}

inline void load_fp32_from_bf16(const c10::BFloat16* data, Vectorized<float>& out) {
  __at_align__ float values[Vectorized<float>::size()];
  for (const auto k : c10::irange(Vectorized<float>::size())) {
    values[k] = data[k];
  }
  out = Vectorized<float>::loadu(values);
}

inline void load_fp32_from_bf16(
    const c10::BFloat16* data,
    Vectorized<float>& out1,
    Vectorized<float>& out2) {
  load_fp32_from_bf16(data, out1);
  data += Vectorized<float>::size();
  load_fp32_from_bf16(data, out2);
}

inline void load_fp32_from_fp16(const c10::Half* data, Vectorized<float>& out) {
  __at_align__ float values[Vectorized<float>::size()];
  for (const auto k : c10::irange(Vectorized<float>::size())) {
    values[k] = data[k];
  }
  out = Vectorized<float>::loadu(values);
}

inline void load_fp32_from_fp16(
    const c10::Half* data,
    Vectorized<float>& out1,
    Vectorized<float>& out2) {
  load_fp32_from_fp16(data, out1);
  data += Vectorized<float>::size();
  load_fp32_from_fp16(data, out2);
}

} // namespace
} // namespace vec
} // namespace at
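The scalar values[k] = data[k] loads above work because BFloat16 widens to float by bit manipulation alone: a bfloat16 value is simply the upper 16 bits of an IEEE-754 binary32. The sketch below illustrates that layout, assuming plain truncation rather than the library's round-to-nearest behaviour; both helper functions are illustrative only.

// BFloat16 keeps only the upper 16 bits of an IEEE-754 binary32 value,
// so widening bf16 -> float is a left shift by 16 bits.
#include <cstdint>
#include <cstring>
#include <cstdio>

static float bf16_bits_to_float(uint16_t bits) {
  uint32_t widened = static_cast<uint32_t>(bits) << 16;  // low mantissa bits become zero
  float out;
  std::memcpy(&out, &widened, sizeof(out));
  return out;
}

static uint16_t float_to_bf16_bits_truncate(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint16_t>(bits >> 16);  // truncation; real conversions round to nearest even
}

int main() {
  float original = 3.140625f;  // exactly representable in bfloat16
  uint16_t bf16 = float_to_bf16_bits_truncate(original);
  std::printf("%f -> 0x%04x -> %f\n",
              original, static_cast<unsigned>(bf16), bf16_bits_to_float(bf16));
  return 0;
}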
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h
ADDED
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>

// Note: header order is important here
#include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h>

#include <ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h>
#include <ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h>

#include <ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h>

namespace at {
namespace vec {

inline namespace CPU_CAPABILITY {

DEFINE_CLAMP_FUNCS(c10::quint8)
DEFINE_CLAMP_FUNCS(c10::qint8)
DEFINE_CLAMP_FUNCS(c10::qint32)
DEFINE_CLAMP_FUNCS(int16_t)
DEFINE_CLAMP_FUNCS(int32_t)
DEFINE_CLAMP_FUNCS(int64_t)
DEFINE_CLAMP_FUNCS(float)
DEFINE_CLAMP_FUNCS(double)

template <>
Vectorized<double> C10_ALWAYS_INLINE fmadd(
    const Vectorized<double>& a,
    const Vectorized<double>& b,
    const Vectorized<double>& c) {
  return Vectorized<double>{
      vec_madd(a.vec0(), b.vec0(), c.vec0()),
      vec_madd(a.vec1(), b.vec1(), c.vec1())};
}

template <>
Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
    const Vectorized<int64_t>& a,
    const Vectorized<int64_t>& b,
    const Vectorized<int64_t>& c) {
  return Vectorized<int64_t>{
      a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
}
template <>
Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
    const Vectorized<int32_t>& a,
    const Vectorized<int32_t>& b,
    const Vectorized<int32_t>& c) {
  return Vectorized<int32_t>{
      a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
}
template <>
Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
    const Vectorized<int16_t>& a,
    const Vectorized<int16_t>& b,
    const Vectorized<int16_t>& c) {
  return Vectorized<int16_t>{
      a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
}

DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)

template <>
Vectorized<int64_t> C10_ALWAYS_INLINE
convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
  return Vectorized<int64_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
}

template <>
Vectorized<int32_t> C10_ALWAYS_INLINE
convert_to_int_of_same_size<float>(
    const Vectorized<float>& src) {
  return Vectorized<int32_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
}

template <>
inline void convert(const int32_t* src, float* dst, int64_t n) {
  // int32_t and float have same size
  int64_t i;
  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
    const int32_t* src_a = src + i;
    float* dst_a = dst + i;
    vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint32*>(src_a));
    vint32 input_vec1 =
        vec_vsx_ld(offset16, reinterpret_cast<const vint32*>(src_a));
    vfloat32 c0 = vec_float(input_vec0);
    vfloat32 c1 = vec_float(input_vec1);
    vec_vsx_st(c0, offset0, dst_a);
    vec_vsx_st(c1, offset16, dst_a);
  }

  for (; i < n; i++) {
    dst[i] = static_cast<float>(src[i]);
  }
}

template <>
inline void convert(const int64_t* src, double* dst, int64_t n) {
  int64_t i;
  for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
    const int64_t* src_a = src + i;
    double* dst_a = dst + i;
    vint64 input_vec0 =
        vec_vsx_ld(offset0, reinterpret_cast<const vint64*>(src_a));
    vint64 input_vec1 =
        vec_vsx_ld(offset16, reinterpret_cast<const vint64*>(src_a));
    vfloat64 c0 = vec_double(input_vec0);
    vfloat64 c1 = vec_double(input_vec1);
    vec_vsx_st(c0, offset0, reinterpret_cast<double*>(dst_a));
    vec_vsx_st(c1, offset16, reinterpret_cast<double*>(dst_a));
  }
  for (; i < n; i++) {
    dst[i] = static_cast<double>(src[i]);
  }
}
// Generic implementation to fix compiler error
// TO-DO: Add optimized version for ppc64
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(
    const Vectorized<Half>& a) {
  constexpr int64_t K = Vectorized<Half>::size();
  __at_align__ float arr[K];
  __at_align__ Half arr2[K];
  a.store(arr2);
  convert(arr2, arr, K);
  return std::make_tuple(
      Vectorized<float>::loadu(arr),
      Vectorized<float>::loadu(arr + Vectorized<float>::size()));
}

inline Vectorized<Half> convert_float_half(
    const Vectorized<float>& a, const Vectorized<float>& b) {
  constexpr int64_t K = Vectorized<Half>::size();
  __at_align__ float arr[K];
  __at_align__ Half arr2[K];
  a.store(arr);
  b.store(arr + Vectorized<float>::size());
  convert(arr, arr2, K);
  return Vectorized<Half>::loadu(arr2);
};

template <>
std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
    const Vectorized<double>& a,
    const Vectorized<double>& b) {
  // inputs:
  //   a = {a0, a1, a2, a3}
  //   b = {b0, b1, b2, b3}

  vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0);
  vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3);
  vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0);
  vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3);
  // return {a0, b0, a1, b1}
  //        {a2, b2, a3, b3}
  return std::make_pair(
      Vectorized<double>{ab00, ab11}, Vectorized<double>{ab2_00, ab2_11});
}

template <>
std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
    const Vectorized<double>& a,
    const Vectorized<double>& b) {
  // inputs:
  //   a = {a0, b0, a1, b1}
  //   b = {a2, b2, a3, b3}
  vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0);
  vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0);

  vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3);
  vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3);

  // swap lanes:
  // return {a0, a1, a2, a3}
  //        {b0, b1, b2, b3}
  return std::make_pair(
      Vectorized<double>{aa01, aa23}, Vectorized<double>{bb_01, bb_23});
}

template <>
std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
    const Vectorized<float>& a,
    const Vectorized<float>& b) {
  // inputs:
  //   a = {a0, a1, a2, a3,, a4, a5, a6, a7}
  //   b = {b0, b1, b2, b3,, b4, b5, b6, b7}

  vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0());
  vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0());

  vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
  vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1());
  // group cols crossing lanes:
  // return {a0, b0, a1, b1,, a2, b2, a3, b3}
  //        {a4, b4, a5, b5,, a6, b6, a7, b7}

  return std::make_pair(
      Vectorized<float>{ab0011, ab2233}, Vectorized<float>{ab2_0011, ab2_2233});
}

template <>
std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
    const Vectorized<float>& a,
    const Vectorized<float>& b) {
  // inputs:
  //   a = {a0, b0, a1, b1,, a2, b2, a3, b3}
  //   b = {a4, b4, a5, b5,, a6, b6, a7, b7}

  // {a0,a2,b0,b2} {a1,a3,b1,b3}
  vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
  vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());

  vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
  vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);

  vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
  vfloat32 a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());

  vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
  vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);

  // it could be done with vec_perm, too
  // swap lanes:
  // return {a0, a1, a2, a3,, a4, a5, a6, a7}
  //        {b0, b1, b2, b3,, b4, b5, b6, b7}

  return std::make_pair(
      Vectorized<float>{aa0123, aa0123_2}, Vectorized<float>{bb0123, bb0123_2});
}

} // namespace
} // namespace vec
} // namespace at
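A minimal sketch of the fmadd specialization defined above, assuming a VSX build and that the umbrella header ATen/cpu/vec/vec.h brings these specializations in; the function name is illustrative only:

#include <ATen/cpu/vec/vec.h>

// Each lane computes a * b + c; the first lane is read back for inspection.
inline double fmadd_first_lane_example() {
  using Vec = at::vec::Vectorized<double>;
  Vec a(2.0), b(3.0), c(1.0);          // broadcast constructors
  Vec r = at::vec::fmadd(a, b, c);     // every lane holds 2.0 * 3.0 + 1.0 = 7.0
  double out[Vec::size()];
  r.store(out);
  return out[0];                       // 7.0
}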
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h
ADDED
@@ -0,0 +1,560 @@
#pragma once
#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
#include <c10/util/complex.h>
#include <c10/util/irange.h>

namespace at {
namespace vec {
// See Note [CPU_CAPABILITY namespace]
inline namespace CPU_CAPABILITY {
using ComplexDbl = c10::complex<double>;

template <>
class Vectorized<ComplexDbl> {
  union {
    struct {
      vfloat64 _vec0;
      vfloat64 _vec1;
    };
    struct {
      vbool64 _vecb0;
      vbool64 _vecb1;
    };

  } __attribute__((__may_alias__));

 public:
  using value_type = ComplexDbl;
  using vec_internal_type = vfloat64;
  using vec_internal_mask_type = vbool64;
  using size_type = int;
  static constexpr size_type size() {
    return 2;
  }
  Vectorized() {}
  C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
  C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
  C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
  C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}

  Vectorized(ComplexDbl val) {
    double real_value = val.real();
    double imag_value = val.imag();
    _vec0 = vfloat64{real_value, imag_value};
    _vec1 = vfloat64{real_value, imag_value};
  }
  Vectorized(ComplexDbl val1, ComplexDbl val2) {
    _vec0 = vfloat64{val1.real(), val1.imag()};
    _vec1 = vfloat64{val2.real(), val2.imag()};
  }

  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
    return _vec0;
  }
  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
    return _vec1;
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceComplexDbl(mask) == 0, Vectorized<ComplexDbl>>
      C10_ALWAYS_INLINE
      blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
    return a;
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceComplexDbl(mask) == 1, Vectorized<ComplexDbl>>
      C10_ALWAYS_INLINE
      blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
    return b;
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceComplexDbl(mask) == 2, Vectorized<ComplexDbl>>
      C10_ALWAYS_INLINE
      blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
    return {b._vec0, a._vec1};
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceComplexDbl(mask) == 3, Vectorized<ComplexDbl>>
      C10_ALWAYS_INLINE
      blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
    return {a._vec0, b._vec1};
  }

  template <int64_t mask>
  static Vectorized<ComplexDbl> C10_ALWAYS_INLINE
  el_blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
    const vbool64 mask_1st = VsxDblMask1(mask);
    const vbool64 mask_2nd = VsxDblMask2(mask);
    return {
        (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
        (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)};
  }

  static Vectorized<ComplexDbl> blendv(
      const Vectorized<ComplexDbl>& a,
      const Vectorized<ComplexDbl>& b,
      const Vectorized<ComplexDbl>& mask) {
    // convert std::complex<V> index mask to V index mask: xy -> xxyy
    auto mask_complex =
        Vectorized<ComplexDbl>(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0));
    return {
        vec_sel(a._vec0, b._vec0, mask_complex._vecb0),
        vec_sel(a._vec1, b._vec1, mask_complex._vecb1)};
  }

  static Vectorized<ComplexDbl> C10_ALWAYS_INLINE elwise_blendv(
      const Vectorized<ComplexDbl>& a,
      const Vectorized<ComplexDbl>& b,
      const Vectorized<ComplexDbl>& mask) {
    return {
        vec_sel(a._vec0, b._vec0, mask._vecb0),
        vec_sel(a._vec1, b._vec1, mask._vecb1)};
  }
  template <typename step_t>
  static Vectorized<ComplexDbl> arange(
      ComplexDbl base = 0.,
      step_t step = static_cast<step_t>(1)) {
    return Vectorized<ComplexDbl>(base, base + step);
  }
  static Vectorized<ComplexDbl> set(
      const Vectorized<ComplexDbl>& a,
      const Vectorized<ComplexDbl>& b,
      int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
    }
    return b;
  }

  static Vectorized<value_type> C10_ALWAYS_INLINE
  loadu(const void* ptr, int count = size()) {
    if (count == size()) {
      return {
          vec_vsx_ld(offset0, reinterpret_cast<const double*>(ptr)),
          vec_vsx_ld(offset16, reinterpret_cast<const double*>(ptr))};
    }

    __at_align__ value_type tmp_values[size()] = {};
    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));

    return {
        vec_vsx_ld(offset0, reinterpret_cast<const double*>(tmp_values)),
        vec_vsx_ld(offset16, reinterpret_cast<const double*>(tmp_values))};
  }
  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
    if (count == size()) {
      vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(ptr));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(ptr));
    } else if (count > 0) {
      __at_align__ value_type tmp_values[size()];
      vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(tmp_values));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(tmp_values));
      std::memcpy(
          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }

  const ComplexDbl& operator[](int idx) const = delete;
  ComplexDbl& operator[](int idx) = delete;

  Vectorized<ComplexDbl> map(ComplexDbl (*const f)(ComplexDbl)) const {
    __at_align__ ComplexDbl tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }

  Vectorized<ComplexDbl> map(ComplexDbl (*const f)(const ComplexDbl&)) const {
    __at_align__ ComplexDbl tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }

  Vectorized<ComplexDbl> el_swapped() const {
    vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2);
    vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2);
    return {v0, v1};
  }

  Vectorized<ComplexDbl> el_madd(
      const Vectorized<ComplexDbl>& multiplier,
      const Vectorized<ComplexDbl>& val) const {
    return {
        vec_madd(_vec0, multiplier._vec0, val._vec0),
        vec_madd(_vec1, multiplier._vec1, val._vec1)};
  }

  Vectorized<ComplexDbl> el_mergeo() const {
    vfloat64 v0 = vec_splat(_vec0, 1);
    vfloat64 v1 = vec_splat(_vec1, 1);
    return {v0, v1};
  }

  Vectorized<ComplexDbl> el_mergee() const {
    vfloat64 v0 = vec_splat(_vec0, 0);
    vfloat64 v1 = vec_splat(_vec1, 0);
    return {v0, v1};
  }

  static Vectorized<ComplexDbl> el_mergee(
      Vectorized<ComplexDbl>& first,
      Vectorized<ComplexDbl>& second) {
    return {
        vec_mergeh(first._vec0, second._vec0),
        vec_mergeh(first._vec1, second._vec1)};
  }

  static Vectorized<ComplexDbl> el_mergeo(
      Vectorized<ComplexDbl>& first,
      Vectorized<ComplexDbl>& second) {
    return {
        vec_mergel(first._vec0, second._vec0),
        vec_mergel(first._vec1, second._vec1)};
  }

  Vectorized<ComplexDbl> abs_2_() const {
    auto a = (*this).elwise_mult(*this);
    auto permuted = a.el_swapped();
    a = a + permuted;
    return a;
  }

  Vectorized<ComplexDbl> abs_() const {
    auto vi = el_mergeo();
    auto vr = el_mergee();
    return {Sleef_hypotd2_u05vsx(vr._vec0, vi._vec0), Sleef_hypotd2_u05vsx(vr._vec1, vi._vec1)};
  }

  Vectorized<ComplexDbl> abs() const {
    return abs_() & vd_real_mask;
  }

  Vectorized<ComplexDbl> angle_() const {
    // angle = atan2(b/a)
    // auto b_a = _mm256_permute_pd(values, 0x05); // b a
    // return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
    Vectorized<ComplexDbl> ret;
    ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]);
    ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]);
    return ret;
  }

  Vectorized<ComplexDbl> angle() const {
    return angle_() & vd_real_mask;
  }

  Vectorized<ComplexDbl> real_() const {
    return *this & vd_real_mask;
  }
  Vectorized<ComplexDbl> real() const {
    return *this & vd_real_mask;
  }
  Vectorized<ComplexDbl> imag_() const {
    return *this & vd_imag_mask;
  }
  Vectorized<ComplexDbl> imag() const {
    return imag_().el_swapped();
  }

  Vectorized<ComplexDbl> conj_() const {
    return *this ^ vd_isign_mask;
  }
  Vectorized<ComplexDbl> conj() const {
    return *this ^ vd_isign_mask;
  }

  Vectorized<ComplexDbl> log() const {
    // Most trigonomic ops use the log() op to improve complex number
    // performance.
    return map(std::log);
  }

  Vectorized<ComplexDbl> log2() const {
    // log2eB_inv
    auto ret = log();
    return ret.elwise_mult(vd_log2e_inv);
  }
  Vectorized<ComplexDbl> log10() const {
    auto ret = log();
    return ret.elwise_mult(vd_log10e_inv);
  }

  Vectorized<ComplexDbl> log1p() const {
    return map(std::log1p);
  }

  Vectorized<ComplexDbl> asin() const {
    // asin(x)
    // = -i*ln(iz + sqrt(1 -z^2))
    // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
    // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
    auto conj = conj_();
    auto b_a = conj.el_swapped();
    auto ab = conj.elwise_mult(b_a);
    auto im = ab + ab;
    auto val_2 = (*this).elwise_mult(*this);
    auto val_2_swapped = val_2.el_swapped();
    auto re = horizontal_sub(val_2, val_2_swapped);
    re = Vectorized<ComplexDbl>(vd_one) - re;
    auto root = el_blend<0x0A>(re, im).sqrt();
    auto ln = (b_a + root).log();
    return ln.el_swapped().conj();
  }

  Vectorized<ComplexDbl> acos() const {
    // acos(x) = pi/2 - asin(x)
    return Vectorized(vd_pi_2) - asin();
  }

  Vectorized<ComplexDbl> atan() const {
    // atan(x) = i/2 * ln((i + z)/(i - z))
    auto ione = Vectorized(vd_imag_one);
    auto sum = ione + *this;
    auto sub = ione - *this;
    auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
    return ln * vd_imag_half; // i/2*ln()
  }
  Vectorized<ComplexDbl> atanh() const {
    return map(std::atanh);
  }

  Vectorized<ComplexDbl> sin() const {
    return map(std::sin);
  }
  Vectorized<ComplexDbl> sinh() const {
    return map(std::sinh);
  }
  Vectorized<ComplexDbl> cos() const {
    return map(std::cos);
  }
  Vectorized<ComplexDbl> cosh() const {
    return map(std::cosh);
  }

  Vectorized<ComplexDbl> tan() const {
    return map(std::tan);
  }
  Vectorized<ComplexDbl> tanh() const {
    return map(std::tanh);
  }
  Vectorized<ComplexDbl> ceil() const {
    return {vec_ceil(_vec0), vec_ceil(_vec1)};
  }
  Vectorized<ComplexDbl> floor() const {
    return {vec_floor(_vec0), vec_floor(_vec1)};
  }
  Vectorized<ComplexDbl> neg() const {
    auto z = Vectorized<ComplexDbl>(vd_zero);
    return z - *this;
  }
  Vectorized<ComplexDbl> round() const {
    return {vec_rint(_vec0), vec_rint(_vec1)};
  }

  Vectorized<ComplexDbl> trunc() const {
    return {vec_trunc(_vec0), vec_trunc(_vec1)};
  }

  Vectorized<ComplexDbl> elwise_sqrt() const {
    return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
  }

  Vectorized<ComplexDbl> sqrt() const {
    return map(std::sqrt);
  }

  Vectorized<ComplexDbl> reciprocal() const {
    // re + im*i = (a + bi) / (c + di)
    // re = (ac + bd)/abs_2() = c/abs_2()
    // im = (bc - ad)/abs_2() = d/abs_2()
    auto c_d = *this ^ vd_isign_mask; // c -d
    auto abs = abs_2_();
    return c_d.elwise_div(abs);
  }

  Vectorized<ComplexDbl> rsqrt() const {
    return sqrt().reciprocal();
  }

  static Vectorized<ComplexDbl> horizontal_add(
      Vectorized<ComplexDbl>& first,
      Vectorized<ComplexDbl>& second) {
    // Operates on individual floats, see _mm_hadd_ps
    // {f0+f1, s0+s1, f2+f3, s2+s3, ...}
    // i.e. it sums the re and im of each value and interleaves first and second:
    // {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...}
    return el_mergee(first, second) + el_mergeo(first, second);
  }

  static Vectorized<ComplexDbl> horizontal_sub(
      Vectorized<ComplexDbl>& first,
      Vectorized<ComplexDbl>& second) {
    // we will simulate it differently with 6 instructions total
    // lets permute second so that we can add it getting horizontal sums
    auto first_perm = first.el_swapped(); // 2perm
    auto second_perm = second.el_swapped(); // 2perm
    // summ
    auto first_ret = first - first_perm; // 2sub
    auto second_ret = second - second_perm; // 2 sub
    // now lets choose evens
    return el_mergee(first_ret, second_ret); // 2 mergee's
  }

  Vectorized<ComplexDbl> inline operator*(const Vectorized<ComplexDbl>& b) const {
    //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
#if 1
    // this is more vsx friendly than simulating horizontal from x86
    auto vi = b.el_mergeo();
    auto vr = b.el_mergee();
    vi = vi ^ vd_rsign_mask;
    auto ret = elwise_mult(vr);
    auto vx_swapped = el_swapped();
    ret = vx_swapped.el_madd(vi, ret);
#else
    auto ac_bd = elwise_mult(b);
    auto d_c = b.el_swapped();
    d_c = d_c ^ vd_isign_mask;
    auto ad_bc = elwise_mult(d_c);
    auto ret = horizontal_sub(ac_bd, ad_bc);
#endif
    return ret;
  }

  Vectorized<ComplexDbl> inline operator/(const Vectorized<ComplexDbl>& b) const {
    // re + im*i = (a + bi) / (c + di)
    // re = (ac + bd)/abs_2()
    // im = (bc - ad)/abs_2()
    auto fabs_cd = Vectorized{
        vec_andc(b._vec0, vd_sign_mask),
        vec_andc(b._vec1, vd_sign_mask)}; // |c| |d|
    auto fabs_dc = fabs_cd.el_swapped(); // |d| |c|
    auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|)
    auto a2 = elwise_div(scale); // a/sc b/sc
    auto b2 = b.elwise_div(scale); // c/sc d/sc
    auto acbd2 = a2.elwise_mult(b2); // ac/sc^2 bd/sc^2
    auto dc2 = b2.el_swapped(); // d/sc c/sc
    dc2 = dc2 ^ vd_rsign_mask; // -d/sc c/sc
    auto adbc2 = a2.elwise_mult(dc2); // -ad/sc^2 bc/sc^2
    auto ret = horizontal_add(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2
    auto denom2 = b2.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
    ret = ret.elwise_div(denom2);
    return ret;
  }

  Vectorized<ComplexDbl> exp() const {
    return map(std::exp);
  }
  Vectorized<ComplexDbl> exp2() const {
    return map(exp2_impl);
  }
  Vectorized<ComplexDbl> expm1() const {
    return map(std::expm1);
  }

  Vectorized<ComplexDbl> pow(const Vectorized<ComplexDbl>& exp) const {
    __at_align__ ComplexDbl x_tmp[size()];
    __at_align__ ComplexDbl y_tmp[size()];
    store(x_tmp);
    exp.store(y_tmp);
    for (const auto i : c10::irange(size())) {
      x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
    }
    return loadu(x_tmp);
  }

  Vectorized<ComplexDbl> sgn() const {
    return map(at::native::sgn_impl);
  }

  Vectorized<ComplexDbl> operator<(const Vectorized<ComplexDbl>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<ComplexDbl> operator<=(const Vectorized<ComplexDbl>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<ComplexDbl> operator>(const Vectorized<ComplexDbl>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<ComplexDbl> operator>=(const Vectorized<ComplexDbl>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  Vectorized<ComplexDbl> eq(const Vectorized<ComplexDbl>& other) const {
    auto eq = (*this == other); // compares real and imag individually
    // If both real numbers and imag numbers are equal, then the complex numbers are equal
    return (eq.real() & eq.imag()) & vd_one;
  }
  Vectorized<ComplexDbl> ne(const Vectorized<ComplexDbl>& other) const {
    auto ne = (*this != other); // compares real and imag individually
    // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
    return (ne.real() | ne.imag()) & vd_one;
  }

  DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne)

  DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add)
  DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub)
  DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and)
  DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or)
  DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor)
  // elementwise helpers
  DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul)
  DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div)
  DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt)
  DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge)
  DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt)
  DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple)
  DEFINE_MEMBER_OP(elwise_max, ComplexDbl, vec_max)
};

template <>
Vectorized<ComplexDbl> inline maximum(
    const Vectorized<ComplexDbl>& a,
    const Vectorized<ComplexDbl>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
  // auto max = _mm256_blendv_ps(a, b, mask);
  auto mask = abs_a.elwise_lt(abs_b);
  auto max = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);

  return max;
  // Exploit the fact that all-ones is a NaN.
  // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
  // return _mm256_or_ps(max, isnan);
}

template <>
Vectorized<ComplexDbl> inline minimum(
    const Vectorized<ComplexDbl>& a,
    const Vectorized<ComplexDbl>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
  // auto min = _mm256_blendv_ps(a, b, mask);
  auto mask = abs_a.elwise_gt(abs_b);
  auto min = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
  return min;
  // Exploit the fact that all-ones is a NaN.
  // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
  // return _mm256_or_ps(min, isnan);
}

} // namespace
} // namespace vec
} // namespace at
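A minimal sketch of the complex product identity that operator* above implements, assuming a VSX build reached through ATen/cpu/vec/vec.h; variable and function names are illustrative only:

#include <ATen/cpu/vec/vec.h>
#include <c10/util/complex.h>

// (a + bi) * (c + di) = (ac - bd) + (ad + bc)i; the first lane is read back.
inline c10::complex<double> complex_mul_example() {
  using Vec = at::vec::Vectorized<c10::complex<double>>;
  Vec x(c10::complex<double>(1.0, 2.0));   // a + bi, broadcast to both lanes
  Vec y(c10::complex<double>(3.0, 4.0));   // c + di
  Vec z = x * y;                           // (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i
  c10::complex<double> out[Vec::size()];
  z.store(out);
  return out[0];                           // (-5, 10)
}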
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h
ADDED
@@ -0,0 +1,628 @@
1 |
+
|
2 |
+
#pragma once
|
3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
6 |
+
#include <c10/util/complex.h>
|
7 |
+
#include <c10/util/irange.h>
|
8 |
+
|
9 |
+
namespace at {
|
10 |
+
namespace vec {
|
11 |
+
// See Note [CPU_CAPABILITY namespace]
|
12 |
+
inline namespace CPU_CAPABILITY {
|
13 |
+
using ComplexFlt = c10::complex<float>;
|
14 |
+
|
15 |
+
template <>
|
16 |
+
class Vectorized<ComplexFlt> {
|
17 |
+
private:
|
18 |
+
union {
|
19 |
+
struct {
|
20 |
+
vfloat32 _vec0;
|
21 |
+
vfloat32 _vec1;
|
22 |
+
};
|
23 |
+
struct {
|
24 |
+
vbool32 _vecb0;
|
25 |
+
vbool32 _vecb1;
|
26 |
+
};
|
27 |
+
|
28 |
+
} __attribute__((__may_alias__));
|
29 |
+
|
30 |
+
public:
|
31 |
+
using value_type = ComplexFlt;
|
32 |
+
using vec_internal_type = vfloat32;
|
33 |
+
using vec_internal_mask_type = vbool32;
|
34 |
+
using size_type = int;
|
35 |
+
|
36 |
+
static constexpr size_type size() {
|
37 |
+
return 4;
|
38 |
+
}
|
39 |
+
Vectorized() {}
|
40 |
+
|
41 |
+
C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {}
|
42 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
43 |
+
C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {}
|
44 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
|
45 |
+
|
46 |
+
Vectorized(ComplexFlt val) {
|
47 |
+
float real_value = val.real();
|
48 |
+
float imag_value = val.imag();
|
49 |
+
_vec0 = vfloat32{real_value, imag_value, real_value, imag_value};
|
50 |
+
_vec1 = vfloat32{real_value, imag_value, real_value, imag_value};
|
51 |
+
}
|
52 |
+
|
53 |
+
Vectorized(ComplexFlt val1, ComplexFlt val2, ComplexFlt val3, ComplexFlt val4) {
|
54 |
+
_vec0 = vfloat32{val1.real(), val1.imag(), val2.real(), val2.imag()};
|
55 |
+
_vec1 = vfloat32{val3.real(), val3.imag(), val4.real(), val4.imag()};
|
56 |
+
}
|
57 |
+
|
58 |
+
template <uint64_t mask>
|
59 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 0, Vectorized<ComplexFlt>>
|
60 |
+
C10_ALWAYS_INLINE
|
61 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
62 |
+
return a;
|
63 |
+
}
|
64 |
+
|
65 |
+
template <uint64_t mask>
|
66 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 1, Vectorized<ComplexFlt>>
|
67 |
+
C10_ALWAYS_INLINE
|
68 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
69 |
+
return b;
|
70 |
+
}
|
71 |
+
|
72 |
+
template <uint64_t mask>
|
73 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 2, Vectorized<ComplexFlt>>
|
74 |
+
C10_ALWAYS_INLINE
|
75 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
76 |
+
return {b._vec0, a._vec1};
|
77 |
+
}
|
78 |
+
|
79 |
+
template <uint64_t mask>
|
80 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 3, Vectorized<ComplexFlt>>
|
81 |
+
C10_ALWAYS_INLINE
|
82 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
83 |
+
return {a._vec0, b._vec1};
|
84 |
+
}
|
85 |
+
|
86 |
+
template <uint64_t mask>
|
87 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 4, Vectorized<ComplexFlt>>
|
88 |
+
C10_ALWAYS_INLINE
|
89 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
90 |
+
const vbool32 mask_1st = VsxComplexMask1(mask);
|
91 |
+
return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
|
92 |
+
}
|
93 |
+
|
94 |
+
template <uint64_t mask>
|
95 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 5, Vectorized<ComplexFlt>>
|
96 |
+
C10_ALWAYS_INLINE
|
97 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
98 |
+
const vbool32 mask_1st = VsxComplexMask1(mask);
|
99 |
+
return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
|
100 |
+
}
|
101 |
+
|
102 |
+
template <uint64_t mask>
|
103 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 6, Vectorized<ComplexFlt>>
|
104 |
+
C10_ALWAYS_INLINE
|
105 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
106 |
+
const vbool32 mask_2nd = VsxComplexMask2(mask);
|
107 |
+
// generated masks
|
108 |
+
return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
109 |
+
}
|
110 |
+
|
111 |
+
template <uint64_t mask>
|
112 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 7, Vectorized<ComplexFlt>>
|
113 |
+
C10_ALWAYS_INLINE
|
114 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
115 |
+
const vbool32 mask_2nd = VsxComplexMask2(mask);
|
116 |
+
// generated masks
|
117 |
+
return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
118 |
+
}
|
119 |
+
|
120 |
+
template <uint64_t mask>
|
121 |
+
static std::enable_if_t<blendChoiceComplex(mask) == 8, Vectorized<ComplexFlt>>
|
122 |
+
C10_ALWAYS_INLINE
|
123 |
+
blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
124 |
+
const vbool32 mask_1st = VsxComplexMask1(mask);
|
125 |
+
const vbool32 mask_2nd = VsxComplexMask2(mask);
|
126 |
+
return {
|
127 |
+
(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
|
128 |
+
(vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
129 |
+
}
|
130 |
+
|
131 |
+
template <int64_t mask>
|
132 |
+
static Vectorized<ComplexFlt> C10_ALWAYS_INLINE
|
133 |
+
el_blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
|
134 |
+
const vbool32 mask_1st = VsxMask1(mask);
|
135 |
+
const vbool32 mask_2nd = VsxMask2(mask);
|
136 |
+
return {
|
137 |
+
(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
|
138 |
+
(vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
139 |
+
}
|
140 |
+
|
141 |
+
static Vectorized<ComplexFlt> blendv(
|
142 |
+
const Vectorized<ComplexFlt>& a,
|
143 |
+
const Vectorized<ComplexFlt>& b,
|
144 |
+
const Vectorized<ComplexFlt>& mask) {
|
145 |
+
// convert std::complex<V> index mask to V index mask: xy -> xxyy
|
146 |
+
auto mask_complex = Vectorized<ComplexFlt>(
|
147 |
+
vec_mergeh(mask._vec0, mask._vec0), vec_mergeh(mask._vec1, mask._vec1));
|
148 |
+
return {
|
149 |
+
vec_sel(a._vec0, b._vec0, reinterpret_cast<vbool32>(mask_complex._vec0)),
|
150 |
+
vec_sel(a._vec1, b._vec1, reinterpret_cast<vbool32>(mask_complex._vec1)),
|
151 |
+
};
|
152 |
+
}
|
153 |
+
|
154 |
+
static Vectorized<ComplexFlt> elwise_blendv(
|
155 |
+
const Vectorized<ComplexFlt>& a,
|
156 |
+
const Vectorized<ComplexFlt>& b,
|
157 |
+
const Vectorized<ComplexFlt>& mask) {
|
158 |
+
return {
|
159 |
+
vec_sel(a._vec0, b._vec0, reinterpret_cast<vbool32>(mask._vec0)),
|
160 |
+
vec_sel(a._vec1, b._vec1, reinterpret_cast<vbool32>(mask._vec1)),
|
161 |
+
};
|
162 |
+
}
|
163 |
+
|
164 |
+
template <typename step_t>
|
165 |
+
static Vectorized<ComplexFlt> arange(
|
166 |
+
ComplexFlt base = 0.,
|
167 |
+
step_t step = static_cast<step_t>(1)) {
|
168 |
+
return Vectorized<ComplexFlt>(
|
169 |
+
base,
|
170 |
+
base + step,
|
171 |
+
base + ComplexFlt(2) * step,
|
172 |
+
base + ComplexFlt(3) * step);
|
173 |
+
}
|
174 |
+
static Vectorized<ComplexFlt> set(
|
175 |
+
const Vectorized<ComplexFlt>& a,
|
176 |
+
const Vectorized<ComplexFlt>& b,
|
177 |
+
int64_t count = size()) {
|
178 |
+
switch (count) {
|
179 |
+
case 0:
|
180 |
+
return a;
|
181 |
+
case 1:
|
182 |
+
return blend<1>(a, b);
|
183 |
+
case 2:
|
184 |
+
return blend<3>(a, b);
|
185 |
+
case 3:
|
186 |
+
return blend<7>(a, b);
|
187 |
+
}
|
188 |
+
return b;
|
189 |
+
}
|
190 |
+
|
191 |
+
static Vectorized<value_type> C10_ALWAYS_INLINE
|
192 |
+
loadu(const void* ptr, int count = size()) {
|
193 |
+
if (count == size()) {
|
194 |
+
return {
|
195 |
+
vec_vsx_ld(offset0, reinterpret_cast<const float*>(ptr)),
|
196 |
+
vec_vsx_ld(offset16, reinterpret_cast<const float*>(ptr))};
|
197 |
+
}
|
198 |
+
|
199 |
+
__at_align__ value_type tmp_values[size()] = {};
|
200 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
201 |
+
|
202 |
+
return {
|
203 |
+
vec_vsx_ld(offset0, reinterpret_cast<const float*>(tmp_values)),
|
204 |
+
vec_vsx_ld(offset16, reinterpret_cast<const float*>(tmp_values))};
|
205 |
+
}
|
206 |
+
|
207 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
208 |
+
if (count == size()) {
|
209 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(ptr));
|
210 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(ptr));
|
211 |
+
} else if (count > 0) {
|
212 |
+
__at_align__ value_type tmp_values[size()];
|
213 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(tmp_values));
|
214 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(tmp_values));
|
215 |
+
std::memcpy(
|
216 |
+
ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
|
217 |
+
}
|
218 |
+
}
|
219 |
+
|
220 |
+
const ComplexFlt& operator[](int idx) const = delete;
|
221 |
+
ComplexFlt& operator[](int idx) = delete;
|
222 |
+
|
223 |
+
Vectorized<ComplexFlt> map(ComplexFlt (*const f)(ComplexFlt)) const {
|
224 |
+
__at_align__ ComplexFlt tmp[size()];
|
225 |
+
store(tmp);
|
226 |
+
for (const auto i : c10::irange(size())) {
|
227 |
+
tmp[i] = f(tmp[i]);
|
228 |
+
}
|
229 |
+
return loadu(tmp);
|
230 |
+
}
|
231 |
+
|
232 |
+
Vectorized<ComplexFlt> map(ComplexFlt (*const f)(const ComplexFlt&)) const {
|
233 |
+
__at_align__ ComplexFlt tmp[size()];
|
234 |
+
store(tmp);
|
235 |
+
for (const auto i : c10::irange(size())) {
|
236 |
+
tmp[i] = f(tmp[i]);
|
237 |
+
}
|
238 |
+
return loadu(tmp);
|
239 |
+
}
|
240 |
+
|
241 |
+
static Vectorized<ComplexFlt> horizontal_add(
|
242 |
+
Vectorized<ComplexFlt>& first,
|
243 |
+
Vectorized<ComplexFlt>& second) {
|
244 |
+
// Operates on individual floats, see _mm_hadd_ps
|
245 |
+
// {f0+f1, s0+s1, f2+f3, s2+s3, ...}
|
246 |
+
// i.e. it sums the re and im of each value and interleaves first and second:
|
247 |
+
// {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...}
|
248 |
+
return el_mergee(first, second) + el_mergeo(first, second);
|
249 |
+
}
|
250 |
+
|
251 |
+
static Vectorized<ComplexFlt> horizontal_sub_permD8(
|
252 |
+
Vectorized<ComplexFlt>& first,
|
253 |
+
Vectorized<ComplexFlt>& second) {
|
254 |
+
// we will simulate it differently with 6 instructions total
|
255 |
+
// lets permute second so that we can add it getting horizontal sums
|
256 |
+
auto first_perm = first.el_swapped(); // 2perm
|
257 |
+
auto second_perm = second.el_swapped(); // 2perm
|
258 |
+
// sum
|
259 |
+
auto first_ret = first - first_perm; // 2sub
|
260 |
+
auto second_ret = second - second_perm; // 2 sub
|
261 |
+
// now lets choose evens
|
262 |
+
return el_mergee(first_ret, second_ret); // 2 mergee's
|
263 |
+
}
|
264 |
+
|
265 |
+
Vectorized<ComplexFlt> abs_2_() const {
|
266 |
+
auto a = (*this).elwise_mult(*this);
|
267 |
+
auto permuted = a.el_swapped();
|
268 |
+
a = a + permuted;
|
269 |
+
return a.el_mergee();
|
270 |
+
}
|
271 |
+
|
272 |
+
Vectorized<ComplexFlt> abs_() const {
|
273 |
+
auto vi = el_mergeo();
|
274 |
+
auto vr = el_mergee();
|
275 |
+
return {Sleef_hypotf4_u05vsx(vr._vec0, vi._vec0), Sleef_hypotf4_u05vsx(vr._vec1, vi._vec1)};
|
276 |
+
}
|
277 |
+
|
278 |
+
Vectorized<ComplexFlt> abs() const {
|
279 |
+
return abs_() & real_mask;
|
280 |
+
}
|
281 |
+
|
282 |
+
Vectorized<ComplexFlt> real_() const {
|
283 |
+
return *this & real_mask;
|
284 |
+
}
|
285 |
+
Vectorized<ComplexFlt> real() const {
|
286 |
+
return *this & real_mask;
|
287 |
+
}
|
288 |
+
Vectorized<ComplexFlt> imag_() const {
|
289 |
+
return *this & imag_mask;
|
290 |
+
}
|
291 |
+
Vectorized<ComplexFlt> imag() const {
|
292 |
+
// we can use swap_mask or sldwi
|
293 |
+
auto ret = imag_();
|
294 |
+
return {
|
295 |
+
vec_sldw(ret._vec0, ret._vec0, 3), vec_sldw(ret._vec1, ret._vec1, 3)};
|
296 |
+
}
|
297 |
+
|
298 |
+
Vectorized<ComplexFlt> conj_() const {
|
299 |
+
return *this ^ isign_mask;
|
300 |
+
}
|
301 |
+
Vectorized<ComplexFlt> conj() const {
|
302 |
+
return *this ^ isign_mask;
|
303 |
+
}
|
304 |
+
|
305 |
+
Vectorized<ComplexFlt> log() const {
|
306 |
+
// Most trigonomic ops use the log() op to improve complex number
|
307 |
+
// performance.
|
308 |
+
return map(std::log);
|
309 |
+
}
|
310 |
+
|
311 |
+
Vectorized<ComplexFlt> log2() const {
|
312 |
+
// log2eB_inv
|
313 |
+
auto ret = log();
|
314 |
+
return ret.elwise_mult(log2e_inv);
|
315 |
+
}
|
316 |
+
Vectorized<ComplexFlt> log10() const {
|
317 |
+
auto ret = log();
|
318 |
+
return ret.elwise_mult(log10e_inv);
|
319 |
+
}
|
320 |
+
|
321 |
+
Vectorized<ComplexFlt> log1p() const {
|
322 |
+
return map(std::log1p);
|
323 |
+
}
|
324 |
+
|
325 |
+
Vectorized<ComplexFlt> el_swapped() const {
|
326 |
+
vfloat32 v0 = vec_perm(_vec0, _vec0, swap_mask);
|
327 |
+
vfloat32 v1 = vec_perm(_vec1, _vec1, swap_mask);
|
328 |
+
return {v0, v1};
|
329 |
+
}
|
330 |
+
|
331 |
+
Vectorized<ComplexFlt> el_mergee() const {
|
332 |
+
// as mergee phased in , we can use vec_perm with mask
|
333 |
+
return {vec_mergee(_vecb0, _vecb0), vec_mergee(_vecb1, _vecb1)};
|
334 |
+
}
|
335 |
+
|
336 |
+
Vectorized<ComplexFlt> el_mergeo() const {
|
337 |
+
// as mergeo phased in , we can use vec_perm with mask
|
338 |
+
return {vec_mergeo(_vecb0, _vecb0), vec_mergeo(_vecb1, _vecb1)};
|
339 |
+
}
|
340 |
+
|
341 |
+
Vectorized<ComplexFlt> el_madd(
|
342 |
+
const Vectorized<ComplexFlt>& multiplier,
|
343 |
+
const Vectorized<ComplexFlt>& val) const {
|
344 |
+
return {
|
345 |
+
vec_madd(_vec0, multiplier._vec0, val._vec0),
|
346 |
+
vec_madd(_vec1, multiplier._vec1, val._vec1)};
|
347 |
+
}
|
348 |
+
|
349 |
+
static Vectorized<ComplexFlt> el_mergee(
|
350 |
+
Vectorized<ComplexFlt>& first,
|
351 |
+
Vectorized<ComplexFlt>& second) {
|
352 |
+
return {
|
353 |
+
vec_mergee(first._vecb0, second._vecb0),
|
354 |
+
vec_mergee(first._vecb1, second._vecb1)};
|
355 |
+
}
|
356 |
+
|
357 |
+
static Vectorized<ComplexFlt> el_mergeo(
|
358 |
+
Vectorized<ComplexFlt>& first,
|
359 |
+
Vectorized<ComplexFlt>& second) {
|
360 |
+
return {
|
361 |
+
vec_mergeo(first._vecb0, second._vecb0),
|
362 |
+
vec_mergeo(first._vecb1, second._vecb1)};
|
363 |
+
}
|
364 |
+
|
365 |
+
Vectorized<ComplexFlt> angle_() const {
|
366 |
+
// angle = atan2(b/a)
|
367 |
+
// auto b_a = _mm256_permute_ps(values, 0xB1); // b a
|
368 |
+
// return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
|
369 |
+
Vectorized<ComplexFlt> ret;
|
370 |
+
for (int i = 0; i < 4; i += 2) {
|
371 |
+
ret._vec0[i] = std::atan2(_vec0[i + 1], _vec0[i]);
|
372 |
+
ret._vec1[i] = std::atan2(_vec1[i + 1], _vec1[i]);
|
373 |
+
}
|
374 |
+
return ret;
|
375 |
+
}
|
376 |
+
|
377 |
+
Vectorized<ComplexFlt> angle() const {
|
378 |
+
return angle_() & real_mask;
|
379 |
+
}
|
380 |
+
|
381 |
+
Vectorized<ComplexFlt> sin() const {
|
382 |
+
return map(std::sin);
|
383 |
+
}
|
384 |
+
Vectorized<ComplexFlt> sinh() const {
|
385 |
+
return map(std::sinh);
|
386 |
+
}
|
387 |
+
Vectorized<ComplexFlt> cos() const {
|
388 |
+
return map(std::cos);
|
389 |
+
}
|
390 |
+
Vectorized<ComplexFlt> cosh() const {
|
391 |
+
return map(std::cosh);
|
392 |
+
}
|
393 |
+
Vectorized<ComplexFlt> ceil() const {
|
394 |
+
return {vec_ceil(_vec0), vec_ceil(_vec1)};
|
395 |
+
}
|
396 |
+
Vectorized<ComplexFlt> floor() const {
|
397 |
+
return {vec_floor(_vec0), vec_floor(_vec1)};
|
398 |
+
}
|
399 |
+
Vectorized<ComplexFlt> neg() const {
|
400 |
+
auto z = Vectorized<ComplexFlt>(zero);
|
401 |
+
return z - *this;
|
402 |
+
}
|
403 |
+
Vectorized<ComplexFlt> round() const {
|
404 |
+
return {vec_round(_vec0), vec_round(_vec1)};
|
405 |
+
}
|
406 |
+
Vectorized<ComplexFlt> tan() const {
|
407 |
+
return map(std::tan);
|
408 |
+
}
|
409 |
+
Vectorized<ComplexFlt> tanh() const {
|
410 |
+
return map(std::tanh);
|
411 |
+
}
|
412 |
+
Vectorized<ComplexFlt> trunc() const {
|
413 |
+
return {vec_trunc(_vec0), vec_trunc(_vec1)};
|
414 |
+
}
|
415 |
+
|
416 |
+
Vectorized<ComplexFlt> elwise_sqrt() const {
|
417 |
+
return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
|
418 |
+
}
|
419 |
+
|
420 |
+
Vectorized<ComplexFlt> sqrt() const {
|
421 |
+
return map(std::sqrt);
|
422 |
+
}
|
423 |
+
|
424 |
+
Vectorized<ComplexFlt> reciprocal() const {
|
425 |
+
// re + im*i = (a + bi) / (c + di)
|
426 |
+
// re = (ac + bd)/abs_2() = c/abs_2()
|
427 |
+
// im = (bc - ad)/abs_2() = d/abs_2()
|
428 |
+
auto c_d = *this ^ isign_mask; // c -d
|
429 |
+
auto abs = abs_2_();
|
430 |
+
return c_d.elwise_div(abs);
|
431 |
+
}
|
432 |
+
|
433 |
+
Vectorized<ComplexFlt> rsqrt() const {
|
434 |
+
return sqrt().reciprocal();
|
435 |
+
}
|
436 |
+
|
437 |
+
Vectorized<ComplexFlt> pow(const Vectorized<ComplexFlt>& exp) const {
|
438 |
+
__at_align__ ComplexFlt x_tmp[size()];
|
439 |
+
__at_align__ ComplexFlt y_tmp[size()];
|
440 |
+
store(x_tmp);
|
441 |
+
exp.store(y_tmp);
|
442 |
+
for (const auto i : c10::irange(size())) {
|
443 |
+
x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
|
444 |
+
}
|
445 |
+
return loadu(x_tmp);
|
446 |
+
}
|
447 |
+
|
448 |
+
Vectorized<ComplexFlt> atan() const {
|
449 |
+
// atan(x) = i/2 * ln((i + z)/(i - z))
|
450 |
+
auto ione = Vectorized(imag_one);
|
451 |
+
auto sum = ione + *this;
|
452 |
+
auto sub = ione - *this;
|
453 |
+
auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
|
454 |
+
return ln * imag_half; // i/2*ln()
|
455 |
+
}
|
456 |
+
Vectorized<ComplexFlt> atanh() const {
|
457 |
+
return map(std::atanh);
|
458 |
+
}
|
459 |
+
|
460 |
+
Vectorized<ComplexFlt> acos() const {
|
461 |
+
// acos(x) = pi/2 - asin(x)
|
462 |
+
return Vectorized(pi_2) - asin();
|
463 |
+
}
|
464 |
+
|
465 |
+
Vectorized<ComplexFlt> inline operator*(const Vectorized<ComplexFlt>& b) const {
|
466 |
+
//(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
|
467 |
+
|
468 |
+
#if 1
|
469 |
+
// this is more vsx friendly than simulating horizontal from x86
|
470 |
+
|
471 |
+
auto vi = b.el_mergeo();
|
472 |
+
auto vr = b.el_mergee();
|
473 |
+
vi = vi ^ rsign_mask;
|
474 |
+
auto ret = elwise_mult(vr);
|
475 |
+
auto vx_swapped = el_swapped();
|
476 |
+
ret = vx_swapped.el_madd(vi, ret);
|
477 |
+
return ret;
|
478 |
+
|
479 |
+
#else
|
480 |
+
|
481 |
+
auto ac_bd = elwise_mult(b);
|
482 |
+
auto d_c = b.el_swapped();
|
483 |
+
d_c = d_c ^ isign_mask;
|
484 |
+
auto ad_bc = elwise_mult(d_c);
|
485 |
+
auto ret = horizontal_sub_permD8(ac_bd, ad_bc);
|
486 |
+
return ret;
|
487 |
+
#endif
|
488 |
+
}
|
489 |
+
|
490 |
+
Vectorized<ComplexFlt> inline operator/(const Vectorized<ComplexFlt>& b) const {
|
491 |
+
// re + im*i = (a + bi) / (c + di)
|
492 |
+
// re = (ac + bd)/abs_2()
|
493 |
+
// im = (bc - ad)/abs_2()
|
494 |
+
auto fabs_cd = Vectorized{
|
495 |
+
vec_andc(b._vec0, sign_mask),
|
496 |
+
        vec_andc(b._vec1, sign_mask)}; // |c|            |d|
    auto fabs_dc = fabs_cd.el_swapped();      // |d|            |c|
    auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|)
    auto a2 = elwise_div(scale);              // a/sc           b/sc
    auto b2 = b.elwise_div(scale);            // c/sc           d/sc
    auto acbd2 = a2.elwise_mult(b2);          // ac/sc^2        bd/sc^2
    auto dc2 = b2.el_swapped();               // d/sc           c/sc
    dc2 = dc2 ^ rsign_mask;                   // -d/sc          c/sc
    auto adbc2 = a2.elwise_mult(dc2);         // -ad/sc^2       bc/sc^2
    auto ret = horizontal_add(acbd2, adbc2);  // (ac+bd)/sc^2   (bc-ad)/sc^2
    auto denom2 = b2.abs_2_();                // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
    ret = ret.elwise_div(denom2);
    return ret;
  }

  Vectorized<ComplexFlt> asin() const {
    // asin(x)
    // = -i*ln(iz + sqrt(1 -z^2))
    // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
    // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))

#if 1
    auto conj = conj_();
    auto b_a = conj.el_swapped();
    auto ab = conj.elwise_mult(b_a);
    auto im = ab + ab;
    auto val_2 = (*this).elwise_mult(*this);
    auto val_2_swapped = val_2.el_swapped();
    auto re = horizontal_sub_permD8(val_2, val_2_swapped);
    re = Vectorized<ComplexFlt>(one) - re;
    auto root = el_blend<0xAA>(re, im).sqrt();
    auto ln = (b_a + root).log();
    return ln.el_swapped().conj();
#else
    return map(std::asin);
#endif
  }

  Vectorized<ComplexFlt> exp() const {
    return map(std::exp);
  }
  Vectorized<ComplexFlt> exp2() const {
    return map(exp2_impl);
  }
  Vectorized<ComplexFlt> expm1() const {
    return map(std::expm1);
  }

  Vectorized<ComplexFlt> eq(const Vectorized<ComplexFlt>& other) const {
    auto eq = (*this == other); // compares real and imag individually
    // If both real numbers and imag numbers are equal, then the complex numbers are equal
    return (eq.real() & eq.imag()) & one;
  }
  Vectorized<ComplexFlt> ne(const Vectorized<ComplexFlt>& other) const {
    auto ne = (*this != other); // compares real and imag individually
    // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
    return (ne.real() | ne.imag()) & one;
  }

  Vectorized<ComplexFlt> sgn() const {
    return map(at::native::sgn_impl);
  }

  Vectorized<ComplexFlt> operator<(const Vectorized<ComplexFlt>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  Vectorized<ComplexFlt> operator<=(const Vectorized<ComplexFlt>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  Vectorized<ComplexFlt> operator>(const Vectorized<ComplexFlt>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  Vectorized<ComplexFlt> operator>=(const Vectorized<ComplexFlt>& other) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  DEFINE_MEMBER_OP(operator==, ComplexFlt, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, ComplexFlt, vec_cmpne)

  DEFINE_MEMBER_OP(operator+, ComplexFlt, vec_add)
  DEFINE_MEMBER_OP(operator-, ComplexFlt, vec_sub)
  DEFINE_MEMBER_OP(operator&, ComplexFlt, vec_and)
  DEFINE_MEMBER_OP(operator|, ComplexFlt, vec_or)
  DEFINE_MEMBER_OP(operator^, ComplexFlt, vec_xor)
  // elementwise helpers
  DEFINE_MEMBER_OP(elwise_mult, ComplexFlt, vec_mul)
  DEFINE_MEMBER_OP(elwise_div, ComplexFlt, vec_div)
  DEFINE_MEMBER_OP(elwise_gt, ComplexFlt, vec_cmpgt)
  DEFINE_MEMBER_OP(elwise_ge, ComplexFlt, vec_cmpge)
  DEFINE_MEMBER_OP(elwise_lt, ComplexFlt, vec_cmplt)
  DEFINE_MEMBER_OP(elwise_le, ComplexFlt, vec_cmple)
  DEFINE_MEMBER_OP(elwise_max, ComplexFlt, vec_max)
};

template <>
Vectorized<ComplexFlt> inline maximum(
    const Vectorized<ComplexFlt>& a,
    const Vectorized<ComplexFlt>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
  // auto max = _mm256_blendv_ps(a, b, mask);
  auto mask = abs_a.elwise_lt(abs_b);
  auto max = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask);

  return max;
  // Exploit the fact that all-ones is a NaN.
  // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
  // return _mm256_or_ps(max, isnan);
}

template <>
Vectorized<ComplexFlt> inline minimum(
    const Vectorized<ComplexFlt>& a,
    const Vectorized<ComplexFlt>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
  // auto min = _mm256_blendv_ps(a, b, mask);
  auto mask = abs_a.elwise_gt(abs_b);
  auto min = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask);
  return min;
  // Exploit the fact that all-ones is a NaN.
  // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
  // return _mm256_or_ps(min, isnan);
}

} // namespace
} // namespace vec
} // namespace at
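The complex division above rescales both operands by sc = max(|c|, |d|) before forming (ac+bd) and (bc-ad), so the denominator c^2+d^2 is computed on values near 1 and does not overflow or underflow prematurely. A minimal scalar sketch of the same scaling idea, written against std::complex rather than the Vectorized type (scaled_div is an illustrative name, not part of the header):

// Scalar sketch of the scaled complex division used above: (a+bi)/(c+di)
// after normalizing by sc = max(|c|, |d|). Illustration only.
#include <cmath>
#include <complex>
#include <cstdio>

static std::complex<float> scaled_div(std::complex<float> x, std::complex<float> y) {
  float a = x.real(), b = x.imag();
  float c = y.real(), d = y.imag();
  float sc = std::max(std::fabs(c), std::fabs(d));  // sc = max(|c|, |d|)
  float a2 = a / sc, b2 = b / sc;                   // a/sc, b/sc
  float c2 = c / sc, d2 = d / sc;                   // c/sc, d/sc
  float denom = c2 * c2 + d2 * d2;                  // (c^2 + d^2) / sc^2
  return {(a2 * c2 + b2 * d2) / denom,              // (ac + bd) / (c^2 + d^2)
          (b2 * c2 - a2 * d2) / denom};             // (bc - ad) / (c^2 + d^2)
}

int main() {
  auto q = scaled_div({1.0f, 2.0f}, {3.0f, -4.0f});
  std::printf("%g %+gi\n", q.real(), q.imag());     // expect -0.2 +0.4i
  return 0;
}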
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h
ADDED
@@ -0,0 +1,438 @@
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
#include <c10/util/irange.h>

#include <sleef.h>

namespace at {
namespace vec {

inline namespace CPU_CAPABILITY {

template <>
class Vectorized<double> {
 private:
  union {
    struct {
      vfloat64 _vec0;
      vfloat64 _vec1;
    };
    struct {
      vbool64 _vecb0;
      vbool64 _vecb1;
    };

  } __attribute__((__may_alias__));

 public:
  using value_type = double;
  using vec_internal_type = vfloat64;
  using vec_internal_mask_type = vbool64;
  using size_type = int;
  static constexpr size_type size() {
    return 4;
  }
  Vectorized() {}
  C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
  C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
  C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
  C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
  C10_ALWAYS_INLINE Vectorized(double scalar)
      : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
  C10_ALWAYS_INLINE Vectorized(
      double scalar1,
      double scalar2,
      double scalar3,
      double scalar4)
      : _vec0{vfloat64{scalar1, scalar2}}, _vec1{vfloat64{scalar3, scalar4}} {}
  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
    return _vec0;
  }
  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
    return _vec1;
  }

  int zero_mask() const {
    auto cmp = (*this == vd_zero);
    return (cmp._vecb0[0] & 1) | (cmp._vecb0[1] & 2) | (cmp._vecb1[0] & 4) |
        (cmp._vecb1[1] & 8);
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 0, Vectorized<double>> C10_ALWAYS_INLINE
  blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    return a;
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 1, Vectorized<double>> C10_ALWAYS_INLINE
  blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    return b;
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 2, Vectorized<double>> C10_ALWAYS_INLINE
  blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    return { b._vec0, a._vec1 };
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 3, Vectorized<double>> C10_ALWAYS_INLINE
  blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    return { a._vec0, b._vec1 };
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 4, Vectorized<double>> C10_ALWAYS_INLINE
  blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    const vbool64 mask_1st = VsxDblMask1(mask);
    return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1 };
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 5, Vectorized<double>> C10_ALWAYS_INLINE
  blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    const vbool64 mask_1st = VsxDblMask1(mask);
    return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1 };
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 6,
      Vectorized<double>>
      C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    const vbool64 mask_2nd = VsxDblMask2(mask);
    // generated masks
    return { a._vec0,
        (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 7,
      Vectorized<double>>
      C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    const vbool64 mask_2nd = VsxDblMask2(mask);
    // generated masks
    return { b._vec0,
        (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
  }

  template <int64_t mask>
  static std::enable_if_t<blendChoiceDbl(mask) == 8, Vectorized<double>>
      C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    const vbool64 mask_1st = VsxDblMask1(mask);
    const vbool64 mask_2nd = VsxDblMask2(mask);
    return {
        (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
        (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
  }

  static Vectorized<double> C10_ALWAYS_INLINE blendv(
      const Vectorized<double>& a,
      const Vectorized<double>& b,
      const Vectorized<double>& mask) {
    // the mask used here returned by comparision of vec256

    return {
        vec_sel(a._vec0, b._vec0, mask._vecb0),
        vec_sel(a._vec1, b._vec1, mask._vecb1)};
  }
  template <typename step_t>
  static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
    return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
  }

  static Vectorized<double> C10_ALWAYS_INLINE
  set(const Vectorized<double>& a, const Vectorized<double>& b, size_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
    }

    return b;
  }
  static Vectorized<value_type> C10_ALWAYS_INLINE
  loadu(const void* ptr, int count = size()) {
    if (count == size()) {
      return {
          vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
          vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
    }

    __at_align__ value_type tmp_values[size()] = {};
    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));

    return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
  }
  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
    if (count == size()) {
      vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
    } else if (count > 0) {
      __at_align__ value_type tmp_values[size()];
      vec_vsx_st(_vec0, offset0, tmp_values);
      vec_vsx_st(_vec1, offset16, tmp_values);
      std::memcpy(
          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }
  const double& operator[](int idx) const = delete;
  double& operator[](int idx) = delete;
  Vectorized<double> map(double (*const f)(double)) const {
    Vectorized<double> ret;
    for (const auto i : c10::irange(size()/2)) {
      ret._vec0[i] = f(_vec0[i]);
    }
    for (const auto i : c10::irange(size()/2)) {
      ret._vec1[i] = f(_vec1[i]);
    }
    return ret;
  }

  Vectorized<double> mapbi(double (*const f)(double, double), const Vectorized<double>& other)
      const {
    Vectorized<double> ret;
    for (const auto i : c10::irange(size()/2)) {
      ret._vec0[i] = f(_vec0[i], other._vec0[i]);
    }
    for (const auto i : c10::irange(size()/2)) {
      ret._vec1[i] = f(_vec1[i], other._vec1[i]);
    }
    return ret;
  }
  Vectorized<double> C10_ALWAYS_INLINE abs() const {
    return {vec_abs(_vec0), vec_abs(_vec1)};
  }

  Vectorized<double> C10_ALWAYS_INLINE acos() const {
    return {Sleef_acosd2_u10(_vec0), Sleef_acosd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE asin() const {
    return {Sleef_asind2_u10(_vec0), Sleef_asind2_u10(_vec1)};
  }
  Vectorized<double> atan() const {
    return {Sleef_atand2_u10(_vec0), Sleef_atand2_u10(_vec1)};
  }
  Vectorized<double> atanh() const {
    return {Sleef_atanhd2_u10(_vec0), Sleef_atanhd2_u10(_vec1)};
  }
  Vectorized<double> atan2(const Vectorized<double>& b) const {
    return {Sleef_atan2d2_u10(_vec0, b._vec0), Sleef_atan2d2_u10(_vec1, b._vec1)};
  }
  Vectorized<double> copysign(const Vectorized<double> &sign) const {
    return {Sleef_copysignd2(_vec0, sign._vec0), Sleef_copysignd2(_vec1, sign._vec1)};
  }
  Vectorized<double> erf() const {
    return {Sleef_erfd2_u10(_vec0), Sleef_erfd2_u10(_vec1)};
  }
  Vectorized<double> erfc() const {
    return {Sleef_erfcd2_u15(_vec0), Sleef_erfcd2_u15(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE exp() const {
    return {Sleef_expd2_u10(_vec0), Sleef_expd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE exp2() const {
    return {Sleef_exp2d2_u10(_vec0), Sleef_exp2d2_u10(_vec1)};
  }
  Vectorized<double> expm1() const {
    return {Sleef_expm1d2_u10(_vec0), Sleef_expm1d2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE exp_u20() const {
    return exp();
  }

  Vectorized<double> lgamma() const __ubsan_ignore_undefined__ {
    return {Sleef_lgammad2_u10(_vec0), Sleef_lgammad2_u10(_vec1)};
  }

  Vectorized<double> erfinv() const {
    return map(calc_erfinv);
  }

  Vectorized<double> angle() const {
    auto tmp = blendv(
      Vectorized<double>(0), Vectorized<double>(c10::pi<double>), *this < Vectorized<double>(0));
    return blendv(tmp, *this, isnan());
  }
  Vectorized<double> real() const {
    return *this;
  }
  Vectorized<double> imag() const {
    return Vectorized<double>{0};
  }
  Vectorized<double> conj() const {
    return *this;
  }

  Vectorized<double> C10_ALWAYS_INLINE log() const {
    return {Sleef_logd2_u10(_vec0), Sleef_logd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE log10() const {
    return {Sleef_log10d2_u10(_vec0), Sleef_log10d2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE log1p() const {
    return {Sleef_log1pd2_u10(_vec0), Sleef_log1pd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE log2() const {
    return {Sleef_log2d2_u10(_vec0), Sleef_log2d2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE ceil() const {
    return {vec_ceil(_vec0), vec_ceil(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE cos() const {
    return {Sleef_cosd2_u10(_vec0), Sleef_cosd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE cosh() const {
    return {Sleef_coshd2_u10(_vec0), Sleef_coshd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE floor() const {
    return {vec_floor(_vec0), vec_floor(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE neg() const {
    return {vec_neg(_vec0), vec_neg(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE round() const {
    return {vec_rint(_vec0), vec_rint(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE sin() const {
    return {Sleef_sind2_u10(_vec0), Sleef_sind2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE sinh() const {
    return {Sleef_sinhd2_u10(_vec0), Sleef_sinhd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE tan() const {
    return {Sleef_tand2_u10(_vec0), Sleef_tand2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE tanh() const {
    return {Sleef_tanhd2_u10(_vec0), Sleef_tanhd2_u10(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE trunc() const {
    return {vec_trunc(_vec0), vec_trunc(_vec1)};
  }

  Vectorized<double> C10_ALWAYS_INLINE frac() const {
    return *this - trunc();
  }

  Vectorized<double> C10_ALWAYS_INLINE sqrt() const {
    return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE reciprocal() const {
    return {
        vec_div(vd_one, _vec0), // vec_re(_vec0) is estimated one.
        vec_div(vd_one, _vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE rsqrt() const {
    return sqrt().reciprocal();
  }

  Vectorized<double> C10_ALWAYS_INLINE pow(const Vectorized<double>& b) const {
    return {Sleef_powd2_u10(_vec0, b._vec0), Sleef_powd2_u10(_vec1, b._vec1)};
  }
  Vectorized<double> C10_ALWAYS_INLINE fmod(const Vectorized<double>& b) const {
    return {Sleef_fmodd2(_vec0, b._vec0),Sleef_fmodd2(_vec1, b._vec1)};
  }

  Vectorized<double> hypot(const Vectorized<double>& b) const {
    return {Sleef_hypotd2_u05(_vec0, b._vec0), Sleef_hypotd2_u05(_vec1, b._vec1)};
  }

  Vectorized<double> nextafter(const Vectorized<double>& b) const {
    return {Sleef_nextafterd2(_vec0, b._vec0), Sleef_nextafterd2(_vec1, b._vec1)};
  }

  Vectorized<double> igamma(const Vectorized<double>& x) const {
    return mapbi(calc_igamma, x);
  }

  Vectorized<double> igammac(const Vectorized<double>& x) const {
    return mapbi(calc_igammac, x);
  }

  Vectorized<double> i0() const {
    return map(calc_i0);
  }

  Vectorized<double> i0e() const {
    return map(calc_i0e);
  }

  Vectorized<double> digamma() const {
    return map(calc_digamma);
  }

  Vectorized<double> _nor() const {
    return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)};
  }

  Vectorized<double> isnan() const {
    auto x = *this;
    auto ret = (x == x);
    return ret._nor();
  }
  bool has_inf_nan() const {
    for (const auto i : c10::irange(size()/2)) {
      if(_isnan(_vec0[i]) || _isinf(_vec0[i])) {
        return true;
      }
    }
    for (const auto i : c10::irange(size()/2)) {
      if(_isnan(_vec1[i]) || _isinf(_vec1[i])) {
        return true;
      }
    }
    return false;
  }

  DEFINE_MEMBER_OP(operator==, double, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, double, vec_cmpne)
  DEFINE_MEMBER_OP(operator<, double, vec_cmplt)
  DEFINE_MEMBER_OP(operator<=, double, vec_cmple)
  DEFINE_MEMBER_OP(operator>, double, vec_cmpgt)
  DEFINE_MEMBER_OP(operator>=, double, vec_cmpge)
  DEFINE_MEMBER_OP_AND_ONE(eq, double, vec_cmpeq)
  DEFINE_MEMBER_OP_AND_ONE(ne, double, vec_cmpne)
  DEFINE_MEMBER_OP_AND_ONE(lt, double, vec_cmplt)
  DEFINE_MEMBER_OP_AND_ONE(le, double, vec_cmple)
  DEFINE_MEMBER_OP_AND_ONE(gt, double, vec_cmpgt)
  DEFINE_MEMBER_OP_AND_ONE(ge, double, vec_cmpge)
  DEFINE_MEMBER_OP(operator+, double, vec_add)
  DEFINE_MEMBER_OP(operator-, double, vec_sub)
  DEFINE_MEMBER_OP(operator*, double, vec_mul)
  DEFINE_MEMBER_OP(operator/, double, vec_div)
  DEFINE_MEMBER_OP(maximum, double, vec_max_nan2)
  DEFINE_MEMBER_OP(minimum, double, vec_min_nan2)
  DEFINE_MEMBER_OP(operator&, double, vec_and)
  DEFINE_MEMBER_OP(operator|, double, vec_or)
  DEFINE_MEMBER_OP(operator^, double, vec_xor)
  DEFINE_MEMBER_TERNARY_OP(madd, double, vec_madd)
};
template <>
Vectorized<double> inline maximum(
    const Vectorized<double>& a,
    const Vectorized<double>& b) {
  return a.maximum(b);
}

template <>
Vectorized<double> inline minimum(
    const Vectorized<double>& a,
    const Vectorized<double>& b) {
  return a.minimum(b);
}
} // namespace
} // namespace vec
} // namespace at
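In the double specialization above, blend<mask> is resolved at compile time: blendChoiceDbl(mask) collapses the low four bits of the mask into a handful of cases so whole 128-bit halves can be copied when possible, and vec_sel with a generated lane mask is only used for genuinely mixed halves. A scalar sketch of the underlying bit-per-lane selection rule, assuming a plain 4-lane array (blend4 is an illustrative name, not part of the header):

// Bit i of mask picks lane i of b, otherwise lane i of a -- the semantics the
// blendChoiceDbl specializations implement without per-lane work when they can.
#include <array>
#include <cstdint>
#include <cstdio>

template <int64_t mask>
std::array<double, 4> blend4(const std::array<double, 4>& a,
                             const std::array<double, 4>& b) {
  std::array<double, 4> r{};
  for (int i = 0; i < 4; ++i) {
    r[i] = ((mask >> i) & 1) ? b[i] : a[i];
  }
  return r;
}

int main() {
  std::array<double, 4> a{0, 1, 2, 3}, b{10, 11, 12, 13};
  auto r = blend4<0b0101>(a, b);  // lanes 0 and 2 come from b
  std::printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]);  // 10 1 12 3
  return 0;
}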
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h
ADDED
@@ -0,0 +1,461 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
6 |
+
#include <sleef.h>
|
7 |
+
namespace at {
|
8 |
+
namespace vec {
|
9 |
+
// See Note [CPU_CAPABILITY namespace]
|
10 |
+
|
11 |
+
inline namespace CPU_CAPABILITY {
|
12 |
+
|
13 |
+
template <>
|
14 |
+
class Vectorized<float> {
|
15 |
+
private:
|
16 |
+
union {
|
17 |
+
struct {
|
18 |
+
vfloat32 _vec0;
|
19 |
+
vfloat32 _vec1;
|
20 |
+
};
|
21 |
+
struct {
|
22 |
+
vbool32 _vecb0;
|
23 |
+
vbool32 _vecb1;
|
24 |
+
};
|
25 |
+
|
26 |
+
} __attribute__((__may_alias__));
|
27 |
+
|
28 |
+
public:
|
29 |
+
using value_type = float;
|
30 |
+
using vec_internal_type = vfloat32;
|
31 |
+
using vec_internal_mask_type = vbool32;
|
32 |
+
using size_type = int;
|
33 |
+
|
34 |
+
static constexpr size_type size() {
|
35 |
+
return 8;
|
36 |
+
}
|
37 |
+
Vectorized() {}
|
38 |
+
|
39 |
+
C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {}
|
40 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
41 |
+
C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {}
|
42 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
|
43 |
+
C10_ALWAYS_INLINE Vectorized(float scalar)
|
44 |
+
: _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
|
45 |
+
C10_ALWAYS_INLINE Vectorized(
|
46 |
+
float scalar1,
|
47 |
+
float scalar2,
|
48 |
+
float scalar3,
|
49 |
+
float scalar4,
|
50 |
+
float scalar5,
|
51 |
+
float scalar6,
|
52 |
+
float scalar7,
|
53 |
+
float scalar8)
|
54 |
+
: _vec0{vfloat32{scalar1, scalar2, scalar3, scalar4}},
|
55 |
+
_vec1{vfloat32{scalar5, scalar6, scalar7, scalar8}} {}
|
56 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
|
57 |
+
return _vec0;
|
58 |
+
}
|
59 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
|
60 |
+
return _vec1;
|
61 |
+
}
|
62 |
+
|
63 |
+
template <int64_t mask>
|
64 |
+
static std::enable_if_t<blendChoice(mask) == 0, Vectorized<float>> C10_ALWAYS_INLINE
|
65 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
66 |
+
return a;
|
67 |
+
}
|
68 |
+
|
69 |
+
template <int64_t mask>
|
70 |
+
static std::enable_if_t<blendChoice(mask) == 1, Vectorized<float>> C10_ALWAYS_INLINE
|
71 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
72 |
+
return b;
|
73 |
+
}
|
74 |
+
|
75 |
+
template <int64_t mask>
|
76 |
+
static std::enable_if_t<blendChoice(mask) == 2, Vectorized<float>> C10_ALWAYS_INLINE
|
77 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
78 |
+
return {b._vec0, a._vec1};
|
79 |
+
}
|
80 |
+
|
81 |
+
template <int64_t mask>
|
82 |
+
static std::enable_if_t<blendChoice(mask) == 3, Vectorized<float>> C10_ALWAYS_INLINE
|
83 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
84 |
+
return {a._vec0, b._vec1};
|
85 |
+
}
|
86 |
+
|
87 |
+
template <int64_t mask>
|
88 |
+
static std::enable_if_t<blendChoice(mask) == 4, Vectorized<float>> C10_ALWAYS_INLINE
|
89 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
90 |
+
const vbool32 mask_1st = VsxMask1(mask);
|
91 |
+
return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
|
92 |
+
}
|
93 |
+
|
94 |
+
template <int64_t mask>
|
95 |
+
static std::enable_if_t<blendChoice(mask) == 5, Vectorized<float>> C10_ALWAYS_INLINE
|
96 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
97 |
+
const vbool32 mask_1st = VsxMask1(mask);
|
98 |
+
return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
|
99 |
+
}
|
100 |
+
|
101 |
+
template <int64_t mask>
|
102 |
+
static std::enable_if_t<blendChoice(mask) == 6, Vectorized<float>> C10_ALWAYS_INLINE
|
103 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
104 |
+
const vbool32 mask_2nd = VsxMask2(mask);
|
105 |
+
// generated masks
|
106 |
+
return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
107 |
+
}
|
108 |
+
|
109 |
+
template <int64_t mask>
|
110 |
+
static std::enable_if_t<blendChoice(mask) == 7, Vectorized<float>> C10_ALWAYS_INLINE
|
111 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
112 |
+
const vbool32 mask_2nd = VsxMask2(mask);
|
113 |
+
// generated masks
|
114 |
+
return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
115 |
+
}
|
116 |
+
|
117 |
+
template <int64_t mask>
|
118 |
+
static std::enable_if_t<blendChoice(mask) == 8, Vectorized<float>> C10_ALWAYS_INLINE
|
119 |
+
blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
120 |
+
const vbool32 mask_1st = VsxMask1(mask);
|
121 |
+
const vbool32 mask_2nd = VsxMask2(mask);
|
122 |
+
return {
|
123 |
+
(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
|
124 |
+
(vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
125 |
+
}
|
126 |
+
|
127 |
+
static Vectorized<float> C10_ALWAYS_INLINE blendv(
|
128 |
+
const Vectorized<float>& a,
|
129 |
+
const Vectorized<float>& b,
|
130 |
+
const Vectorized<float>& mask) {
|
131 |
+
// the mask used here returned by comparision of vec256
|
132 |
+
// assuming this we can use the same mask directly with vec_sel
|
133 |
+
return {
|
134 |
+
vec_sel(a._vec0, b._vec0, mask._vecb0),
|
135 |
+
vec_sel(a._vec1, b._vec1, mask._vecb1)};
|
136 |
+
}
|
137 |
+
|
138 |
+
template <typename step_t>
|
139 |
+
static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
|
140 |
+
return Vectorized<float>(
|
141 |
+
base,
|
142 |
+
base + step,
|
143 |
+
base + 2 * step,
|
144 |
+
base + 3 * step,
|
145 |
+
base + 4 * step,
|
146 |
+
base + 5 * step,
|
147 |
+
base + 6 * step,
|
148 |
+
base + 7 * step);
|
149 |
+
}
|
150 |
+
static Vectorized<float> set(
|
151 |
+
const Vectorized<float>& a,
|
152 |
+
const Vectorized<float>& b,
|
153 |
+
size_t count = size()) {
|
154 |
+
switch (count) {
|
155 |
+
case 0:
|
156 |
+
return a;
|
157 |
+
case 1:
|
158 |
+
return blend<1>(a, b);
|
159 |
+
case 2:
|
160 |
+
return blend<3>(a, b);
|
161 |
+
case 3:
|
162 |
+
return blend<7>(a, b);
|
163 |
+
case 4:
|
164 |
+
return blend<15>(a, b);
|
165 |
+
case 5:
|
166 |
+
return blend<31>(a, b);
|
167 |
+
case 6:
|
168 |
+
return blend<63>(a, b);
|
169 |
+
case 7:
|
170 |
+
return blend<127>(a, b);
|
171 |
+
}
|
172 |
+
|
173 |
+
return b;
|
174 |
+
}
|
175 |
+
static Vectorized<value_type> C10_ALWAYS_INLINE
|
176 |
+
loadu(const void* ptr, int count = size()) {
|
177 |
+
if (count == size()) {
|
178 |
+
return {
|
179 |
+
vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
|
180 |
+
vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
|
181 |
+
}
|
182 |
+
|
183 |
+
__at_align__ value_type tmp_values[size()] = {};
|
184 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
185 |
+
|
186 |
+
return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
|
187 |
+
}
|
188 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
189 |
+
if (count == size()) {
|
190 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
|
191 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
|
192 |
+
} else if (count > 0) {
|
193 |
+
__at_align__ value_type tmp_values[size()];
|
194 |
+
vec_vsx_st(_vec0, offset0, tmp_values);
|
195 |
+
vec_vsx_st(_vec1, offset16, tmp_values);
|
196 |
+
std::memcpy(
|
197 |
+
ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
|
198 |
+
}
|
199 |
+
}
|
200 |
+
|
201 |
+
const float& operator[](int idx) const = delete;
|
202 |
+
float& operator[](int idx) = delete;
|
203 |
+
|
204 |
+
Vectorized<float> map(float (*const f)(float)) const {
|
205 |
+
Vectorized<float> ret;
|
206 |
+
for (int i = 0; i < size() / 2; i++) {
|
207 |
+
ret._vec0[i] = f(_vec0[i]);
|
208 |
+
}
|
209 |
+
for (int i = 0; i < size() / 2; i++) {
|
210 |
+
ret._vec1[i] = f(_vec1[i]);
|
211 |
+
}
|
212 |
+
return ret;
|
213 |
+
}
|
214 |
+
|
215 |
+
Vectorized<float> mapbi(float (*const f)(float, float), const Vectorized<float>& other)
|
216 |
+
const {
|
217 |
+
Vectorized<float> ret;
|
218 |
+
for (int i = 0; i < size() / 2; i++) {
|
219 |
+
ret._vec0[i] = f(_vec0[i], other._vec0[i]);
|
220 |
+
}
|
221 |
+
for (int i = 0; i < size() / 2; i++) {
|
222 |
+
ret._vec1[i] = f(_vec1[i], other._vec1[i]);
|
223 |
+
}
|
224 |
+
return ret;
|
225 |
+
}
|
226 |
+
|
227 |
+
Vectorized<float> _nor() const {
|
228 |
+
return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)};
|
229 |
+
}
|
230 |
+
|
231 |
+
Vectorized<float> isnan() const {
|
232 |
+
auto x = *this;
|
233 |
+
auto ret = (x == x);
|
234 |
+
return ret._nor();
|
235 |
+
}
|
236 |
+
|
237 |
+
bool has_inf_nan() const {
|
238 |
+
for (const auto i : c10::irange(size()/2)) {
|
239 |
+
if(_isnan(_vec0[i]) || _isinf(_vec0[i])) {
|
240 |
+
return true;
|
241 |
+
}
|
242 |
+
}
|
243 |
+
for (const auto i : c10::irange(size()/2)) {
|
244 |
+
if(_isnan(_vec1[i]) || _isinf(_vec1[i])) {
|
245 |
+
return true;
|
246 |
+
}
|
247 |
+
}
|
248 |
+
return false;
|
249 |
+
}
|
250 |
+
|
251 |
+
int zero_mask() const {
|
252 |
+
// returns an integer mask where all zero elements are translated to 1-bit
|
253 |
+
// and others are translated to 0-bit
|
254 |
+
//__m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
|
255 |
+
auto cmp = (*this == zero);
|
256 |
+
// return _mm256_movemask_ps(cmp);
|
257 |
+
// possible simulation //mask= lvsl ( 0 ) vbpermq( vec, mask <<5)
|
258 |
+
vuint64 result0 = vec_vbpermq((vuint8)cmp._vecb0, mask_zero_bits);
|
259 |
+
vuint64 result1 = vec_vbpermq((vuint8)cmp._vecb1, mask_zero_bits);
|
260 |
+
return (result0[1] >> 12 | (result1[1] >> 8));
|
261 |
+
}
|
262 |
+
|
263 |
+
Vectorized<float> C10_ALWAYS_INLINE abs() const {
|
264 |
+
return {vec_abs(_vec0), vec_abs(_vec1)};
|
265 |
+
}
|
266 |
+
|
267 |
+
Vectorized<float> C10_ALWAYS_INLINE acos() const {
|
268 |
+
return {Sleef_acosf4_u10(_vec0), Sleef_acosf4_u10(_vec1)};
|
269 |
+
}
|
270 |
+
Vectorized<float> C10_ALWAYS_INLINE asin() const {
|
271 |
+
return {Sleef_asinf4_u10(_vec0), Sleef_asinf4_u10(_vec1)};
|
272 |
+
}
|
273 |
+
Vectorized<float> atan() const {
|
274 |
+
return {Sleef_atanf4_u10(_vec0), Sleef_atanf4_u10(_vec1)};
|
275 |
+
}
|
276 |
+
Vectorized<float> atanh() const {
|
277 |
+
return {Sleef_atanhf4_u10(_vec0), Sleef_atanhf4_u10(_vec1)};
|
278 |
+
}
|
279 |
+
Vectorized<float> atan2(const Vectorized<float>& b) const {
|
280 |
+
return {Sleef_atan2f4_u10(_vec0, b._vec0), Sleef_atan2f4_u10(_vec1, b._vec1)};
|
281 |
+
}
|
282 |
+
Vectorized<float> copysign(const Vectorized<float> &sign) const {
|
283 |
+
return {Sleef_copysignf4(_vec0, sign._vec0), Sleef_copysignf4(_vec1, sign._vec1)};
|
284 |
+
}
|
285 |
+
Vectorized<float> lgamma() const {
|
286 |
+
return {Sleef_lgammaf4_u10(_vec0), Sleef_lgammaf4_u10(_vec1)};
|
287 |
+
}
|
288 |
+
Vectorized<float> erf() const {
|
289 |
+
return {Sleef_erff4_u10(_vec0), Sleef_erff4_u10(_vec1)};
|
290 |
+
}
|
291 |
+
|
292 |
+
Vectorized<float> erfc() const {
|
293 |
+
return {Sleef_erfcf4_u15(_vec0), Sleef_erfcf4_u15(_vec1)};
|
294 |
+
}
|
295 |
+
|
296 |
+
Vectorized<float> erfinv() const {
|
297 |
+
return map(calc_erfinv);
|
298 |
+
}
|
299 |
+
|
300 |
+
Vectorized<float> angle() const {
|
301 |
+
auto tmp = blendv(
|
302 |
+
Vectorized<float>(0), Vectorized<float>(c10::pi<float>), *this < Vectorized<float>(0));
|
303 |
+
return blendv(tmp, *this, isnan());
|
304 |
+
}
|
305 |
+
Vectorized<float> real() const {
|
306 |
+
return *this;
|
307 |
+
}
|
308 |
+
Vectorized<float> imag() const {
|
309 |
+
return Vectorized<float>{0};
|
310 |
+
}
|
311 |
+
Vectorized<float> conj() const {
|
312 |
+
return *this;
|
313 |
+
}
|
314 |
+
|
315 |
+
Vectorized<float> C10_ALWAYS_INLINE exp() const {
|
316 |
+
return {Sleef_expf4_u10(_vec0), Sleef_expf4_u10(_vec1)};
|
317 |
+
}
|
318 |
+
Vectorized<float> C10_ALWAYS_INLINE exp2() const {
|
319 |
+
return {Sleef_exp2f4_u10(_vec0), Sleef_exp2f4_u10(_vec1)};
|
320 |
+
}
|
321 |
+
Vectorized<float> expm1() const {
|
322 |
+
return {Sleef_expm1f4_u10(_vec0), Sleef_expm1f4_u10(_vec1)};
|
323 |
+
}
|
324 |
+
Vectorized<float> C10_ALWAYS_INLINE exp_u20() const {
|
325 |
+
return exp();
|
326 |
+
}
|
327 |
+
|
328 |
+
Vectorized<float> C10_ALWAYS_INLINE log() const {
|
329 |
+
return {Sleef_logf4_u10(_vec0), Sleef_logf4_u10(_vec1)};
|
330 |
+
}
|
331 |
+
Vectorized<float> C10_ALWAYS_INLINE log10() const {
|
332 |
+
return {Sleef_log10f4_u10(_vec0), Sleef_log10f4_u10(_vec1)};
|
333 |
+
}
|
334 |
+
Vectorized<float> C10_ALWAYS_INLINE log1p() const {
|
335 |
+
return {Sleef_log1pf4_u10(_vec0), Sleef_log1pf4_u10(_vec1)};
|
336 |
+
}
|
337 |
+
Vectorized<float> C10_ALWAYS_INLINE log2() const {
|
338 |
+
return {Sleef_log2f4_u10(_vec0), Sleef_log2f4_u10(_vec1)};
|
339 |
+
}
|
340 |
+
Vectorized<float> C10_ALWAYS_INLINE ceil() const {
|
341 |
+
return {vec_ceil(_vec0), vec_ceil(_vec1)};
|
342 |
+
}
|
343 |
+
Vectorized<float> C10_ALWAYS_INLINE cos() const {
|
344 |
+
return {Sleef_cosf4_u10(_vec0), Sleef_cosf4_u10(_vec1)};
|
345 |
+
}
|
346 |
+
Vectorized<float> C10_ALWAYS_INLINE cosh() const {
|
347 |
+
return {Sleef_coshf4_u10(_vec0), Sleef_coshf4_u10(_vec1)};
|
348 |
+
}
|
349 |
+
Vectorized<float> C10_ALWAYS_INLINE floor() const {
|
350 |
+
return {vec_floor(_vec0), vec_floor(_vec1)};
|
351 |
+
}
|
352 |
+
Vectorized<float> C10_ALWAYS_INLINE neg() const {
|
353 |
+
return {vec_neg(_vec0), vec_neg(_vec1)};
|
354 |
+
}
|
355 |
+
|
356 |
+
Vectorized<float> C10_ALWAYS_INLINE round() const {
|
357 |
+
return {vec_round(_vec0), vec_round(_vec1)};
|
358 |
+
}
|
359 |
+
Vectorized<float> C10_ALWAYS_INLINE sin() const {
|
360 |
+
return {Sleef_sinf4_u10(_vec0), Sleef_sinf4_u10(_vec1)};
|
361 |
+
}
|
362 |
+
Vectorized<float> C10_ALWAYS_INLINE sinh() const {
|
363 |
+
return {Sleef_sinhf4_u10(_vec0), Sleef_sinhf4_u10(_vec1)};
|
364 |
+
}
|
365 |
+
Vectorized<float> C10_ALWAYS_INLINE tan() const {
|
366 |
+
return {Sleef_tanf4_u10(_vec0), Sleef_tanf4_u10(_vec1)};
|
367 |
+
}
|
368 |
+
Vectorized<float> C10_ALWAYS_INLINE tanh() const {
|
369 |
+
return {Sleef_tanhf4_u10(_vec0), Sleef_tanhf4_u10(_vec1)};
|
370 |
+
}
|
371 |
+
Vectorized<float> C10_ALWAYS_INLINE trunc() const {
|
372 |
+
return {vec_trunc(_vec0), vec_trunc(_vec1)};
|
373 |
+
}
|
374 |
+
|
375 |
+
Vectorized<float> C10_ALWAYS_INLINE frac() const {
|
376 |
+
return *this - trunc();
|
377 |
+
}
|
378 |
+
|
379 |
+
Vectorized<float> C10_ALWAYS_INLINE sqrt() const {
|
380 |
+
return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
|
381 |
+
}
|
382 |
+
Vectorized<float> C10_ALWAYS_INLINE reciprocal() const {
|
383 |
+
return Vectorized<float>(one) / (*this);
|
384 |
+
}
|
385 |
+
Vectorized<float> C10_ALWAYS_INLINE rsqrt() const {
|
386 |
+
return sqrt().reciprocal();
|
387 |
+
}
|
388 |
+
|
389 |
+
Vectorized<float> C10_ALWAYS_INLINE pow(const Vectorized<float>& exp) const {
|
390 |
+
return {Sleef_powf4_u10(_vec0, exp._vec0), Sleef_powf4_u10(_vec1, exp._vec1)};
|
391 |
+
}
|
392 |
+
|
393 |
+
Vectorized<float> fmod(const Vectorized<float>& b) const {
|
394 |
+
return {Sleef_fmodf4(_vec0, b._vec0),Sleef_fmodf4(_vec1, b._vec1)};
|
395 |
+
}
|
396 |
+
|
397 |
+
Vectorized<float> hypot(const Vectorized<float>& b) const {
|
398 |
+
return {Sleef_hypotf4_u05(_vec0, b._vec0), Sleef_hypotf4_u05(_vec1, b._vec1)};
|
399 |
+
}
|
400 |
+
|
401 |
+
Vectorized<float> nextafter(const Vectorized<float>& b) const {
|
402 |
+
return {Sleef_nextafterf4(_vec0, b._vec0), Sleef_nextafterf4(_vec1, b._vec1)};
|
403 |
+
}
|
404 |
+
|
405 |
+
Vectorized<float> igamma(const Vectorized<float>& x) const {
|
406 |
+
return mapbi(calc_igamma, x);
|
407 |
+
}
|
408 |
+
|
409 |
+
Vectorized<float> igammac(const Vectorized<float>& x) const {
|
410 |
+
return mapbi(calc_igammac, x);
|
411 |
+
}
|
412 |
+
|
413 |
+
Vectorized<float> i0() const {
|
414 |
+
return map(calc_i0);
|
415 |
+
}
|
416 |
+
|
417 |
+
Vectorized<float> i0e() const {
|
418 |
+
return map(calc_i0e);
|
419 |
+
}
|
420 |
+
|
421 |
+
Vectorized<float> digamma() const {
|
422 |
+
return map(calc_digamma);
|
423 |
+
}
|
424 |
+
|
425 |
+
DEFINE_MEMBER_OP(operator==, float, vec_cmpeq)
|
426 |
+
DEFINE_MEMBER_OP(operator!=, float, vec_cmpne)
|
427 |
+
DEFINE_MEMBER_OP(operator<, float, vec_cmplt)
|
428 |
+
DEFINE_MEMBER_OP(operator<=, float, vec_cmple)
|
429 |
+
DEFINE_MEMBER_OP(operator>, float, vec_cmpgt)
|
430 |
+
DEFINE_MEMBER_OP(operator>=, float, vec_cmpge)
|
431 |
+
DEFINE_MEMBER_OP_AND_ONE(eq, float, vec_cmpeq)
|
432 |
+
DEFINE_MEMBER_OP_AND_ONE(ne, float, vec_cmpne)
|
433 |
+
DEFINE_MEMBER_OP_AND_ONE(lt, float, vec_cmplt)
|
434 |
+
DEFINE_MEMBER_OP_AND_ONE(le, float, vec_cmple)
|
435 |
+
DEFINE_MEMBER_OP_AND_ONE(gt, float, vec_cmpgt)
|
436 |
+
DEFINE_MEMBER_OP_AND_ONE(ge, float, vec_cmpge)
|
437 |
+
DEFINE_MEMBER_OP(operator+, float, vec_add)
|
438 |
+
DEFINE_MEMBER_OP(operator-, float, vec_sub)
|
439 |
+
DEFINE_MEMBER_OP(operator*, float, vec_mul)
|
440 |
+
DEFINE_MEMBER_OP(operator/, float, vec_div)
|
441 |
+
DEFINE_MEMBER_OP(maximum, float, vec_max_nan2)
|
442 |
+
DEFINE_MEMBER_OP(minimum, float, vec_min_nan2)
|
443 |
+
DEFINE_MEMBER_OP(operator&, float, vec_and)
|
444 |
+
DEFINE_MEMBER_OP(operator|, float, vec_or)
|
445 |
+
DEFINE_MEMBER_OP(operator^, float, vec_xor)
|
446 |
+
DEFINE_MEMBER_TERNARY_OP(madd, float, vec_madd)
|
447 |
+
};
|
448 |
+
|
449 |
+
template <>
|
450 |
+
Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
|
451 |
+
return a.maximum(b);
|
452 |
+
}
|
453 |
+
|
454 |
+
template <>
|
455 |
+
Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
|
456 |
+
return a.minimum(b);
|
457 |
+
}
|
458 |
+
|
459 |
+
} // namespace
|
460 |
+
} // namespace vec
|
461 |
+
} // namespace at
|
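Both loadu() and store() in the float specialization handle a partial count by round-tripping through an aligned temporary buffer and copying only count elements with std::memcpy, so tail iterations never read or write past the end of a tensor buffer. A small stand-alone sketch of that tail-handling pattern (partial_store and kSize are illustrative names, not part of the header; the 32-byte alignment is an assumption for the sketch):

// Store only `count` of the 8 lanes: full vector -> aligned temp -> memcpy tail.
#include <algorithm>
#include <cstring>
#include <cstdio>

constexpr int kSize = 8;  // mirrors Vectorized<float>::size()

void partial_store(const float (&lanes)[kSize], float* dst, int count) {
  alignas(32) float tmp[kSize];
  std::memcpy(tmp, lanes, sizeof(tmp));                            // dump all lanes
  std::memcpy(dst, tmp, std::min(count, kSize) * sizeof(float));   // copy only the valid tail
}

int main() {
  float lanes[kSize] = {1, 2, 3, 4, 5, 6, 7, 8};
  float out[3] = {};
  partial_store(lanes, out, 3);
  std::printf("%g %g %g\n", out[0], out[1], out[2]);  // 1 2 3
  return 0;
}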
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h
ADDED
@@ -0,0 +1,368 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
6 |
+
namespace at {
|
7 |
+
namespace vec {
|
8 |
+
// See Note [CPU_CAPABILITY namespace]
|
9 |
+
inline namespace CPU_CAPABILITY {
|
10 |
+
|
11 |
+
template <>
|
12 |
+
class Vectorized<int16_t> {
|
13 |
+
private:
|
14 |
+
union {
|
15 |
+
struct {
|
16 |
+
vint16 _vec0;
|
17 |
+
vint16 _vec1;
|
18 |
+
};
|
19 |
+
struct {
|
20 |
+
vbool16 _vecb0;
|
21 |
+
vbool16 _vecb1;
|
22 |
+
};
|
23 |
+
|
24 |
+
} __attribute__((__may_alias__));
|
25 |
+
|
26 |
+
public:
|
27 |
+
using value_type = int16_t;
|
28 |
+
using vec_internal_type = vint16;
|
29 |
+
using vec_internal_mask_type = vbool16;
|
30 |
+
using size_type = int;
|
31 |
+
static constexpr size_type size() {
|
32 |
+
return 16;
|
33 |
+
}
|
34 |
+
Vectorized() {}
|
35 |
+
C10_ALWAYS_INLINE Vectorized(vint16 v) : _vec0{v}, _vec1{v} {}
|
36 |
+
C10_ALWAYS_INLINE Vectorized(vbool16 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
37 |
+
C10_ALWAYS_INLINE Vectorized(vint16 v1, vint16 v2) : _vec0{v1}, _vec1{v2} {}
|
38 |
+
C10_ALWAYS_INLINE Vectorized(vbool16 v1, vbool16 v2) : _vecb0{v1}, _vecb1{v2} {}
|
39 |
+
C10_ALWAYS_INLINE Vectorized(int16_t scalar)
|
40 |
+
: _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
|
41 |
+
|
42 |
+
C10_ALWAYS_INLINE Vectorized(
|
43 |
+
int16_t scalar1,
|
44 |
+
int16_t scalar2,
|
45 |
+
int16_t scalar3,
|
46 |
+
int16_t scalar4,
|
47 |
+
int16_t scalar5,
|
48 |
+
int16_t scalar6,
|
49 |
+
int16_t scalar7,
|
50 |
+
int16_t scalar8,
|
51 |
+
int16_t scalar9,
|
52 |
+
int16_t scalar10,
|
53 |
+
int16_t scalar11,
|
54 |
+
int16_t scalar12,
|
55 |
+
int16_t scalar13,
|
56 |
+
int16_t scalar14,
|
57 |
+
int16_t scalar15,
|
58 |
+
int16_t scalar16)
|
59 |
+
: _vec0{vint16{
|
60 |
+
scalar1,
|
61 |
+
scalar2,
|
62 |
+
scalar3,
|
63 |
+
scalar4,
|
64 |
+
scalar5,
|
65 |
+
scalar6,
|
66 |
+
scalar7,
|
67 |
+
scalar8}},
|
68 |
+
_vec1{vint16{
|
69 |
+
scalar9,
|
70 |
+
scalar10,
|
71 |
+
scalar11,
|
72 |
+
scalar12,
|
73 |
+
scalar13,
|
74 |
+
scalar14,
|
75 |
+
scalar15,
|
76 |
+
scalar16}} {}
|
77 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
|
78 |
+
return _vec0;
|
79 |
+
}
|
80 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
|
81 |
+
return _vec1;
|
82 |
+
}
|
83 |
+
|
84 |
+
template <uint64_t mask>
|
85 |
+
static std::enable_if_t<mask == 0, Vectorized<int16_t>> C10_ALWAYS_INLINE
|
86 |
+
blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
87 |
+
return a;
|
88 |
+
}
|
89 |
+
|
90 |
+
template <uint64_t mask>
|
91 |
+
static std::enable_if_t<(mask & 65535) == 65535, Vectorized<int16_t>>
|
92 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
93 |
+
return b;
|
94 |
+
}
|
95 |
+
|
96 |
+
template <uint64_t mask>
|
97 |
+
static std::enable_if_t<mask == 255, Vectorized<int16_t>> C10_ALWAYS_INLINE
|
98 |
+
blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
99 |
+
return {b._vec0, a._vec1};
|
100 |
+
}
|
101 |
+
|
102 |
+
template <uint64_t mask>
|
103 |
+
static std::enable_if_t<(mask > 0 && mask < 255), Vectorized<int16_t>>
|
104 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
105 |
+
constexpr int16_t g0 = (mask & 1) * 0xffff;
|
106 |
+
constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff;
|
107 |
+
constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff;
|
108 |
+
constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff;
|
109 |
+
constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff;
|
110 |
+
constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff;
|
111 |
+
constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff;
|
112 |
+
constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff;
|
113 |
+
const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7};
|
114 |
+
|
115 |
+
return {(vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), a._vec1};
|
116 |
+
}
|
117 |
+
|
118 |
+
template <uint64_t mask>
|
119 |
+
static std::enable_if_t<
|
120 |
+
(mask > 255 && (mask & 65535) != 65535 && ((mask & 255) == 255)),
|
121 |
+
Vectorized<int16_t>>
|
122 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
123 |
+
constexpr int16_t g0_2 = (mask & 1) * 0xffff;
|
124 |
+
constexpr int16_t g1_2 = ((mask & 2) >> 1) * 0xffff;
|
125 |
+
constexpr int16_t g2_2 = ((mask & 4) >> 2) * 0xffff;
|
126 |
+
constexpr int16_t g3_2 = ((mask & 8) >> 3) * 0xffff;
|
127 |
+
constexpr int16_t g4_2 = ((mask & 16) >> 4) * 0xffff;
|
128 |
+
constexpr int16_t g5_2 = ((mask & 32) >> 5) * 0xffff;
|
129 |
+
constexpr int16_t g6_2 = ((mask & 64) >> 6) * 0xffff;
|
130 |
+
constexpr int16_t g7_2 = ((mask & 128) >> 7) * 0xffff;
|
131 |
+
|
132 |
+
const vint16 mask_2nd =
|
133 |
+
vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
|
134 |
+
// generated masks
|
135 |
+
return {b._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
|
136 |
+
}
|
137 |
+
|
138 |
+
template <uint64_t mask>
|
139 |
+
static std::enable_if_t<
|
140 |
+
(mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) == 0)),
|
141 |
+
Vectorized<int16_t>>
|
142 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
143 |
+
constexpr int16_t mask2 = (mask & 65535) >> 16;
|
144 |
+
constexpr int16_t g0_2 = (mask & 1) * 0xffff;
|
145 |
+
constexpr int16_t g1_2 = ((mask & 2) >> 1) * 0xffff;
|
146 |
+
constexpr int16_t g2_2 = ((mask & 4) >> 2) * 0xffff;
|
147 |
+
constexpr int16_t g3_2 = ((mask & 8) >> 3) * 0xffff;
|
148 |
+
constexpr int16_t g4_2 = ((mask & 16) >> 4) * 0xffff;
|
149 |
+
constexpr int16_t g5_2 = ((mask & 32) >> 5) * 0xffff;
|
150 |
+
constexpr int16_t g6_2 = ((mask & 64) >> 6) * 0xffff;
|
151 |
+
constexpr int16_t g7_2 = ((mask & 128) >> 7) * 0xffff;
|
152 |
+
|
153 |
+
const vint16 mask_2nd =
|
154 |
+
vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
|
155 |
+
// generated masks
|
156 |
+
return {a, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
|
157 |
+
}
|
158 |
+
|
159 |
+
template <uint64_t mask>
|
160 |
+
static std::enable_if_t<
|
161 |
+
(mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) != 0) &&
|
162 |
+
((mask & 255) != 255)),
|
163 |
+
Vectorized<int16_t>>
|
164 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
165 |
+
constexpr int16_t g0 = (mask & 1) * 0xffff;
|
166 |
+
constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff;
|
167 |
+
constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff;
|
168 |
+
constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff;
|
169 |
+
constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff;
|
170 |
+
constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff;
|
171 |
+
constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff;
|
172 |
+
constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff;
|
173 |
+
constexpr int16_t mask2 = (mask & 65535) >> 16;
|
174 |
+
constexpr int16_t g0_2 = (mask & 1) * 0xffff;
|
175 |
+
constexpr int16_t g1_2 = ((mask & 2) >> 1) * 0xffff;
|
176 |
+
constexpr int16_t g2_2 = ((mask & 4) >> 2) * 0xffff;
|
177 |
+
constexpr int16_t g3_2 = ((mask & 8) >> 3) * 0xffff;
|
178 |
+
constexpr int16_t g4_2 = ((mask & 16) >> 4) * 0xffff;
|
179 |
+
constexpr int16_t g5_2 = ((mask & 32) >> 5) * 0xffff;
|
180 |
+
constexpr int16_t g6_2 = ((mask & 64) >> 6) * 0xffff;
|
181 |
+
constexpr int16_t g7_2 = ((mask & 128) >> 7) * 0xffff;
|
182 |
+
|
183 |
+
const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7};
|
184 |
+
const vint16 mask_2nd =
|
185 |
+
vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
|
186 |
+
// generated masks
|
187 |
+
return {
|
188 |
+
(vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st),
|
189 |
+
(vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
|
190 |
+
}
|
191 |
+
|
192 |
+
static Vectorized<int16_t> C10_ALWAYS_INLINE blendv(
|
193 |
+
const Vectorized<int16_t>& a,
|
194 |
+
const Vectorized<int16_t>& b,
|
195 |
+
const Vectorized<int16_t>& mask) {
|
196 |
+
// the mask used here returned by comparision of vec256
|
197 |
+
// assuming this we can use the same mask directly with vec_sel
|
198 |
+
// warning intel style mask will not work properly
|
199 |
+
return {
|
200 |
+
vec_sel(a._vec0, b._vec0, mask._vecb0),
|
201 |
+
vec_sel(a._vec1, b._vec1, mask._vecb1)};
|
202 |
+
}
|
203 |
+
|
204 |
+
template <typename step_t>
|
205 |
+
static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
|
206 |
+
return Vectorized<int16_t>(
|
207 |
+
base,
|
208 |
+
base + step,
|
209 |
+
base + 2 * step,
|
210 |
+
base + 3 * step,
|
211 |
+
base + 4 * step,
|
212 |
+
base + 5 * step,
|
213 |
+
base + 6 * step,
|
214 |
+
base + 7 * step,
|
215 |
+
base + 8 * step,
|
216 |
+
base + 9 * step,
|
217 |
+
base + 10 * step,
|
218 |
+
base + 11 * step,
|
219 |
+
base + 12 * step,
|
220 |
+
base + 13 * step,
|
221 |
+
base + 14 * step,
|
222 |
+
base + 15 * step);
|
223 |
+
}
|
224 |
+
static Vectorized<int16_t> set(
|
225 |
+
const Vectorized<int16_t>& a,
|
226 |
+
const Vectorized<int16_t>& b,
|
227 |
+
size_t count = size()) {
|
228 |
+
switch (count) {
|
229 |
+
case 0:
|
230 |
+
return a;
|
231 |
+
case 1:
|
232 |
+
return blend<1>(a, b);
|
233 |
+
case 2:
|
234 |
+
return blend<3>(a, b);
|
235 |
+
case 3:
|
236 |
+
return blend<7>(a, b);
|
237 |
+
case 4:
|
238 |
+
return blend<15>(a, b);
|
239 |
+
case 5:
|
240 |
+
return blend<31>(a, b);
|
241 |
+
case 6:
|
242 |
+
return blend<63>(a, b);
|
243 |
+
case 7:
|
244 |
+
return blend<127>(a, b);
|
245 |
+
case 8:
|
246 |
+
return blend<255>(a, b);
|
247 |
+
case 9:
|
248 |
+
return blend<511>(a, b);
|
249 |
+
case 10:
|
250 |
+
return blend<1023>(a, b);
|
251 |
+
case 11:
|
252 |
+
return blend<2047>(a, b);
|
253 |
+
case 12:
|
254 |
+
return blend<4095>(a, b);
|
255 |
+
case 13:
|
256 |
+
return blend<8191>(a, b);
|
257 |
+
case 14:
|
258 |
+
return blend<16383>(a, b);
|
259 |
+
case 15:
|
260 |
+
return blend<32767>(a, b);
|
261 |
+
}
|
262 |
+
return b;
|
263 |
+
}
|
264 |
+
static Vectorized<value_type> C10_ALWAYS_INLINE
|
265 |
+
loadu(const void* ptr, int count = size()) {
|
266 |
+
if (count == size()) {
|
267 |
+
return {
|
268 |
+
vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
|
269 |
+
vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
|
270 |
+
}
|
271 |
+
|
272 |
+
__at_align__ value_type tmp_values[size()] = {};
|
273 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
274 |
+
|
275 |
+
return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
|
276 |
+
}
|
277 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
278 |
+
if (count == size()) {
|
279 |
+
      vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
    } else if (count > 0) {
      __at_align__ value_type tmp_values[size()];
      vec_vsx_st(_vec0, offset0, tmp_values);
      vec_vsx_st(_vec1, offset16, tmp_values);
      std::memcpy(ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }
  const int16_t& operator[](int idx) const = delete;
  int16_t& operator[](int idx) = delete;

  Vectorized<int16_t> angle() const {
    return blendv(
      Vectorized<int16_t>(0), Vectorized<int16_t>(c10::pi<int16_t>), *this < Vectorized<int16_t>(0));
  }
  Vectorized<int16_t> real() const {
    return *this;
  }
  Vectorized<int16_t> imag() const {
    return Vectorized<int16_t>{0};
  }
  Vectorized<int16_t> conj() const {
    return *this;
  }

  Vectorized<int16_t> C10_ALWAYS_INLINE abs() const {
    return {vec_abs(_vec0), vec_abs(_vec1)};
  }

  Vectorized<int16_t> C10_ALWAYS_INLINE neg() const {
    return {vec_neg(_vec0), vec_neg(_vec1)};
  }

  DEFINE_MEMBER_UNARY_OP(operator~, int16_t, vec_not)
  DEFINE_MEMBER_OP(operator==, int16_t, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, int16_t, vec_cmpne)
  DEFINE_MEMBER_OP(operator<, int16_t, vec_cmplt)
  DEFINE_MEMBER_OP(operator<=, int16_t, vec_cmple)
  DEFINE_MEMBER_OP(operator>, int16_t, vec_cmpgt)
  DEFINE_MEMBER_OP(operator>=, int16_t, vec_cmpge)
  DEFINE_MEMBER_OP_AND_ONE(eq, int16_t, vec_cmpeq)
  DEFINE_MEMBER_OP_AND_ONE(ne, int16_t, vec_cmpne)
  DEFINE_MEMBER_OP_AND_ONE(lt, int16_t, vec_cmplt)
  DEFINE_MEMBER_OP_AND_ONE(le, int16_t, vec_cmple)
  DEFINE_MEMBER_OP_AND_ONE(gt, int16_t, vec_cmpgt)
  DEFINE_MEMBER_OP_AND_ONE(ge, int16_t, vec_cmpge)
  DEFINE_MEMBER_OP(operator+, int16_t, vec_add)
  DEFINE_MEMBER_OP(operator-, int16_t, vec_sub)
  DEFINE_MEMBER_OP(operator*, int16_t, vec_mul)
  DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int16_t, /)
  DEFINE_MEMBER_OP(maximum, int16_t, vec_max)
  DEFINE_MEMBER_OP(minimum, int16_t, vec_min)
  DEFINE_MEMBER_OP(operator&, int16_t, vec_and)
  DEFINE_MEMBER_OP(operator|, int16_t, vec_or)
  DEFINE_MEMBER_OP(operator^, int16_t, vec_xor)
};

template <>
Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
  vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
  return Vectorized<int16_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
}

template <>
Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
  vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
  return Vectorized<int16_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
}

template <>
Vectorized<int16_t> inline maximum(
    const Vectorized<int16_t>& a,
    const Vectorized<int16_t>& b) {
  return a.maximum(b);
}

template <>
Vectorized<int16_t> inline minimum(
    const Vectorized<int16_t>& a,
    const Vectorized<int16_t>& b) {
  return a.minimum(b);
}

} // namespace
} // namespace vec
} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h
ADDED
@@ -0,0 +1,298 @@
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
namespace at {
namespace vec {
// See Note [CPU_CAPABILITY namespace]
inline namespace CPU_CAPABILITY {

template <>
class Vectorized<int32_t> {
 private:
  union {
    struct {
      vint32 _vec0;
      vint32 _vec1;
    };
    struct {
      vbool32 _vecb0;
      vbool32 _vecb1;
    };

  } __attribute__((__may_alias__));

 public:
  using value_type = int32_t;
  using vec_internal_type = vint32;
  using vec_internal_mask_type = vbool32;
  using size_type = int;
  static constexpr size_type size() {
    return 8;
  }
  Vectorized() {}
  C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
  C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
  C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
  C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
  C10_ALWAYS_INLINE Vectorized(int32_t scalar)
      : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
  C10_ALWAYS_INLINE Vectorized(
      int32_t scalar1,
      int32_t scalar2,
      int32_t scalar3,
      int32_t scalar4,
      int32_t scalar5,
      int32_t scalar6,
      int32_t scalar7,
      int32_t scalar8)
      : _vec0{vint32{scalar1, scalar2, scalar3, scalar4}},
        _vec1{vint32{scalar5, scalar6, scalar7, scalar8}} {}
  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
    return _vec0;
  }
  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
    return _vec1;
  }

  template <uint64_t mask>
  static std::enable_if_t<mask == 0, Vectorized<int32_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    return a;
  }

  template <uint64_t mask>
  static std::enable_if_t<(mask & 255) == 255, Vectorized<int32_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    return b;
  }

  template <uint64_t mask>
  static std::enable_if_t<mask == 15, Vectorized<int32_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    return {b._vec0, a._vec1};
  }

  template <uint64_t mask>
  static std::enable_if_t<(mask > 0 && mask < 15), Vectorized<int32_t>>
  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
    constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
    constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
    constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
    const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};

    return {(vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st), a._vec1};
  }

  template <uint64_t mask>
  static std::enable_if_t<
      (mask > 15 && (mask & 255) != 255 && ((mask & 15) == 15)),
      Vectorized<int32_t>>
  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    constexpr uint32_t mask2 = (mask & 255) >> 4;
    constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
    constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
    constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
    constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;

    const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
    // generated masks
    return {b._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
  }

  template <uint64_t mask>
  static std::enable_if_t<
      (mask > 15 && ((mask & 255) != 255) && ((mask & 15) == 0)),
      Vectorized<int32_t>>
  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    constexpr uint32_t mask2 = (mask & 255) >> 4;
    constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
    constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
    constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
    constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;

    const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
    // generated masks
    return {a._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
  }

  template <uint64_t mask>
  static std::enable_if_t<
      (mask > 15 && ((mask & 255) != 255) && ((mask & 15) != 0) &&
       ((mask & 15) != 15)),
      Vectorized<int32_t>>
  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
    constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
    constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
    constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
    constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
    constexpr uint32_t mask2 = (mask & 255) >> 4;
    constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
    constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
    constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
    constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;

    const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
    const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
    // generated masks
    return {
        (vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st),
        (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
  }

  static Vectorized<int32_t> C10_ALWAYS_INLINE blendv(
      const Vectorized<int32_t>& a,
      const Vectorized<int32_t>& b,
      const Vectorized<int32_t>& mask) {
    // The mask used here is returned by a comparison of vec256 elements,
    // so it can be used directly with vec_sel.
    // Warning: an Intel-style mask will not work properly here.
    return {
        vec_sel(a._vec0, b._vec0, mask._vecb0),
        vec_sel(a._vec1, b._vec1, mask._vecb1)};
  }

  template <typename step_t>
  static Vectorized<int32_t> arange(int32_t base = 0.f, step_t step = static_cast<step_t>(1)) {
    return Vectorized<int32_t>(
        base,
        base + step,
        base + 2 * step,
        base + 3 * step,
        base + 4 * step,
        base + 5 * step,
        base + 6 * step,
        base + 7 * step);
  }
  static Vectorized<int32_t> set(
      const Vectorized<int32_t>& a,
      const Vectorized<int32_t>& b,
      size_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
      case 4:
        return blend<15>(a, b);
      case 5:
        return blend<31>(a, b);
      case 6:
        return blend<63>(a, b);
      case 7:
        return blend<127>(a, b);
    }

    return b;
  }
  static Vectorized<value_type> C10_ALWAYS_INLINE
  loadu(const void* ptr, int count = size()) {
    if (count == size()) {
      return {
          vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
          vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
    }

    __at_align__ value_type tmp_values[size()] = {};
    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));

    return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
  }
  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
    if (count == size()) {
      vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
    } else if (count > 0) {
      __at_align__ value_type tmp_values[size()];
      vec_vsx_st(_vec0, offset0, tmp_values);
      vec_vsx_st(_vec1, offset16, tmp_values);
      std::memcpy(
          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }
  const int32_t& operator[](int idx) const = delete;
  int32_t& operator[](int idx) = delete;

  Vectorized<int32_t> angle() const {
    return blendv(
      Vectorized<int32_t>(0), Vectorized<int32_t>(c10::pi<int32_t>), *this < Vectorized<int32_t>(0));
  }
  Vectorized<int32_t> real() const {
    return *this;
  }
  Vectorized<int32_t> imag() const {
    return Vectorized<int32_t>{0};
  }
  Vectorized<int32_t> conj() const {
    return *this;
  }

  Vectorized<int32_t> C10_ALWAYS_INLINE abs() const {
    return {vec_abs(_vec0), vec_abs(_vec1)};
  }

  Vectorized<int32_t> C10_ALWAYS_INLINE neg() const {
    return {vec_neg(_vec0), vec_neg(_vec1)};
  }

  DEFINE_MEMBER_UNARY_OP(operator~, int32_t, vec_not)
  DEFINE_MEMBER_OP(operator==, int32_t, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, int32_t, vec_cmpne)
  DEFINE_MEMBER_OP(operator<, int32_t, vec_cmplt)
  DEFINE_MEMBER_OP(operator<=, int32_t, vec_cmple)
  DEFINE_MEMBER_OP(operator>, int32_t, vec_cmpgt)
  DEFINE_MEMBER_OP(operator>=, int32_t, vec_cmpge)
  DEFINE_MEMBER_OP_AND_ONE(eq, int32_t, vec_cmpeq)
  DEFINE_MEMBER_OP_AND_ONE(ne, int32_t, vec_cmpne)
  DEFINE_MEMBER_OP_AND_ONE(lt, int32_t, vec_cmplt)
  DEFINE_MEMBER_OP_AND_ONE(le, int32_t, vec_cmple)
  DEFINE_MEMBER_OP_AND_ONE(gt, int32_t, vec_cmpgt)
  DEFINE_MEMBER_OP_AND_ONE(ge, int32_t, vec_cmpge)
  DEFINE_MEMBER_OP(operator+, int32_t, vec_add)
  DEFINE_MEMBER_OP(operator-, int32_t, vec_sub)
  DEFINE_MEMBER_OP(operator*, int32_t, vec_mul)
  DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int32_t, /)
  DEFINE_MEMBER_OP(maximum, int32_t, vec_max)
  DEFINE_MEMBER_OP(minimum, int32_t, vec_min)
  DEFINE_MEMBER_OP(operator&, int32_t, vec_and)
  DEFINE_MEMBER_OP(operator|, int32_t, vec_or)
  DEFINE_MEMBER_OP(operator^, int32_t, vec_xor)
};

template <>
Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  vuint32 shift_vec0 = reinterpret_cast<vuint32>(b.vec0());
  vuint32 shift_vec1 = reinterpret_cast<vuint32>(b.vec1());
  return Vectorized<int32_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
}

template <>
Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  vuint32 shift_vec0 = reinterpret_cast<vuint32>(b.vec0());
  vuint32 shift_vec1 = reinterpret_cast<vuint32>(b.vec1());
  return Vectorized<int32_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
}

template <>
Vectorized<int32_t> inline maximum(
    const Vectorized<int32_t>& a,
    const Vectorized<int32_t>& b) {
  return a.maximum(b);
}

template <>
Vectorized<int32_t> inline minimum(
    const Vectorized<int32_t>& a,
    const Vectorized<int32_t>& b) {
  return a.minimum(b);
}

} // namespace
} // namespace vec
} // namespace at
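A quick note on the compile-time blend above: bit i of the template mask selects lane i from b, while a cleared bit keeps lane i from a. The snippet below is only a sketch, assuming a POWER/VSX build of this package where the header can be included directly; pick_even_lanes_from_b is a hypothetical helper, not part of ATen.

#include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h>

// Illustrative sketch only: lane selection with Vectorized<int32_t>::blend<mask>.
at::vec::Vectorized<int32_t> pick_even_lanes_from_b() {
  at::vec::Vectorized<int32_t> a(0);                       // all 8 lanes hold 0
  at::vec::Vectorized<int32_t> b(1, 2, 3, 4, 5, 6, 7, 8);  // lanes hold 1..8
  // Mask bit i set -> take lane i from b; cleared -> keep lane i from a.
  // 0b01010101 therefore yields {1, 0, 3, 0, 5, 0, 7, 0}.
  return at::vec::Vectorized<int32_t>::blend<0b01010101>(a, b);
}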
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h
ADDED
@@ -0,0 +1,251 @@
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
namespace at {
namespace vec {
// See Note [CPU_CAPABILITY namespace]
inline namespace CPU_CAPABILITY {

template <>
class Vectorized<int64_t> {
 private:
  union {
    struct {
      vint64 _vec0;
      vint64 _vec1;
    };
    struct {
      vbool64 _vecb0;
      vbool64 _vecb1;
    };

  } __attribute__((__may_alias__));

 public:
  using value_type = int64_t;
  using vec_internal_type = vint64;
  using vec_internal_mask_type = vbool64;
  using size_type = int;
  using ElementType = signed long long;
  static constexpr size_type size() {
    return 4;
  }
  Vectorized() {}
  C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {}
  C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
  C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {}
  C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
  C10_ALWAYS_INLINE Vectorized(int64_t scalar)
      : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
  C10_ALWAYS_INLINE Vectorized(
      int64_t scalar1,
      int64_t scalar2,
      int64_t scalar3,
      int64_t scalar4)
      : _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {}

  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
    return _vec0;
  }
  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
    return _vec1;
  }

  template <uint64_t mask>
  static std::enable_if_t<mask == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
    return a;
  }

  template <uint64_t mask>
  static std::enable_if_t<mask == 3, Vectorized<int64_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
    return {b._vec0, a._vec1};
  }

  template <uint64_t mask>
  static std::enable_if_t<(mask & 15) == 15, Vectorized<int64_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
    return b;
  }

  template <uint64_t mask>
  static std::enable_if_t<(mask > 0 && mask < 3), Vectorized<int64_t>> C10_ALWAYS_INLINE
  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
    constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
    constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
    const vbool64 mask_1st = (vbool64){g0, g1};
    return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1};
  }

  template <uint64_t mask>
  static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized<int64_t>>
  C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
    constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
    constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;

    const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
    return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
  }

  template <uint64_t mask>
  static std::enable_if_t<
      (mask > 3) && (mask & 3) != 0 && (mask & 15) != 15,
      Vectorized<int64_t>>
  C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
    constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
    constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
    constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
    constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;

    const vbool64 mask_1st = (vbool64){g0, g1};
    const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
    return {
        (vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st),
        (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
  }

  static Vectorized<int64_t> C10_ALWAYS_INLINE blendv(
      const Vectorized<int64_t>& a,
      const Vectorized<int64_t>& b,
      const Vectorized<int64_t>& mask) {
    // The mask used here is returned by a comparison of vec256 elements.

    return {
        vec_sel(a._vec0, b._vec0, mask._vecb0),
        vec_sel(a._vec1, b._vec1, mask._vecb1)};
  }
  template <typename step_t>
  static Vectorized<int64_t> arange(int64_t base = 0., step_t step = static_cast<step_t>(1)) {
    return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
  }

  static Vectorized<int64_t> C10_ALWAYS_INLINE
  set(const Vectorized<int64_t>& a,
      const Vectorized<int64_t>& b,
      size_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
    }

    return b;
  }
  static Vectorized<value_type> C10_ALWAYS_INLINE
  loadu(const void* ptr, int count = size()) {
    if (count == size()) {
      static_assert(sizeof(double) == sizeof(value_type));
      const double* dptr = reinterpret_cast<const double*>(ptr);
      return {// treat it as double load
              (vint64)vec_vsx_ld(offset0, dptr),
              (vint64)vec_vsx_ld(offset16, dptr)};
    }

    __at_align__ double tmp_values[size()] = {};
    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));

    return {
        (vint64)vec_vsx_ld(offset0, tmp_values),
        (vint64)vec_vsx_ld(offset16, tmp_values)};
  }
  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
    if (count == size()) {
      double* dptr = reinterpret_cast<double*>(ptr);
      vec_vsx_st((vfloat64)_vec0, offset0, dptr);
      vec_vsx_st((vfloat64)_vec1, offset16, dptr);
    } else if (count > 0) {
      __at_align__ double tmp_values[size()];
      vec_vsx_st((vfloat64)_vec0, offset0, tmp_values);
      vec_vsx_st((vfloat64)_vec1, offset16, tmp_values);
      std::memcpy(
          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }
  const int64_t& operator[](int idx) const = delete;
  int64_t& operator[](int idx) = delete;

  Vectorized<int64_t> angle() const {
    return blendv(
      Vectorized<int64_t>(0), Vectorized<int64_t>(c10::pi<int64_t>), *this < Vectorized<int64_t>(0));
  }
  Vectorized<int64_t> real() const {
    return *this;
  }
  Vectorized<int64_t> imag() const {
    return Vectorized<int64_t>{0};
  }
  Vectorized<int64_t> conj() const {
    return *this;
  }

  Vectorized<int64_t> C10_ALWAYS_INLINE abs() const {
    return {vec_abs(_vec0), vec_abs(_vec1)};
  }

  Vectorized<int64_t> C10_ALWAYS_INLINE neg() const {
    return {vec_neg(_vec0), vec_neg(_vec1)};
  }

  DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not)
  DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne)
  DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt)
  DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple)
  DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt)
  DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge)
  DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq)
  DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne)
  DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt)
  DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple)
  DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt)
  DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge)
  DEFINE_MEMBER_OP(operator+, int64_t, vec_add)
  DEFINE_MEMBER_OP(operator-, int64_t, vec_sub)
  DEFINE_MEMBER_OP(operator*, int64_t, vec_mul)
  DEFINE_MEMBER_OP(operator/, int64_t, vec_div)
  DEFINE_MEMBER_OP(maximum, int64_t, vec_max)
  DEFINE_MEMBER_OP(minimum, int64_t, vec_min)
  DEFINE_MEMBER_OP(operator&, int64_t, vec_and)
  DEFINE_MEMBER_OP(operator|, int64_t, vec_or)
  DEFINE_MEMBER_OP(operator^, int64_t, vec_xor)
};

template <>
Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
  vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1());
  return Vectorized<int64_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
}

template <>
Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
  vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1());
  return Vectorized<int64_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
}

template <>
Vectorized<int64_t> inline maximum(
    const Vectorized<int64_t>& a,
    const Vectorized<int64_t>& b) {
  return a.maximum(b);
}

template <>
Vectorized<int64_t> inline minimum(
    const Vectorized<int64_t>& a,
    const Vectorized<int64_t>& b) {
  return a.minimum(b);
}

} // namespace
} // namespace vec
} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h
ADDED
@@ -0,0 +1,245 @@
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
#include <c10/util/qint32.h>
#include <array>

// This file defines Vectorized<> for the quantized types.
//
// Currently, we simply use these classes as efficient converters between
// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
// where doing the arithmetic in full-precision is acceptable (e.g.
// elementwise operators).
//
// Conversions are as follows:
//  Vectorized<qint32> -> 1x Vectorized<float>
//
// The size of the returned float vector is specified by the special
// constexpr function float_num_vecs. The type of the value returned
// from dequantize (and expected as an argument to quantize) is
// specified by float_vec_return_type.
//
// When writing kernels with these vectors, it is expected that floating-
// point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
// iterations.

namespace at {
namespace vec {
inline namespace CPU_CAPABILITY {

template <>
struct Vectorized<c10::qint32> {
 private:
  union {
    struct {
      vint32 _vec0;
      vint32 _vec1;
    };
    struct {
      vbool32 _vecb0;
      vbool32 _vecb1;
    };

  } __attribute__((__may_alias__));

 public:
  Vectorized() {}

  using size_type = int;
  static constexpr size_type size() {
    return 8;
  }

  static constexpr size_t float_num_vecs() {
    return 1;
  }
  static constexpr int int_num_vecs() {
    return 1;
  }
  using float_vec_return_type = std::array<Vectorized<float>, 1>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
  using value_type = c10::qint32::underlying;
  using vec_internal_type = vint32;
  using vec_internal_mask_type = vbool32;
  C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
  C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
  C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
  C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}

  Vectorized(const c10::qint32& val)
      : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}

  static Vectorized<c10::qint32> C10_ALWAYS_INLINE
  loadu(const void* ptr, int count = size()) {
    if (count == size()) {
      return {
          vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
          vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
    }

    __at_align__ value_type tmp_values[size()] = {};
    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));

    return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
  }
  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
    if (count == size()) {
      vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
    } else if (count > 0) {
      __at_align__ value_type tmp_values[size()];
      vec_vsx_st(_vec0, offset0, tmp_values);
      vec_vsx_st(_vec1, offset16, tmp_values);
      std::memcpy(
          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }

  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
    return _vec0;
  }
  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
    return _vec1;
  }

  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point,
      Vectorized<float> scale_zp_premul) const {
    vfloat32 float_vals0 = vec_float(_vec0);
    vfloat32 float_vals1 = vec_float(_vec1);
    vfloat32 scale_vec0 = scale.vec0();
    vfloat32 scale_vec1 = scale.vec1();
    vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
    vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
    return {Vectorized<float>{
        vec_madd(scale_vec0, float_vals0, scale_zp_premul0),
        vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}};
  }

  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
    vfloat32 float_vals0 = vec_float(_vec0);
    vfloat32 float_vals1 = vec_float(_vec1);
    vfloat32 scale_vec0 = scale.vec0();
    vfloat32 scale_vec1 = scale.vec1();
    vfloat32 zero_point0 = zero_point.vec0();
    vfloat32 zero_point1 = zero_point.vec1();
    return {Vectorized<float>{
        (float_vals0 - zero_point0) * scale_vec0,
        (float_vals1 - zero_point1) * scale_vec1}};
  }

  static Vectorized<c10::qint32> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float inverse_scale) {
    Vectorized<c10::qint32> retval;

    const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
    const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
    vfloat32 inverse_scale_v = vec_splats(inverse_scale);
    vfloat32 vec_zero_point = vec_splats((float)(zero_point));
    Vectorized<float> vf0 = rhs[0];

    vfloat32 vecf0 = vf0.vec0();
    vfloat32 vecf1 = vf0.vec1();
    vecf0 = vec_mul(vecf0, inverse_scale_v);
    vecf1 = vec_mul(vecf1, inverse_scale_v);
    vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
    vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
    vint32 veci0 = vec_signed(vecf0);
    vint32 veci1 = vec_signed(vecf1);

    veci0 = vec_max(veci0, vmin);
    veci1 = vec_max(veci1, vmin);
    veci0 = vec_min(veci0, vmax);
    veci1 = vec_min(veci1, vmax);

    return {veci0, veci1};
  }

  Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
    return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
  }

  Vectorized<c10::qint32> relu6(
      Vectorized<c10::qint32> zero_point,
      Vectorized<c10::qint32> q_six) const {
    vint32 max0 = vec_max(_vec0, zero_point._vec0);
    vint32 max1 = vec_max(_vec1, zero_point._vec1);
    return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
  }

  int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
    return {*this - b};
  }

  static Vectorized<c10::qint32> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
    const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
    vfloat32 vec_mult = vec_splats(multiplier);
    vint32 vec_zero_point = vec_splats(zero_point);
    Vectorized<c10::qint32> vi = inp[0];
    vfloat32 vecf0 = vec_float(vi.vec0());
    vfloat32 vecf1 = vec_float(vi.vec1());

    vecf0 = vec_mul(vecf0, vec_mult);
    vecf1 = vec_mul(vecf1, vec_mult);

    vecf0 = vec_rint(vecf0);
    vecf1 = vec_rint(vecf1);

    vint32 veci0 = vec_add(vec_signed(vecf0), vec_zero_point);
    vint32 veci1 = vec_add(vec_signed(vecf1), vec_zero_point);

    veci0 = vec_max(veci0, vmin);
    veci1 = vec_max(veci1, vmin);
    veci0 = vec_min(veci0, vmax);
    veci1 = vec_min(veci1, vmax);

    return {veci0, veci1};
  }

  DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne)
  DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt)
  DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple)
  DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt)
  DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge)
  DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add)
  DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub)
  DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul)
  DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /)
  DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max)
  DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min)
  DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and)
  DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or)
  DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor)
};

template <>
Vectorized<c10::qint32> inline maximum(
    const Vectorized<c10::qint32>& a,
    const Vectorized<c10::qint32>& b) {
  return a.maximum(b);
}

template <>
Vectorized<c10::qint32> inline minimum(
    const Vectorized<c10::qint32>& a,
    const Vectorized<c10::qint32>& b) {
  return a.minimum(b);
}
} // namespace
} // namespace vec
} // namespace at
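The comment block at the top of this header describes the intended pattern: dequantize to float, do the arithmetic there, then quantize back. Below is a minimal sketch of that round trip, assuming a POWER/VSX build where the float and qint32 specializations are pulled in through ATen's aggregate ATen/cpu/vec/vec.h header; add_one_in_float_space is a hypothetical helper, and the buffer is assumed to hold at least Vectorized<c10::qint32>::size() (8) elements.

#include <ATen/cpu/vec/vec.h>

// Illustrative sketch only: dequantize to float, do the math, requantize.
void add_one_in_float_space(c10::qint32* data, float scale, int32_t zero_point) {
  using at::vec::Vectorized;
  auto qv = Vectorized<c10::qint32>::loadu(data);
  // float_num_vecs() == 1 for qint32, so dequantize returns a 1-element array.
  auto fv = qv.dequantize(
      Vectorized<float>(scale), Vectorized<float>(static_cast<float>(zero_point)));
  fv[0] = fv[0] + Vectorized<float>(1.0f);
  auto out = Vectorized<c10::qint32>::quantize(fv, scale, zero_point, 1.0f / scale);
  out.store(data);
}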