Add files using upload-large-folder tool
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h +3 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h +46 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h +164 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Array.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h +2 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h +135 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h +31 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h +397 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h +209 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h +337 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h +191 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h +201 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h +111 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List.h +490 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h +360 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h +194 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h +140 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h +242 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h +83 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Range.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h +16 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h +1 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h +1 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h +92 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h +276 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h +1039 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h +173 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h +1 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h +21 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h +75 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h +95 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h +96 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h +151 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h +2180 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/blob.h +208 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h +88 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h +19 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h +101 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function.h +111 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h +483 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/functional.h +54 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h
ADDED
@@ -0,0 +1,3 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h
ADDED
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+
+namespace c10 {
+struct OperatorName;
+}
+
+namespace at {
+
+// check if an op is a custom op (i.e. did not come from native_functions.yaml)
+TORCH_API bool is_custom_op(const c10::OperatorName& opName);
+}
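A minimal caller sketch for the declaration above (not part of the diff). It assumes a full libtorch build where c10::OperatorName is available from <ATen/core/operator_name.h>; the operator name used here is only an illustration.

#include <ATen/core/ATenOpList.h>
#include <ATen/core/operator_name.h>

bool is_builtin_add() {
  // OperatorName pairs a qualified name ("aten::add") with an overload name ("Tensor").
  c10::OperatorName op_name("aten::add", "Tensor");
  // is_custom_op() returns true only for ops that did not come from native_functions.yaml,
  // so a core op like aten::add should report false.
  return !at::is_custom_op(op_name);
}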
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h
ADDED
@@ -0,0 +1,46 @@
+#pragma once
+#include <c10/core/QScheme.h>
+
+// Forward declarations of core ATen types used in dispatch functions
+namespace c10 {
+
+template<typename T>
+class List;
+template<typename T>
+class IListRef;
+class Stream;
+class Scalar;
+class SymInt;
+class SymIntList;
+struct Storage;
+struct TensorOptions;
+template <typename T>
+class ArrayRef;
+template <typename T>
+class OptionalArrayRef;
+
+}  // namespace c10
+
+namespace at {
+
+class Tensor;
+class OptionalTensorRef;
+struct Dimname;
+struct Generator;
+using TensorList = c10::ArrayRef<Tensor>;
+using ITensorListRef = c10::IListRef<Tensor>;
+using IOptTensorListRef = c10::IListRef<OptionalTensorRef>;
+using DimnameList = c10::ArrayRef<Dimname>;
+using IntArrayRef = c10::ArrayRef<int64_t>;
+using OptionalIntArrayRef = c10::OptionalArrayRef<int64_t>;
+using OptionalSymIntArrayRef = c10::OptionalArrayRef<c10::SymInt>;
+
+using c10::Stream;
+using c10::Storage;
+using c10::QScheme;
+using c10::Scalar;
+using c10::SymInt;
+using c10::SymIntList;
+using c10::TensorOptions;
+
+}  // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h
ADDED
@@ -0,0 +1,164 @@
+// This global header must not depend on native_functions.yaml or
+// incremental builds will be next to useless
+#pragma push_macro("TORCH_ASSERT_NO_OPERATORS")
+#define TORCH_ASSERT_NO_OPERATORS
+
+// This macro doesn't work if defined after the first time inttypes.h
+// is included, so won't work anywhere if not defined here.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <cinttypes>
+
+// This list of headers was generated using a script that finds
+// high-impact headers and then manually tweaked to remove OS specific
+// or duplicate headers (e.g. <cassert> and <assert.h>) and to remove
+// "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10).
+
+// To generate the initial list:
+// 1. Build pytorch from scratch with all build caching disabled
+// 2. Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing)
+//    $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json
+// 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/
+//    $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json
+//    Where the threshold can be tweaked until c10 and some of ATen
+//    core are included but TORCH_ASSERT_NO_OPERATORS still passes.
+
+#include <cerrno>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <chrono>
+#include <complex>
+#include <deque>
+#include <exception>
+#include <functional>
+#include <initializer_list>
+#include <iomanip>
+#include <iosfwd>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <new>
+#include <numeric>
+#include <ostream>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeindex>
+#include <typeinfo>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <c10/core/Allocator.h>
+#include <c10/core/AutogradState.h>
+#include <c10/core/Backend.h>
+#include <c10/core/DefaultDtype.h>
+#include <c10/core/Device.h>
+#include <c10/core/DeviceType.h>
+#include <c10/core/DispatchKey.h>
+#include <c10/core/DispatchKeySet.h>
+#include <c10/core/GeneratorImpl.h>
+#include <c10/core/InferenceMode.h>
+#include <c10/core/Layout.h>
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/OptionalRef.h>
+#include <c10/core/QScheme.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/ScalarType.h>
+#include <c10/core/ScalarTypeToTypeMeta.h>
+#include <c10/core/Storage.h>
+#include <c10/core/StorageImpl.h>
+#include <c10/core/SymBool.h>
+#include <c10/core/SymFloat.h>
+#include <c10/core/SymInt.h>
+#include <c10/core/SymIntArrayRef.h>
+#include <c10/core/SymNodeImpl.h>
+#include <c10/core/TensorImpl.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/core/UndefinedTensorImpl.h>
+#include <c10/core/WrapDimMinimal.h>
+#include <c10/core/impl/LocalDispatchKeySet.h>
+#include <c10/core/impl/PyInterpreter.h>
+#include <c10/core/impl/SizesAndStrides.h>
+
+#include <c10/macros/Export.h>
+#include <c10/macros/Macros.h>
+
+#include <c10/util/AlignOf.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/BFloat16.h>
+#include <c10/util/C++17.h>
+#include <c10/util/ConstexprCrc.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/DimVector.h>
+#include <c10/util/Exception.h>
+#include <c10/util/ExclusivelyOwned.h>
+#include <c10/util/Flags.h>
+#include <c10/util/Float8_e4m3fn.h>
+#include <c10/util/Float8_e5m2.h>
+#include <c10/util/FunctionRef.h>
+#include <c10/util/Half.h>
+#include <c10/util/IdWrapper.h>
+#include <c10/util/Logging.h>
+#include <c10/util/MaybeOwned.h>
+#include <c10/util/Metaprogramming.h>
+#include <c10/util/Optional.h>
+#include <c10/util/Registry.h>
+#include <c10/util/SmallVector.h>
+#include <c10/util/StringUtil.h>
+#include <c10/util/ThreadLocalDebugInfo.h>
+#include <c10/util/Type.h>
+#include <c10/util/TypeCast.h>
+#include <c10/util/TypeIndex.h>
+#include <c10/util/TypeList.h>
+#include <c10/util/TypeSafeSignMath.h>
+#include <c10/util/TypeTraits.h>
+#include <c10/util/UniqueVoidPtr.h>
+#include <c10/util/accumulate.h>
+#include <c10/util/bit_cast.h>
+#include <c10/util/bits.h>
+#include <c10/util/complex.h>
+#include <c10/util/floating_point_utils.h>
+#include <c10/util/in_place.h>
+#include <c10/util/intrusive_ptr.h>
+#include <c10/util/irange.h>
+#include <c10/util/llvmMathExtras.h>
+#include <c10/util/python_stub.h>
+#include <c10/util/qint32.h>
+#include <c10/util/qint8.h>
+#include <c10/util/quint2x4.h>
+#include <c10/util/quint4x2.h>
+#include <c10/util/quint8.h>
+#include <c10/util/safe_numerics.h>
+#include <c10/util/string_utils.h>
+#include <c10/util/string_view.h>
+#include <c10/util/typeid.h>
+
+#include <ATen/StorageUtils.h>
+#include <ATen/core/ATen_fwd.h>
+#include <ATen/core/DeprecatedTypeProperties.h>
+#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
+#include <ATen/core/DimVector.h>
+#include <ATen/core/Dimname.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/NamedTensor.h>
+#include <ATen/core/QuantizerBase.h>
+#include <ATen/core/TensorAccessor.h>
+#include <ATen/core/TensorBase.h>
+#include <ATen/core/symbol.h>
+
+#pragma pop_macro("TORCH_ASSERT_NO_OPERATORS")
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Array.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// A fixed-size array type usable from both host and
+// device code.
+
+#include <c10/macros/Macros.h>
+#include <c10/util/irange.h>
+
+namespace at { namespace detail {
+
+template <typename T, int size_>
+struct Array {
+  T data[size_];
+
+  C10_HOST_DEVICE T operator[](int i) const {
+    return data[i];
+  }
+  C10_HOST_DEVICE T& operator[](int i) {
+    return data[i];
+  }
+#if defined(USE_ROCM)
+  C10_HOST_DEVICE Array() = default;
+  C10_HOST_DEVICE Array(const Array&) = default;
+  C10_HOST_DEVICE Array& operator=(const Array&) = default;
+#else
+  Array() = default;
+  Array(const Array&) = default;
+  Array& operator=(const Array&) = default;
+#endif
+  static constexpr int size(){return size_;}
+  // Fill the array with x.
+  C10_HOST_DEVICE Array(T x) {
+    for (int i = 0; i < size_; i++) {
+      data[i] = x;
+    }
+  }
+};
+
+}}
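A short host-side usage sketch for at::detail::Array as declared above (not part of the diff); it exercises the fill constructor, operator[], and the constexpr size() member.

#include <ATen/core/Array.h>

at::detail::Array<float, 4> make_filled() {
  at::detail::Array<float, 4> arr(0.0f);  // fill constructor: data[0..3] == 0.0f
  arr[2] = 1.5f;                          // non-const operator[] returns T&
  static_assert(decltype(arr)::size() == 4, "size() is constexpr");
  return arr;                             // plain aggregate, also usable from device code
}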
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h
ADDED
@@ -0,0 +1,2 @@
+#include <c10/util/Backtrace.h>
+#include <c10/util/Type.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h
ADDED
@@ -0,0 +1,25 @@
+#include <c10/core/TensorOptions.h>
+
+namespace c10 { namespace impl {
+
+inline c10::optional<MemoryFormat>
+check_tensor_options_and_extract_memory_format(
+    const TensorOptions& options,
+    c10::optional<MemoryFormat> memory_format) {
+  TORCH_CHECK(
+      options.requires_grad_opt() == c10::nullopt ||
+          options.requires_grad_opt().value() == false,
+      "Operators taking TensorOptions cannot take a TensorOptions with "
+      "options.requires_grad set as true. This isn't implemented yet.");
+  TORCH_CHECK(
+      !(options.has_memory_format() && memory_format.has_value()),
+      "Cannot set memory_format both in TensorOptions and explicit argument; please delete "
+      "the redundant setter.");
+  if (memory_format.has_value()) {
+    return memory_format;
+  } else {
+    return options.memory_format_opt();
+  }
+}
+
+}}  // namespace impl namespace c10
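A small sketch of how the helper above resolves the memory format (not part of the diff): the explicit argument is returned when the TensorOptions carries none, while setting it in both places trips the second TORCH_CHECK.

#include <ATen/core/CheckMemoryFormat.h>

c10::optional<c10::MemoryFormat> resolve_format() {
  c10::TensorOptions opts;  // no memory_format set on the options
  // Returns ChannelsLast; passing options that also set a memory_format together
  // with an explicit argument would fail the "Cannot set memory_format both..." check.
  return c10::impl::check_tensor_options_and_extract_memory_format(
      opts, c10::MemoryFormat::ChannelsLast);
}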
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h
ADDED
@@ -0,0 +1,135 @@
+#pragma once
+
+#include <c10/core/Backend.h>
+#include <c10/core/ScalarType.h>
+#include <c10/core/Layout.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/core/Storage.h>
+#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
+#include <ATen/core/Generator.h>
+
+
+namespace at {
+
+class Tensor;
+
+// This class specifies a Backend and a ScalarType. Currently, it primarily
+// serves as a replacement return value for Tensor::type(). Previously,
+// Tensor::type() returned Type&, but we are changing Type to not be
+// dtype-specific.
+class TORCH_API DeprecatedTypeProperties {
+ public:
+  DeprecatedTypeProperties(Backend backend, ScalarType scalar_type)
+    : backend_(backend), scalar_type_(scalar_type) {}
+
+  Backend backend() const {
+    return backend_;
+  }
+
+  Layout layout() const {
+    return layout_from_backend(backend_);
+  }
+
+  bool is_sparse() const {
+    return layout_from_backend(backend()) == kSparse;
+  }
+
+  bool is_sparse_csr() const {
+    return layout_from_backend(backend()) == kSparseCsr;
+  }
+
+  c10::DeviceType device_type() const {
+    return backendToDeviceType(backend_);
+  }
+
+  bool is_cuda() const {
+    return backendToDeviceType(backend_) == kCUDA;
+  }
+
+  ScalarType scalarType() const {
+    return scalar_type_;
+  }
+
+  caffe2::TypeMeta typeMeta() const {
+    return scalarTypeToTypeMeta(scalar_type_);
+  }
+
+  bool operator==(const DeprecatedTypeProperties& other) const {
+    return backend_ == other.backend() && scalar_type_ == other.scalarType();
+  }
+
+  bool operator!=(const DeprecatedTypeProperties& other) const {
+    return !(*this == other);
+  }
+
+  std::string toString() const {
+    std::string base_str;
+    if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) {
+      base_str = "UndefinedType";
+    } else {
+      base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type";
+    }
+    return base_str;
+  }
+
+  DeprecatedTypeProperties & toBackend(Backend b) const {
+    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+        b, scalar_type_);
+  }
+
+  DeprecatedTypeProperties & toScalarType(ScalarType s) const {
+    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+        backend_, s);
+  }
+
+  DeprecatedTypeProperties & cpu() const {
+    return toBackend(Backend::CPU);
+  }
+
+  DeprecatedTypeProperties & cuda() const {
+    return toBackend(Backend::CUDA);
+  }
+
+  DeprecatedTypeProperties & hip() const {
+    return toBackend(Backend::HIP);
+  }
+
+  /// Constructs the `TensorOptions` from a type and a `device_index`.
+  TensorOptions options(int16_t device_index = -1) const {
+    return TensorOptions().dtype(typeMeta())
+                          .device(device_type(), static_cast<c10::DeviceIndex>(device_index))
+                          .layout(layout());
+  }
+
+  /// Constructs the `TensorOptions` from a type and a Device. Asserts that
+  /// the device type matches the device type of the type.
+  TensorOptions options(c10::optional<Device> device_opt) const {
+    if (!device_opt.has_value()) {
+      return options(-1);
+    } else {
+      Device device = device_opt.value();
+      AT_ASSERT(device.type() == device_type());
+      return options(device.index());
+    }
+  }
+
+  operator TensorOptions() const {
+    return options();
+  }
+
+  int64_t id() const {
+    return static_cast<int64_t>(backend()) *
+           static_cast<int64_t>(ScalarType::NumOptions) +
+           static_cast<int64_t>(scalarType());
+  }
+
+  Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const;
+  Storage unsafeStorageFromTH(void * th_pointer, bool retain) const;
+  Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional<Device> to_device={}) const;
+
+ private:
+  Backend backend_;
+  ScalarType scalar_type_;
+};
+
+}  // namespace at
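A usage sketch for the class above (not part of the diff), assuming a full ATen build: properties objects are interned in the global registry declared in the next header, and they convert to a TensorOptions carrying the backend's device type, layout, and dtype.

#include <ATen/core/DeprecatedTypeProperties.h>

at::TensorOptions float_cpu_options() {
  at::DeprecatedTypeProperties& props =
      at::globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
          c10::Backend::CPU, c10::ScalarType::Float);
  return props.options();  // dtype=float32, device_type=CPU, layout=strided
}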
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h
ADDED
@@ -0,0 +1,31 @@
+#pragma once
+
+// In order to preserve bc, we make DeprecatedTypeProperties instances unique
+// just like they are for Type.
+
+#include <c10/core/Backend.h>
+#include <c10/core/ScalarType.h>
+
+namespace at {
+
+class DeprecatedTypeProperties;
+
+struct TORCH_API DeprecatedTypePropertiesDeleter {
+  void operator()(DeprecatedTypeProperties * ptr);
+};
+
+class TORCH_API DeprecatedTypePropertiesRegistry {
+ public:
+  DeprecatedTypePropertiesRegistry();
+
+  DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const;
+
+ private:
+  std::unique_ptr<DeprecatedTypeProperties> registry
+    [static_cast<int>(Backend::NumOptions)]
+    [static_cast<int>(ScalarType::NumOptions)];
+};
+
+TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();
+
+}  // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h
ADDED
@@ -0,0 +1,397 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <c10/macros/Export.h>
+#include <c10/util/TypeTraits.h>
+#include <c10/util/TypeList.h>
+#include <c10/util/intrusive_ptr.h>
+#include <c10/util/order_preserving_flat_hash_map.h>
+#include <c10/util/Optional.h>
+#include <ATen/core/TensorBody.h>
+#include <ATen/core/jit_type_base.h>
+
+namespace c10 {
+struct IValue;
+template<class Key, class Value> class Dict;
+struct Type;
+
+namespace impl {
+
+using valid_dict_key_types = guts::typelist::typelist<
+  int64_t,
+  std::string,
+  double,
+  c10::complex<double>,
+  bool,
+  at::Tensor
+>;
+}
+
+namespace detail {
+
+struct DictKeyHash {
+  size_t operator()(const IValue& ivalue) const;
+};
+
+struct DictKeyEqualTo {
+  bool operator()(const IValue& lhs, const IValue& rhs) const;
+};
+
+struct DictImpl final : public c10::intrusive_ptr_target {
+  using dict_map_type = ska_ordered::order_preserving_flat_hash_map<IValue, IValue, DictKeyHash, DictKeyEqualTo>;
+  struct DictElementTypes final {
+    TypePtr keyType;
+    TypePtr valueType;
+  };
+
+  explicit DictImpl(dict_map_type dict_, DictElementTypes elementTypes_)
+  : dict(std::move(dict_))
+  , elementTypes(std::move(elementTypes_)) {}
+  dict_map_type dict;
+
+  DictElementTypes elementTypes;
+
+  intrusive_ptr<DictImpl> copy() const;
+  friend TORCH_API bool operator==(const DictImpl& lhs, const DictImpl& rhs);
+};
+
+}
+
+namespace impl {
+template<class Key, class Value, class Iterator> class DictIterator;
+
+/**
+ * A reference to an entry in the Dict.
+ * Use the `key()` and `value()` methods to read the element.
+ */
+template<class Key, class Value, class Iterator>
+class DictEntryRef final {
+public:
+  explicit DictEntryRef(Iterator iterator)
+  : iterator_(std::move(iterator)) {}
+
+  decltype(auto) key() const {
+    return iterator_->first.template to<Key>();
+  }
+
+  decltype(auto) value() const {
+    return iterator_->second.template to<Value>();
+  }
+
+  template<class Value_>
+  void setValue(Value_&& value) const {
+    static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of setValue()");
+    iterator_->second = Value(std::forward<Value_>(value));
+  }
+
+private:
+  // allow copying and moving, but only our friends (i.e. the Dict class) can do
+  // it. Copying/moving this reference wrapper would be too ambiguous to allow it
+  // in the public API.
+  DictEntryRef(const DictEntryRef&) = default;
+  DictEntryRef& operator=(const DictEntryRef&) = default;
+  DictEntryRef(DictEntryRef&&) noexcept = default;
+  DictEntryRef& operator=(DictEntryRef&& rhs) & noexcept = default;
+
+  Iterator iterator_;
+  friend class DictIterator<Key, Value, Iterator>;
+  friend class Dict<Key, Value>;
+};
+
+// this wraps map_type::iterator to make sure user code can't rely
+// on it being the type of the underlying map.
+template<class Key, class Value, class Iterator>
+class DictIterator final {
+public:
+  // C++17 friendly std::iterator implementation
+  using iterator_category = std::forward_iterator_tag;
+  using value_type = DictEntryRef<Key, Value, Iterator>;
+  using difference_type = std::ptrdiff_t;
+  using pointer = value_type*;
+  using reference = value_type&;
+
+  explicit DictIterator() = default;
+  ~DictIterator() = default;
+
+  DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {}
+  DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {}
+  DictIterator& operator=(const DictIterator& rhs) {
+    entryRef_ = rhs.entryRef_;
+    return *this;
+  }
+  DictIterator& operator=(DictIterator&& rhs) noexcept {
+    entryRef_ = std::move(rhs.entryRef_);
+    return *this;
+  }
+
+  DictIterator& operator++() {
+    ++entryRef_.iterator_;
+    return *this;
+  }
+
+  DictIterator operator++(int) {
+    DictIterator copy(*this);
+    ++*this;
+    return copy;
+  }
+
+  const DictEntryRef<Key, Value, Iterator>& operator*() const {
+    return entryRef_;
+  }
+
+  const DictEntryRef<Key, Value, Iterator>* operator->() const {
+    return &entryRef_;
+  }
+
+  friend difference_type operator-(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.entryRef_.iterator_ - rhs.entryRef_.iterator_;
+  }
+
+private:
+  explicit DictIterator(Iterator iterator): entryRef_(std::move(iterator)) {}
+
+  const Iterator& get_iterator_() const {
+    return entryRef_.iterator_;
+  }
+
+  friend bool operator==(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.get_iterator_() == rhs.get_iterator_();
+  }
+
+  friend bool operator!=(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.get_iterator_() != rhs.get_iterator_();
+  }
+
+  friend bool operator<(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.get_iterator_() < rhs.get_iterator_();
+  }
+
+  friend bool operator<=(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.get_iterator_() <= rhs.get_iterator_();
+  }
+
+  friend bool operator>(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.get_iterator_() > rhs.get_iterator_();
+  }
+
+  friend bool operator>=(const DictIterator& lhs, const DictIterator& rhs) {
+    return lhs.get_iterator_() >= rhs.get_iterator_();
+  }
+
+  DictEntryRef<Key, Value, Iterator> entryRef_;
+
+  friend class DictIterator<Key, Value, typename c10::detail::DictImpl::dict_map_type::iterator>;
+  friend class Dict<Key, Value>;
+};
+
+template<class Key, class Value> Dict<Key, Value> toTypedDict(Dict<IValue, IValue> dict);
+template<class Key, class Value> Dict<IValue, IValue> toGenericDict(Dict<Key, Value> dict);
+}
+
+/**
+ * An object of this class stores a map from Key to Value.
+ *
+ * This is a pointer type. After a copy, both Dicts
+ * will share the same storage:
+ *
+ * > Dict<int, string> a;
+ * > Dict<int, string> b = a;
+ * > b.insert(3, "three");
+ * > ASSERT("three" == a.at(3));
+ *
+ * We use this class in the PyTorch kernel API because that
+ * allows us to do optimizations and switch out the underlying
+ * map implementation without breaking backwards compatibility
+ * for the kernel API.
+ */
+template<class Key, class Value>
+class Dict final {
+private:
+  static_assert((std::is_same<IValue, Key>::value && std::is_same<IValue, Value>::value) || guts::typelist::contains<impl::valid_dict_key_types, Key>::value, "Invalid Key type for Dict. We only support int64_t, double, bool, and string.");
+
+  // impl_ stores the underlying map as a ska_ordered::order_preserving_flat_hash_map.
+  // We intentionally don't offer conversion from/to
+  // order_preserving_flat_hash_map, return references to it or something like that,
+  // because such operations would get expensive if we switch out
+  // the actual map implementation.
+  // This is an intrusive_ptr because Dict is a pointer type.
+  // Invariant: This will never be a nullptr, there will always be a valid
+  // DictImpl.
+  c10::intrusive_ptr<detail::DictImpl> impl_;
+
+  explicit Dict(c10::intrusive_ptr<detail::DictImpl>&& impl);
+  friend struct IValue;
+  template<class K, class V> friend Dict<K, V> impl::toTypedDict(Dict<IValue, IValue>);
+  template<class K, class V> friend Dict<IValue, IValue> impl::toGenericDict(Dict<K, V>);
+
+public:
+  using key_type = Key;
+  using mapped_type = Value;
+  using size_type = typename detail::DictImpl::dict_map_type::size_type;
+  using iterator = impl::DictIterator<Key, Value, typename detail::DictImpl::dict_map_type::iterator>;
+
+  /**
+   * Creates an empty dict.
+   */
+  explicit Dict();
+
+  /**
+   * Create a generic dict with runtime type information.
+   * This only works for c10::impl::GenericDict and is not part of the public API
+   * but only supposed to be used internally by PyTorch.
+   */
+  explicit Dict(TypePtr keyType, TypePtr valueType);
+
+  ~Dict() = default;
+
+  Dict(const Dict&) = default;
+  Dict& operator=(const Dict&) = default;
+
+  /**
+   * Create a new Dict pointing to a deep copy of the same data.
+   * The Dict returned is a new dict with separate storage.
+   * Changes in it are not reflected in the original dict or vice versa.
+   */
+  Dict copy() const;
+
+  /**
+   * Returns an iterator to the first element of the container.
+   * If the container is empty, the returned iterator will be equal to end().
+   */
+  iterator begin() const;
+
+  /**
+   * Returns an iterator to the element following the last element of the container.
+   * This element acts as a placeholder; attempting to access it results in undefined behavior.
+   */
+  iterator end() const;
+
+  /**
+   * Checks if the container has no elements.
+   */
+  bool empty() const;
+
+  /**
+   * Returns the number of elements in the container.
+   */
+  size_type size() const;
+
+  /**
+   * Erases all elements from the container. After this call, size() returns zero.
+   * Invalidates any references, pointers, or iterators referring to contained elements. May also invalidate past-the-end iterators.
+   */
+  void clear() const;
+
+  /**
+   * Inserts element(s) into the container, if the container doesn't already contain an element with an equivalent key.
+   * May invalidate any references, pointers, or iterators referring to contained elements.
+   *
+   * @return A pair consisting of an iterator to the inserted element (or to the element that prevented the insertion) and a bool denoting whether the insertion took place.
+   */
+  template<class Key_, class Value_>
+  std::pair<iterator, bool> insert(Key_&& key, Value_&& value) const;
+
+  /**
+   * If an element with the given key already exists, it is overwritten with the given value.
+   * Otherwise, a new element with the given key and value are inserted.
+   * May invalidate any references, pointers, or iterators referring to contained elements.
+   *
+   * @return The bool component is true if the insertion took place and false if the assignment took place. The iterator component is pointing at the element that was inserted or updated.
+   */
+  template<class Key_, class Value_>
+  std::pair<iterator, bool> insert_or_assign(Key_&& key, Value_&& value) const;
+
+  /**
+   * Removes the element pointed to by iter.
+   * May invalidate any references, pointers, or iterators referring to contained elements.
+   * The iterator iter must be valid and dereferenceable. Thus the end() iterator (which is valid, but is not dereferenceable) cannot be used as a value for iter.
+   */
+  void erase(iterator iter) const;
+
+  /**
+   * Removes the element with the given key, if it exists.
+   * May invalidate any references, pointers, or iterators referring to contained elements.
+   *
+   * @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't.
+   */
+  C10_NODISCARD size_t erase(const Key& key) const;
+
+  /**
+   * Returns the mapped value of the element with key equivalent to key.
+   * If no such element exists, an exception of type std::out_of_range is thrown.
+   */
+  Value at(const Key& key) const;
+
+  /**
+   * Finds an element with key equivalent to key.
+   *
+   * @return Iterator to an element with key equivalent to key.
+   * If no such element is found, past-the-end (see end()) iterator is returned.
+   */
+  iterator find(const Key& key) const;
+
+  /**
+   * Checks if there is an element with key equivalent to key in the container.
+   *
+   * @return true if there is such an element, otherwise false.
+   */
+  bool contains(const Key& key) const;
+
+  /**
+   * Increase the capacity so that at least count elements can be stored without
+   * having to reallocate or rehash.
+   */
+  void reserve(size_type count) const;
+
+  /**
+   * Value equality comparison. This function implements Python-like semantics for
+   * equality: two dicts with the same identity (e.g. same pointer) trivially
+   * compare equal, otherwise each element is compared for equality.
+   */
+  template <class Key_, class Value_>
+  friend bool operator==(
+      const Dict<Key_, Value_>& lhs,
+      const Dict<Key_, Value_>& rhs);
+  template <class Key_, class Value_>
+  friend bool operator!=(
+      const Dict<Key_, Value_>& lhs,
+      const Dict<Key_, Value_>& rhs);
+
+  /**
+   * Identity comparison. Returns true if and only if `rhs` represents the same
+   * Dict object as `this`.
+   */
+  bool is(const Dict& rhs) const;
+
+  // private API for now because the return type will change to TypePtr
+  // instead of optional<TypePtr> once types are mandatory.
+  TypePtr keyType() const;
+  TypePtr valueType() const;
+
+  // [unsafe set type]
+  // These functions mutate the tagged type of this dictionary in place.
+  // There is no checking that the members of the dictionary are instances
+  // of the new types, nor is there a check that other IValues which
+  // hold references to this dictionary have the right static type.
+  // This functionality is used only in the unpickler, where at
+  // creation type the real type of the dictionary is unknown, but
+  // then later recovered from the static type information of the
+  // unpickled object.
+  void unsafeSetKeyType(TypePtr t);
+  void unsafeSetValueType(TypePtr t);
+};
+
+namespace impl {
+// GenericDict is how IValue stores dicts. It is, however, not part of the
+// public API. Kernels should use Dicts with concrete Key, Value types instead
+// (maybe except for some internal prim ops).
+using GenericDict = Dict<IValue, IValue>;
+
+}
+}
+
+namespace torch {
+  template<class Key, class Value> using Dict = c10::Dict<Key, Value>;
+}
+
+#include <ATen/core/Dict_inl.h>  // IWYU pragma: keep
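A brief sketch of the pointer semantics documented in the class comment above (not part of the diff), assuming a full PyTorch build: copies of a c10::Dict share storage, and copy() is how you get independent storage.

#include <ATen/core/Dict.h>
#include <string>

void dict_sharing_demo() {
  c10::Dict<std::string, int64_t> a;
  a.insert("answer", 42);                        // returns {iterator, bool}
  c10::Dict<std::string, int64_t> b = a;         // b shares a's DictImpl
  b.insert_or_assign("answer", 43);              // now a.at("answer") == 43 as well
  c10::Dict<std::string, int64_t> c = a.copy();  // deep copy: separate storage
  c.insert_or_assign("answer", 1);               // does not affect a or b
}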
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h
ADDED
@@ -0,0 +1,209 @@
+#pragma once
+
+#include <ATen/core/ivalue.h>
+#include <c10/util/hash.h>
+
+namespace c10 {
+namespace detail {
+inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const {
+  if (lhs.isTensor() && rhs.isTensor()) {
+    // for tensors, we compare only by identity (following how it's done in Python).
+    return lhs.is(rhs);
+  }
+  // Otherwise, we first compare by identity for efficiency, then by value (see:
+  // [container equality])
+  return _fastEqualsForContainer(lhs, rhs);
+}
+}
+
+template<class T> decltype(auto) getTypePtr();
+std::string toString(const Type& type);
+
+namespace impl {
+
+template<class Key, class Value>
+Dict<Key, Value> toTypedDict(GenericDict dict) {
+  TORCH_INTERNAL_ASSERT(*getTypePtr<Key>() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Key types mismatch.");
+  TORCH_INTERNAL_ASSERT(*getTypePtr<Value>() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Value types mismatch.");
+
+  return Dict<Key, Value>(std::move(dict.impl_));
+}
+
+template<class Key, class Value>
+GenericDict toGenericDict(Dict<Key, Value> dict) {
+  return GenericDict(std::move(dict.impl_));
+}
+}
+
+namespace detail {
+
+inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
+  if (ivalue.isInt()) {
+    return std::hash<int64_t>()(ivalue.toInt());
+  } else if (ivalue.isString()) {
+    return std::hash<c10::string_view>()(ivalue.toStringView());
+  } else if (ivalue.isDouble()) {
+    return std::hash<double>()(ivalue.toDouble());
+  } else if (ivalue.isComplexDouble()) {
+    return c10::hash<c10::complex<double>>()(ivalue.toComplexDouble());
+  } else if (ivalue.isBool()) {
+    return std::hash<bool>()(ivalue.toBool());
+  } else if (ivalue.isTensor()) {
+    return std::hash<TensorImpl*>()(ivalue.toTensor().unsafeGetTensorImpl());
+  } else if (ivalue.isDevice()) {
+    return std::hash<Device>()(ivalue.toDevice());
+  } else {
+    throw std::runtime_error(
+        "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
+  }
+}
+
+inline intrusive_ptr<DictImpl> DictImpl::copy() const {
+  return make_intrusive<DictImpl>(dict, elementTypes);
+}
+
+}
+
+template<class Key, class Value>
+Dict<Key, Value>::Dict()
+  :Dict(make_intrusive<detail::DictImpl>(
+      detail::DictImpl::dict_map_type(),
+      detail::DictImpl::DictElementTypes{getTypePtr<Key>(), getTypePtr<Value>()})) {
+  static_assert(!std::is_same<Key, IValue>::value, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
+  static_assert(!std::is_same<Value, IValue>::value, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
+}
+
+template<class Key, class Value>
+Dict<Key, Value>::Dict(TypePtr keyType, TypePtr valueType)
+: Dict(make_intrusive<detail::DictImpl>(
+    detail::DictImpl::dict_map_type(),
+    detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) {
+  static_assert(std::is_same<Key, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
+  static_assert(std::is_same<Value, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
+}
+
+template<class Key, class Value>
+Dict<Key, Value>::Dict(c10::intrusive_ptr<detail::DictImpl>&& impl): impl_(std::move(impl)) {}
+
+template<class Key, class Value>
+Dict<Key, Value> Dict<Key, Value>::copy() const {
+  return Dict<Key, Value>(impl_->copy());
+}
+
+template<class Key, class Value>
+typename Dict<Key, Value>::iterator Dict<Key, Value>::begin() const {
+  return iterator{impl_->dict.begin()};
+}
+
+template<class Key, class Value>
+typename Dict<Key, Value>::iterator Dict<Key, Value>::end() const {
+  return iterator{impl_->dict.end()};
+}
+
+template<class Key, class Value>
+bool Dict<Key, Value>::empty() const {
+  return impl_->dict.empty();
+}
+
+template<class Key, class Value>
+typename Dict<Key, Value>::size_type Dict<Key, Value>::size() const {
+  return impl_->dict.size();
+}
+
+template<class Key, class Value>
+void Dict<Key, Value>::clear() const {
+  impl_->dict.clear();
+}
+
+template<class Key, class Value>
+template<class Key_, class Value_>
+std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Key_&& key, Value_&& value) const {
+  static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert");
+  static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert");
+  auto inserted = impl_->dict.insert(std::pair<IValue, IValue>{
+    Key(std::forward<Key_>(key)),
+    Value(std::forward<Value_>(value))});
+  return {iterator{inserted.first}, inserted.second};
+}
+
+template<class Key, class Value>
+template<class Key_, class Value_>
+std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert_or_assign(Key_&& key, Value_&& value) const {
+  static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert_or_assign");
+  static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert_or_assign");
+  auto inserted = impl_->dict.insert_or_assign(
+    Key(std::forward<Key_>(key)),
+    Value(std::forward<Value_>(value)));
+  return {iterator{inserted.first}, inserted.second};
+}
+
+template<class Key, class Value>
+void Dict<Key, Value>::erase(iterator iter) const {
+  impl_->dict.erase(iter.entryRef_.iterator_);
+}
+
+template<class Key, class Value>
+C10_NODISCARD size_t Dict<Key, Value>::erase(const Key& key) const {
+  return impl_->dict.erase(key);
+}
+
+template<class Key, class Value>
+Value Dict<Key, Value>::at(const Key& key) const {
+  return impl_->dict.at(key).template to<Value>();
+}
+
+template<class Key, class Value>
+typename Dict<Key, Value>::iterator Dict<Key, Value>::find(const Key& key) const {
+  return iterator{impl_->dict.find(key)};
+}
+
+template<class Key, class Value>
+bool Dict<Key, Value>::contains(const Key& key) const {
+  return end() != find(key);
+}
+
+template<class Key, class Value>
+void Dict<Key, Value>::reserve(size_type count) const {
+  impl_->dict.reserve(count);
+}
+
+template<class Key, class Value>
+TypePtr Dict<Key, Value>::keyType() const {
+  return impl_->elementTypes.keyType;
+}
+
+template<class Key, class Value>
+TypePtr Dict<Key, Value>::valueType() const {
+  return impl_->elementTypes.valueType;
+}
+template <class Key, class Value>
+void Dict<Key, Value>::unsafeSetKeyType(TypePtr t) {
+  impl_->elementTypes.keyType = std::move(t);
+}
+
+template <class Key, class Value>
+void Dict<Key, Value>::unsafeSetValueType(TypePtr t) {
+  impl_->elementTypes.valueType = std::move(t);
+}
+
+template <class Key_, class Value_>
+bool operator==(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
+  // Dicts with the same identity trivially compare equal.
+  if (lhs.impl_ == rhs.impl_) {
+    return true;
+  }
+
+  // Otherwise compare the values
+  return *lhs.impl_ == *rhs.impl_;
+}
+
+template <class Key_, class Value_>
+bool operator!=(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
+  return !(lhs == rhs);
+}
+
+template <class Key, class Value>
+bool Dict<Key, Value>::is(const Dict& rhs) const {
+  return this->impl_ == rhs.impl_;
+}
+}
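A sketch of the key semantics implemented by DictKeyHash and DictKeyEqualTo above (not part of the diff), assuming a full PyTorch build: at::Tensor keys are hashed and compared by TensorImpl identity, so two tensors with equal values are still distinct keys.

#include <ATen/ATen.h>
#include <ATen/core/Dict.h>

void tensor_key_demo() {
  c10::Dict<at::Tensor, int64_t> d;
  at::Tensor k1 = at::ones({2});
  at::Tensor k2 = at::ones({2});  // same values, different TensorImpl
  d.insert(k1, 1);
  d.insert(k2, 2);
  // d.size() == 2 and d.contains(k1) is true, but a freshly created at::ones({2})
  // would not be found, because lookup compares identity rather than values.
}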
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h
ADDED
@@ -0,0 +1,13 @@
+#pragma once
+#include <c10/util/DimVector.h>
+
+namespace at {
+
+// Re-declaring 'DimVector' type and size inside 'at' namespace.
+// This is done to avoid modifying every use into their 'c10'
+// equivalent.
+
+using c10::kDimVectorStaticSize;
+using c10::DimVector;
+
+}  // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h
ADDED
@@ -0,0 +1,337 @@
+#pragma once
+
+#include <ATen/core/Array.h>
+#include <ATen/core/TransformationHelper.h>
+#include <c10/util/Half.h>
+#include <c10/util/BFloat16.h>
+#include <c10/util/MathConstants.h>
+#include <c10/util/Optional.h>
+#include <c10/macros/Macros.h>
+
+#include <type_traits>
+#include <limits>
+#include <cmath>
+
+/**
+ * Distributions kernel adapted from THRandom.cpp
+ * The kernels try to follow std::random distributions signature
+ * For instance: in ATen
+ *      auto gen = at::detail::createCPUGenerator();
+ *      at::uniform_real_distribution<double> uniform(0, 1);
+ *      auto sample = uniform(gen.get());
+ *
+ *      vs std::random
+ *
+ *      std::mt19937 gen;
+ *      std::uniform_real_distribution uniform(0, 1);
+ *      auto sample = uniform(gen);
+ */
+
+
+namespace at {
+namespace {
+
+/**
+ * Samples a discrete uniform distribution in the range [base, base+range) of type T
+ */
+template <typename T>
+struct uniform_int_from_to_distribution {
+
+  C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {}
+
+  template <typename RNG>
+  C10_HOST_DEVICE inline T operator()(RNG generator) {
+    if ((
+      std::is_same<T, int64_t>::value ||
+      std::is_same<T, double>::value ||
+      std::is_same<T, float>::value ||
+      std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32)
+    {
+      return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_);
+    } else {
+      return transformation::uniform_int_from_to<T>(generator->random(), range_, base_);
+    }
+  }
+
+  private:
+    uint64_t range_;
+    int64_t base_;
+};
+
+/**
+ * Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)]
+ */
+template <typename T>
+struct uniform_int_full_range_distribution {
+
+  template <typename RNG>
+  C10_HOST_DEVICE inline T operator()(RNG generator) {
+    return transformation::uniform_int_full_range<T>(generator->random64());
+  }
+
+};
+
+/**
+ * Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types
+ * and [0, 2^mantissa] for floating-point types.
+ */
+template <typename T>
+struct uniform_int_distribution {
+
+  template <typename RNG>
+  C10_HOST_DEVICE inline T operator()(RNG generator) {
+    if constexpr (std::is_same_v<T, double> || std::is_same_v<T, int64_t>) {
+      return transformation::uniform_int<T>(generator->random64());
+    } else {
+      return transformation::uniform_int<T>(generator->random());
+    }
+  }
+
+};
+
+/**
+ * Samples a uniform distribution in the range [from, to) of type T
+ */
+template <typename T>
+struct uniform_real_distribution {
+
+  C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
+    TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
+    TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
+    from_ = from;
+    to_ = to;
+  }
+
+  template <typename RNG>
+  C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
+    if constexpr (std::is_same_v<T, double>) {
+      return transformation::uniform_real<T>(generator->random64(), from_, to_);
+    } else {
+      return transformation::uniform_real<T>(generator->random(), from_, to_);
+    }
+  }
+
+  private:
+    T from_;
+    T to_;
+};
+
+// The SFINAE checks introduced in #39816 looks overcomplicated and must revisited
+// https://github.com/pytorch/pytorch/issues/40052
+#define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member)               \
+template <typename T>                                                 \
+struct has_member_##member                                            \
+{                                                                     \
+    typedef char yes;                                                 \
+    typedef long no;                                                  \
+    template <typename U> static yes test(decltype(&U::member));      \
+    template <typename U> static no test(...);                        \
+    static constexpr bool value = sizeof(test<T>(0)) == sizeof(yes);  \
+}
+
+DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample);
+DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample);
+DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample);
+DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample);
+
+#define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE)                                      \
+                                                                                                     \
+template <typename RNG, typename ret_type,                                                           \
+          typename std::enable_if_t<(                                                                \
+            has_member_next_##TYPE##_normal_sample<RNG>::value &&                                    \
+            has_member_set_next_##TYPE##_normal_sample<RNG>::value                                   \
+          ), int> = 0>                                                                               \
+C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) {  \
+  if (generator->next_##TYPE##_normal_sample()) {                                                    \
+    *ret = *(generator->next_##TYPE##_normal_sample());                                              \
+    generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>());                               \
+    return true;                                                                                     \
+  }                                                                                                  \
+  return false;                                                                                      \
+}                                                                                                    \
+                                                                                                     \
+template <typename RNG, typename ret_type,                                                           \
+          typename std::enable_if_t<(                                                                \
+            !has_member_next_##TYPE##_normal_sample<RNG>::value ||                                   \
+            !has_member_set_next_##TYPE##_normal_sample<RNG>::value                                  \
+          ), int> = 0>                                                                               \
+C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \
+  return false;                                                                                      \
+}                                                                                                    \
+                                                                                                     \
+template <typename RNG, typename ret_type,                                                           \
+          typename std::enable_if_t<(                                                                \
+            has_member_set_next_##TYPE##_normal_sample<RNG>::value
|
165 |
+
), int> = 0> \
|
166 |
+
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \
|
167 |
+
generator->set_next_##TYPE##_normal_sample(cache); \
|
168 |
+
} \
|
169 |
+
\
|
170 |
+
template <typename RNG, typename ret_type, \
|
171 |
+
typename std::enable_if_t<( \
|
172 |
+
!has_member_set_next_##TYPE##_normal_sample<RNG>::value \
|
173 |
+
), int> = 0> \
|
174 |
+
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \
|
175 |
+
}
|
176 |
+
|
177 |
+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double);
|
178 |
+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
|
179 |
+
|
180 |
+
/**
|
181 |
+
* Samples a normal distribution using the Box-Muller method
|
182 |
+
* Takes mean and standard deviation as inputs
|
183 |
+
* Note that Box-muller method returns two samples at a time.
|
184 |
+
* Hence, we cache the "next" sample in the CPUGeneratorImpl class.
|
185 |
+
*/
|
186 |
+
template <typename T>
|
187 |
+
struct normal_distribution {
|
188 |
+
|
189 |
+
C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
|
190 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
|
191 |
+
mean = mean_in;
|
192 |
+
stdv = stdv_in;
|
193 |
+
}
|
194 |
+
|
195 |
+
template <typename RNG>
|
196 |
+
C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
|
197 |
+
dist_acctype<T> ret;
|
198 |
+
// return cached values if available
|
199 |
+
if constexpr (std::is_same_v<T, double>) {
|
200 |
+
if (maybe_get_next_double_normal_sample(generator, &ret)) {
|
201 |
+
return transformation::normal(ret, mean, stdv);
|
202 |
+
}
|
203 |
+
} else {
|
204 |
+
if (maybe_get_next_float_normal_sample(generator, &ret)) {
|
205 |
+
return transformation::normal(ret, mean, stdv);
|
206 |
+
}
|
207 |
+
}
|
208 |
+
// otherwise generate new normal values
|
209 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
210 |
+
const dist_acctype<T> u1 = uniform(generator);
|
211 |
+
const dist_acctype<T> u2 = uniform(generator);
|
212 |
+
const dist_acctype<T> r = ::sqrt(static_cast<T>(-2.0) * ::log1p(-u2));
|
213 |
+
const dist_acctype<T> theta = static_cast<T>(2.0) * c10::pi<T> * u1;
|
214 |
+
if constexpr (std::is_same_v<T, double>) {
|
215 |
+
maybe_set_next_double_normal_sample(generator, r * ::sin(theta));
|
216 |
+
} else {
|
217 |
+
maybe_set_next_float_normal_sample(generator, r * ::sin(theta));
|
218 |
+
}
|
219 |
+
ret = r * ::cos(theta);
|
220 |
+
return transformation::normal(ret, mean, stdv);
|
221 |
+
}
|
222 |
+
|
223 |
+
private:
|
224 |
+
T mean;
|
225 |
+
T stdv;
|
226 |
+
};
|
227 |
+
|
228 |
+
template <typename T>
|
229 |
+
struct DiscreteDistributionType { using type = float; };
|
230 |
+
|
231 |
+
template <> struct DiscreteDistributionType<double> { using type = double; };
|
232 |
+
|
233 |
+
/**
|
234 |
+
* Samples a bernoulli distribution given a probability input
|
235 |
+
*/
|
236 |
+
template <typename T>
|
237 |
+
struct bernoulli_distribution {
|
238 |
+
|
239 |
+
C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
|
240 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
|
241 |
+
p = p_in;
|
242 |
+
}
|
243 |
+
|
244 |
+
template <typename RNG>
|
245 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
246 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
247 |
+
return transformation::bernoulli<T>(uniform(generator), p);
|
248 |
+
}
|
249 |
+
|
250 |
+
private:
|
251 |
+
T p;
|
252 |
+
};
|
253 |
+
|
254 |
+
/**
|
255 |
+
* Samples a geometric distribution given a probability input
|
256 |
+
*/
|
257 |
+
template <typename T>
|
258 |
+
struct geometric_distribution {
|
259 |
+
|
260 |
+
C10_HOST_DEVICE inline geometric_distribution(T p_in) {
|
261 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
|
262 |
+
p = p_in;
|
263 |
+
}
|
264 |
+
|
265 |
+
template <typename RNG>
|
266 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
267 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
268 |
+
return transformation::geometric<T>(uniform(generator), p);
|
269 |
+
}
|
270 |
+
|
271 |
+
private:
|
272 |
+
T p;
|
273 |
+
};
|
274 |
+
|
275 |
+
/**
|
276 |
+
* Samples an exponential distribution given a lambda input
|
277 |
+
*/
|
278 |
+
template <typename T>
|
279 |
+
struct exponential_distribution {
|
280 |
+
|
281 |
+
C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {}
|
282 |
+
|
283 |
+
template <typename RNG>
|
284 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
285 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
286 |
+
return transformation::exponential<T>(uniform(generator), lambda);
|
287 |
+
}
|
288 |
+
|
289 |
+
private:
|
290 |
+
T lambda;
|
291 |
+
};
|
292 |
+
|
293 |
+
/**
|
294 |
+
* Samples a cauchy distribution given median and sigma as inputs
|
295 |
+
*/
|
296 |
+
template <typename T>
|
297 |
+
struct cauchy_distribution {
|
298 |
+
|
299 |
+
C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {}
|
300 |
+
|
301 |
+
template <typename RNG>
|
302 |
+
C10_HOST_DEVICE inline T operator()(RNG generator) {
|
303 |
+
uniform_real_distribution<T> uniform(0.0, 1.0);
|
304 |
+
return transformation::cauchy<T>(uniform(generator), median, sigma);
|
305 |
+
}
|
306 |
+
|
307 |
+
private:
|
308 |
+
T median;
|
309 |
+
T sigma;
|
310 |
+
};
|
311 |
+
|
312 |
+
/**
|
313 |
+
* Samples a lognormal distribution
|
314 |
+
* Takes mean and standard deviation as inputs
|
315 |
+
* Outputs two samples at a time
|
316 |
+
*/
|
317 |
+
template <typename T>
|
318 |
+
struct lognormal_distribution {
|
319 |
+
|
320 |
+
C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
|
321 |
+
TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
|
322 |
+
mean = mean_in;
|
323 |
+
stdv = stdv_in;
|
324 |
+
}
|
325 |
+
|
326 |
+
template<typename RNG>
|
327 |
+
C10_HOST_DEVICE inline T operator()(RNG generator){
|
328 |
+
normal_distribution<T> normal(mean, stdv);
|
329 |
+
return transformation::log_normal<T>(normal(generator));
|
330 |
+
}
|
331 |
+
|
332 |
+
private:
|
333 |
+
T mean;
|
334 |
+
T stdv;
|
335 |
+
};
|
336 |
+
}
|
337 |
+
} // namespace at
|
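
Illustrative sketch (not part of the file above): the header comment describes driving these functors with a CPU generator. The snippet below follows that pattern, assuming the generator is backed by at::CPUGeneratorImpl (whose random()/random64() methods the functors call) and taking the lock described in Generator.h; it is a hedged example, not the file's own code.

    // Sketch only. Assumes ATen headers are available and a CPU generator backend.
    #include <ATen/CPUGeneratorImpl.h>
    #include <ATen/core/DistributionsHelper.h>
    #include <mutex>

    void sample_some_values() {
      auto gen = at::detail::createCPUGenerator();                     // at::Generator
      auto* cpu_gen = at::check_generator<at::CPUGeneratorImpl>(gen);  // backend impl*

      // Non-read-only generator use should hold the generator mutex (see Generator.h).
      std::lock_guard<std::mutex> lock(gen.mutex());

      at::uniform_real_distribution<double> uniform(0, 1);   // samples in [0, 1)
      double u = uniform(cpu_gen);

      at::normal_distribution<double> normal(/*mean=*/0.0, /*stdv=*/1.0);
      double n = normal(cpu_gen);   // Box-Muller; the second sample is cached on the generator
      (void)u; (void)n;
    }
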
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h
ADDED
@@ -0,0 +1,25 @@
#pragma once

#include <ostream>
#include <string>

#include <c10/core/Scalar.h>
#include <ATen/core/Tensor.h>

namespace c10 {
TORCH_API std::ostream& operator<<(std::ostream& out, Backend b);
TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s);
TORCH_API std::string toString(const Scalar& s);
}
namespace at {

TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
TORCH_API std::ostream& print(
    std::ostream& stream,
    const Tensor& tensor,
    int64_t linesize);
static inline std::ostream& operator<<(std::ostream & out, const Tensor & t) {
  return print(out,t,80);
}
TORCH_API void print(const Tensor & t, int64_t linesize=80);
}
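
Illustrative sketch (not part of the file above): the streaming operator declared here simply forwards to print() with a line size of 80, so the two calls below are equivalent ways to dump a tensor.

    // Sketch only, assuming an already-constructed at::Tensor.
    #include <ATen/core/Formatting.h>
    #include <iostream>

    void show(const at::Tensor& t) {
      std::cout << t << std::endl;        // operator<< == print(std::cout, t, 80)
      at::print(std::cout, t, /*linesize=*/80);
    }
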
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h
ADDED
@@ -0,0 +1,191 @@
#pragma once

#include <mutex>
#include <deque>
#include <atomic>
#include <typeinfo>
#include <utility>
#include <cstddef>
#include <cstdint>

#include <c10/util/Exception.h>
#include <c10/util/C++17.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/core/Device.h>
#include <c10/core/DispatchKeySet.h>

// For the record I don't think this is a correct pimpl idiom.
// Including Impl header in interface header defeats the purpose
// because you can't change Impl private members without forcing
// everything that included the interface to rebuild.
// Impl should be forward-declared in the interface header instead.
#include <c10/core/GeneratorImpl.h>

/**
 * Note [Generator]
 * ~~~~~~~~~~~~~~~~
 * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm to
 * generate a seemingly random sequence of numbers, that may be later be used in creating
 * a random distribution. Such an engine almost always maintains a state and requires a
 * seed to start off the creation of random numbers. Often times, users have
 * found it beneficial to be able to explicitly create, retain, and destroy
 * PRNG states and also be able to have control over the seed value.
 *
 * A Generator in ATen gives users the ability to read, write and modify a PRNG engine.
 * For instance, it does so by letting users seed a PRNG engine, fork the state of the
 * engine, etc.
 *
 * By default, there is one generator per device, and a device's generator is
 * lazily created. A user can use the torch.Generator() api to create their own generator.
 */

/**
 * Note [Acquire lock when using random generators]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Generator and its derived classes are NOT thread-safe. Please note that most of the
 * places where we have inserted locking for generators are historically based, and we
 * haven't actually checked that everything is truly thread safe (and it probably isn't).
 * Please use the public mutex_ when using any methods from these classes, except for the
 * read-only methods. You can learn about the usage by looking into the unittests
 * (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard.
 *
 * TODO: Look into changing the threading semantics of Generators in ATen (e.g., making
 * them non-thread safe and instead making the generator state splittable, to accommodate
 * forks into other threads).
 */

namespace at {

class Tensor;

struct TORCH_API Generator {
  Generator() = default;

  explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl)
   : impl_(std::move(gen_impl)) {
    if (impl_.get() == nullptr) {
      throw std::runtime_error("GeneratorImpl with nullptr is not supported");
    }
  }

  bool operator==(const Generator& rhs) const {
    return this->impl_ == rhs.impl_;
  }

  bool operator!=(const Generator& rhs) const {
    return !((*this) == rhs);
  }

  bool defined() const {
    return static_cast<bool>(impl_);
  }

  c10::GeneratorImpl* unsafeGetGeneratorImpl() const {
    return impl_.get();
  }

  c10::GeneratorImpl* unsafeReleaseGeneratorImpl() {
    return impl_.release();
  }

  const c10::intrusive_ptr<c10::GeneratorImpl>& getIntrusivePtr() const {
    return impl_;
  }

  void set_current_seed(uint64_t seed) { impl_->set_current_seed(seed); }
  // Sets the offset of Generator state to the desired offset. This is currently
  // supported for only Philox based Generators, i.e., CUDA and MPS.
  void set_offset(uint64_t offset) { impl_->set_offset(offset); }

  // Returns the offset of Generator state. This is currently supported for only
  // Philox based Generators, i.e., CUDA and MPS.
  uint64_t get_offset() const { return impl_->get_offset(); }

  uint64_t current_seed() const { return impl_->current_seed(); }

  uint64_t seed() { return impl_->seed(); }

  // Implementation not inlined to prevent cycle reference between
  // `ATen/core/Generator.h` and `ATen/core/Tensor.h`
  void set_state(const at::Tensor& new_state);

  at::Tensor get_state() const;

  std::mutex& mutex() {
    return impl_->mutex_;
  }

  DispatchKeySet key_set() const {
    return impl_->key_set();
  }

  Device device() const { return impl_->device(); }

  inline void set_pyobj(PyObject* pyobj) const noexcept {
    impl_->set_pyobj(pyobj);
  }

  inline PyObject* pyobj() const noexcept {
    return impl_->pyobj();
  }

  template<typename T>
  T* get() const { return static_cast<T*>(impl_.get()); }

  Generator clone() const {
    return Generator(impl_->clone());
  }

 private:
  c10::intrusive_ptr<c10::GeneratorImpl> impl_;
};

template<class Impl, class... Args>
Generator make_generator(Args&&... args) {
  return Generator(c10::make_intrusive<Impl>(std::forward<Args>(args)...));
}

/**
 * Utility function to static cast input Generator* to
 * the backend generator type (CPU/CUDAGeneratorImpl etc.)
 */
template <typename T>
static inline T * check_generator(c10::optional<Generator> gen) {
  TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
  TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
  TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
  return gen->get<T>();
}

/**
 * Utility function used in tensor implementations, which
 * supplies the default generator to tensors, if an input generator
 * is not supplied. The input Generator* is also static casted to
 * the backend generator type (CPU/CUDAGeneratorImpl etc.)
 */
template <typename T>
static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) {
  return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
}

namespace detail {

/**
 * Helper function for checking the validity of new random generator
 * state. Right now following conditions are checked:
 *
 * - The new state tensor must be a torch.ByteTensor
 * - Data of the new state tensor must be contiguous
 */
static inline void check_rng_state(const c10::TensorImpl& new_state) {
  TORCH_CHECK_TYPE(
    new_state.layout() == kStrided && new_state.device().type() == kCPU && new_state.dtype() == kByte,
    "RNG state must be a torch.ByteTensor"
  );

  TORCH_CHECK(new_state.is_contiguous(), "RNG state must be contiguous");
}

} // namespace detail

} // namespace at
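
Illustrative sketch (not part of the file above): make_generator() wraps a concrete GeneratorImpl into the value-type Generator, and Note [Acquire lock when using random generators] asks callers to hold the generator mutex for non-read-only methods. The example below assumes a CPU backend (at::CPUGeneratorImpl); it is a hedged usage sketch, not code from this diff.

    // Sketch only; CPUGeneratorImpl is assumed to accept an initial seed.
    #include <ATen/CPUGeneratorImpl.h>
    #include <ATen/core/Generator.h>
    #include <mutex>

    void reseed_example(uint64_t seed) {
      at::Generator gen = at::make_generator<at::CPUGeneratorImpl>(seed);

      // Hold the generator's mutex while mutating its state.
      std::lock_guard<std::mutex> lock(gen.mutex());
      gen.set_current_seed(seed + 1);
      uint64_t cur = gen.current_seed();  // read-only query
      (void)cur;
    }
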
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h
ADDED
@@ -0,0 +1,39 @@
#pragma once

#include <ATen/core/Generator.h>
#include <c10/util/intrusive_ptr.h>

namespace at {

using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>;

c10::optional<GeneratorFuncType>& GetGeneratorPrivate();

class TORCH_API _GeneratorRegister {
 public:
  explicit _GeneratorRegister(const GeneratorFuncType& func);
};

TORCH_API at::Generator GetGeneratorForPrivateuse1(
    c10::DeviceIndex device_index);

/**
 * This is used to register Generator to PyTorch for `privateuse1` key.
 *
 * Usage: REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1)
 *
 * class CustomGeneratorImpl : public c10::GeneratorImpl {
 *   CustomGeneratorImpl(DeviceIndex device_index = -1);
 *   explicit ~CustomGeneratorImpl() override = default;
 *   ...
 * };
 *
 * at::Generator MakeGeneratorForPrivateuse1(c10::DeviceIndex id) {
 *   return at::make_generator<CustomGeneratorImpl>(id);
 * }
 */

#define REGISTER_GENERATOR_PRIVATEUSE1(GeneratorPrivate) \
  static auto temp##GeneratorPrivate = at::_GeneratorRegister(GeneratorPrivate);

} // namespace at
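
Illustrative sketch (not part of the file above): once a factory like the MakeGeneratorForPrivateuse1 shown in the header's comment has been registered via the macro, it can be looked up by device index through GetGeneratorForPrivateuse1. The names below come from that comment and are otherwise hypothetical.

    // Sketch only; MakeGeneratorForPrivateuse1 is the hypothetical factory
    // from the comment in the header above.
    REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1)

    void use_privateuse1_generator() {
      // Returns whatever the registered factory produced for device 0.
      at::Generator gen = at::GetGeneratorForPrivateuse1(/*device_index=*/0);
      std::lock_guard<std::mutex> lock(gen.mutex());
      gen.set_current_seed(42);
    }
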
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h
ADDED
@@ -0,0 +1,201 @@
#pragma once

#include <ATen/core/List.h>
#include <ATen/core/Tensor.h>

namespace at {
class Tensor;
class OptionalTensorRef;
}

namespace c10 {
namespace detail {

/*
 * Specializations of `IListRefTagImplBase` that implement the default
 * implementation for `IListRefTag::Unboxed`.
 */
template <typename T, typename ListElemT>
class IListRefTagImplBase<IListRefTag::Unboxed, T, ListElemT> {
 public:
  using elem_type = ListElemT;
  using list_type = ArrayRef<elem_type>;

  /*
   * These `unwrap` static methods unwraps the inner containers out
   * of `IListRef<T>` (and `IListRefIterator<T>`). They are required when
   * the macro `TORCH_ILISTREF_UNWRAP` is called.
   */
  static const list_type& unwrap(const IListRef<T>& ilist) {
    return ilist.payload_.unboxed;
  }

  static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
    return it.payload_.unboxed_iterator;
  }

  static const typename list_type::const_iterator& unwrap(
      const IListRefIterator<T>& it) {
    return it.payload_.unboxed_iterator;
  }

  /*
   * We have these function (besides the `unwrap`s above) because the
   * implementation for both `IListRef::operator[]` and `IListRefIterator::operator*`
   * weren't syntatically equal for the existing tags at the time
   * (`Unboxed` and `Boxed`).
   */
  static IListRefConstRef<T> front(const list_type& lst) {
    return lst.front();
  }

  static IListRefConstRef<T> iterator_get(
      const typename list_type::const_iterator& it) {
    return *it;
  }
};

/*
 * Specializations of `IListRefTagImplBase` that implement the default
 * implementation for `IListRefTag::Boxed`.
 */
template <typename T, typename ListElemT>
class IListRefTagImplBase<IListRefTag::Boxed, T, ListElemT> {
 public:
  using elem_type = ListElemT;
  using list_type = List<elem_type>;

  static const list_type& unwrap(const IListRef<T>& ilist) {
    return *ilist.payload_.boxed;
  }

  static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
    return it.payload_.boxed_iterator;
  }

  static const typename list_type::const_iterator& unwrap(
      const IListRefIterator<T>& it) {
    return it.payload_.boxed_iterator;
  }

  static IListRefConstRef<T> front(const list_type& lst) {
    return lst[0];
  }

  static IListRefConstRef<T> iterator_get(
      const typename list_type::const_iterator& it) {
    return (*it).get().toTensor();
  }
};

/*
 * Specializations of `IListRefTagImplBase` that implement the default
 * implementation for `IListRefTag::Materialized`.
 */
template <typename T>
class IListRefTagImplBase<IListRefTag::Materialized, T, MaterializedIListRefElem<T>> {
 public:
  using elem_type = MaterializedIListRefElem<T>;
  using list_type = MaterializedIListRef<T>;

  static const list_type& unwrap(const IListRef<T>& ilist) {
    return *ilist.payload_.materialized;
  }

  static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
    return it.payload_.materialized_iterator;
  }

  static const typename list_type::const_iterator& unwrap(
      const IListRefIterator<T>& it) {
    return it.payload_.materialized_iterator;
  }

  static IListRefConstRef<T> front(const list_type& lst) {
    return lst[0];
  }

  static IListRefConstRef<T> iterator_get(
      const typename list_type::const_iterator& it) {
    return *it;
  }
};

/*
 * [Note: ITensorListRef]
 * Specializations necessary for `IListRef<at::Tensor>` type.
 *
 * Since the default implementations are usually done with supporting
 * `Tensor` in mind, we only have to inherit from the base implementations.
 */
template <>
class IListRefTagImpl<IListRefTag::Unboxed, at::Tensor>
    : public IListRefTagImplBase<IListRefTag::Unboxed, at::Tensor> {};

template <>
class IListRefTagImpl<IListRefTag::Boxed, at::Tensor>
    : public IListRefTagImplBase<IListRefTag::Boxed, at::Tensor> {};

template <>
class IListRefTagImpl<IListRefTag::Materialized, at::Tensor>
    : public IListRefTagImplBase<
          IListRefTag::Materialized,
          at::Tensor,
          MaterializedIListRefElem<at::Tensor>> {};

/*
 * [Note: IOptTensorListRef]
 * Specializations necessary for `IListRef<at::OptionalTensorRef>` type.
 *
 * We can't get an `at::OptionalTensorRef` directly from an instance of
 * `List<optional<Tensor>>` (the type that corresponds to the boxed world).
 *
 * So, the default implementation won't help us. Thus, we have to implement
 * this method ourselves.
 */
template <>
class IListRefTagImpl<IListRefTag::Unboxed, at::OptionalTensorRef>
    : public IListRefTagImplBase<IListRefTag::Unboxed, at::OptionalTensorRef> {};

template <>
class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef>
    : public IListRefTagImplBase<IListRefTag::Boxed, at::OptionalTensorRef, optional<at::Tensor>> {

 public:
  /*
   * Given an instance of the types corresponding to the `Boxed` tag, we override
   * the default implementation, so that we can return a `at::OptionalTensorRef`.
   */
  static IListRefConstRef<at::OptionalTensorRef> iterator_get(
      const typename list_type::const_iterator& it) {
    const auto& ivalue = (*it).get();
    if (!ivalue.isNone()) {
      const auto& tensor = ivalue.toTensor();
      return (tensor.defined()) ? tensor : at::OptionalTensorRef{};
    }
    return {};
  }
};

template <>
class IListRefTagImpl<IListRefTag::Materialized, at::OptionalTensorRef>
    : public IListRefTagImplBase<
          IListRefTag::Materialized,
          at::OptionalTensorRef,
          MaterializedIListRefElem<at::OptionalTensorRef>> {};

} // namespace detail
} // namespace c10

namespace at {

// [Note: ITensorListRef]
using ITensorListRef = c10::IListRef<at::Tensor>;
using ITensorListRefIterator = c10::IListRefIterator<at::Tensor>;
using MaterializedITensorListRef = c10::detail::MaterializedIListRef<at::Tensor>;
// [Note: IOptTensorListRef]
using IOptTensorListRef = c10::IListRef<at::OptionalTensorRef>;
using IOptTensorListRefIterator = c10::IListRefIterator<at::OptionalTensorRef>;
using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef<at::OptionalTensorRef>;

} // namespace at
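
Illustrative sketch (not part of the file above): the point of the tag specializations and the ITensorListRef alias is that one parameter type can accept both the unboxed (ArrayRef/vector) and the boxed (c10::List) representation without copying; iteration then dispatches on the stored tag. This is a hedged example assuming the IListRef definition in <ATen/core/IListRef.h>.

    // Sketch only.
    #include <ATen/ATen.h>
    #include <ATen/core/IListRef.h>
    #include <ATen/core/List.h>
    #include <vector>

    int64_t count_defined(at::ITensorListRef tensors) {
      int64_t n = 0;
      for (const at::Tensor& t : tensors) {  // works for Unboxed, Boxed, Materialized tags
        if (t.defined()) ++n;
      }
      return n;
    }

    void demo() {
      std::vector<at::Tensor> unboxed = {at::ones({2}), at::zeros({2})};
      c10::List<at::Tensor> boxed({at::ones({3})});
      count_defined(unboxed);  // wrapped with the Unboxed tag
      count_defined(boxed);    // wrapped with the Boxed tag
    }
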
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h
ADDED
@@ -0,0 +1,111 @@
#pragma once

// The legacy mechanism for dispatching operators in ATen is a Type
// object, which is essentially a giant virtual dispatch table
// for every operation we support dynamically dispatching over.
//
// This has been deprecated in favor of ATenDispatch, and in the future,
// c10 dispatcher.
// TODO: Clean up what remains here

#include <c10/core/impl/LocalDispatchKeySet.h>

namespace at {

// A RAII, thread local (!) guard that will disable dispatch to variable
// handler.
//
// NOTE [ Treating Variables as non-Variables in type dispatch ]
//
// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes
// dispatches on ATen functions to go to the non-variable implementation,
// bypassing autograd handling (and also profiling and tracing).
//
// To understand why this guard exists, it's helpful to understand the history
// behind how Variable was implemented. Previously, Variables were implemented
// as a wrapper on Tensors; so the act of processing a Variable involved
// unwrapping the underlying Tensor, and then calling the underlying base
// operation on /that/ operation
//
// However, after the Variable/Tensor merge, there is no concept of unwrapping
// a tensor anymore. If you just call the operation on the same variable
// again inside your VariableType handler, you'll dispatch back to
// VariableType, which is not what we want.
//
// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which
// when enabled will cause `legacyTensorType()` and `getType()` to always return
// non-Variable type, even if the tensor being called on is a variable.

/* Note [AutoDispatchBelowAutograd]
 * AutoDispatchBelowAutograd is **INTERNAL ONLY** that it should be used
 * for kernel implementations and customized C++ kernels.
 * If you are looking for a guard to run workload in inference mode, please use
 * c10::InferenceMode RAII which is user facing API.
 * In the past AutoDispatchBelowAutograd(or its old version AutoNonVariableTypeMode)
 * was used in the user code for inference-only workload, this was under risk of
 * producing wrong results silently in some edge cases. For example:
 * ```
 *  torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true);
 *  torch::Tensor out = s * s;
 *  {
 *    at::AutoDispatchBelowAutograd guard;
 *    s.add_(1);  // Skips version bump on `s`.
 *  }
 *  // WRONG GRADIENT! s.grad() are now computed using `s` value after the
 *  // inplace update.
 *  out.backward(torch::ones_like(out));
 * ```
 * Users should use `c10::InferenceMode` here so that it'll properly throw an
 * error saying "one of the variables needed for gradient computation has be modified."
 */
struct TORCH_API AutoDispatchBelowAutograd {
  AutoDispatchBelowAutograd() :
    autograd_guard_(c10::autograd_dispatch_keyset) {
  }

  // disable all autograd dispatch keys
  c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
};

// TODO: AutoNonVariableTypeMode should be removed in release 1.10.
struct TORCH_API AutoNonVariableTypeMode {
  AutoNonVariableTypeMode(bool enabled = true) :
    autograd_guard_(c10::autograd_dispatch_keyset) {
    TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. "
        "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, "
        "If you are looking for a user facing API to enable running your inference-only "
        "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code "
        "is under risk of producing silent wrong result in some edge cases. "
        "See Note [AutoDispatchBelowAutograd] for more details.");
    TORCH_INTERNAL_ASSERT(enabled);
  }

  // disable all autograd dispatch keys
  c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
};

struct TORCH_API AutoDispatchSkipFunctionalize {
  AutoDispatchSkipFunctionalize() :
    dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) {
  }
  c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
};

/* Note [AutoDispatchBelowADInplaceOrView]
 * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode
 * before we split inplace & view ops out of VariableType kernel.
 * Note this guard is used in VariableType kernels for functional ops
 * as well as ADInplaceOrView kernels for inplace/view ops to enforce the
 * Invariant:
 *   Once you are in VariableType/ADInplaceOrView kernel for an op,
 *   you never go back to a kernel on same dispatch key until
 *   you finish the current op.
 */
struct TORCH_API AutoDispatchBelowADInplaceOrView {
  AutoDispatchBelowADInplaceOrView() :
    dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) {
  }
  // disable Autograd & ADInplaceOrView dispatch keys
  c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
};
} // namespace at
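
Illustrative sketch (not part of the file above): per the notes in this header, these RAII guards are internal-only helpers for kernel code (user code should reach for c10::InferenceMode instead). A hedged example of the intended kernel-side pattern:

    // Sketch only: inside a custom autograd kernel, redispatch below the
    // Autograd/ADInplaceOrView keys so the call does not re-enter this kernel.
    #include <ATen/ATen.h>
    #include <ATen/core/LegacyTypeDispatch.h>

    at::Tensor my_custom_kernel(const at::Tensor& self) {
      at::AutoDispatchBelowADInplaceOrView guard;  // excludes Autograd + ADInplaceOrView keys
      return self * 2;                             // goes straight to the backend kernel
    }
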
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List.h
ADDED
@@ -0,0 +1,490 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/core/ivalue_to.h>
|
4 |
+
#include <ATen/core/jit_type_base.h>
|
5 |
+
#include <c10/macros/Macros.h>
|
6 |
+
#include <c10/macros/Export.h>
|
7 |
+
#include <c10/util/TypeTraits.h>
|
8 |
+
#include <c10/util/TypeList.h>
|
9 |
+
#include <c10/util/intrusive_ptr.h>
|
10 |
+
#include <c10/util/ArrayRef.h>
|
11 |
+
#include <c10/util/Optional.h>
|
12 |
+
#include <vector>
|
13 |
+
|
14 |
+
namespace at {
|
15 |
+
class Tensor;
|
16 |
+
}
|
17 |
+
namespace c10 {
|
18 |
+
struct IValue;
|
19 |
+
template<class T> class List;
|
20 |
+
struct Type;
|
21 |
+
|
22 |
+
namespace detail {
|
23 |
+
|
24 |
+
struct ListImpl final : public c10::intrusive_ptr_target {
|
25 |
+
using list_type = std::vector<IValue>;
|
26 |
+
|
27 |
+
explicit TORCH_API ListImpl(list_type list_, TypePtr elementType_);
|
28 |
+
|
29 |
+
list_type list;
|
30 |
+
|
31 |
+
TypePtr elementType;
|
32 |
+
|
33 |
+
intrusive_ptr<ListImpl> copy() const {
|
34 |
+
return make_intrusive<ListImpl>(list, elementType);
|
35 |
+
}
|
36 |
+
friend TORCH_API bool operator==(const ListImpl& lhs, const ListImpl& rhs);
|
37 |
+
};
|
38 |
+
}
|
39 |
+
|
40 |
+
namespace impl {
|
41 |
+
|
42 |
+
template<class T, class Iterator> class ListIterator;
|
43 |
+
|
44 |
+
template<class T, class Iterator> class ListElementReference;
|
45 |
+
|
46 |
+
template<class T, class Iterator>
|
47 |
+
void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs);
|
48 |
+
|
49 |
+
template<class T, class Iterator>
|
50 |
+
bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs);
|
51 |
+
|
52 |
+
template<class T, class Iterator>
|
53 |
+
bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs);
|
54 |
+
|
55 |
+
template<class T>
|
56 |
+
struct ListElementConstReferenceTraits {
|
57 |
+
// In the general case, we use IValue::to().
|
58 |
+
using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return<T>::type;
|
59 |
+
};
|
60 |
+
|
61 |
+
// There is no to() overload for c10::optional<std::string>.
|
62 |
+
template<>
|
63 |
+
struct ListElementConstReferenceTraits<c10::optional<std::string>> {
|
64 |
+
using const_reference = c10::optional<std::reference_wrapper<const std::string>>;
|
65 |
+
};
|
66 |
+
|
67 |
+
template<class T, class Iterator>
|
68 |
+
class ListElementReference final {
|
69 |
+
public:
|
70 |
+
operator std::conditional_t<
|
71 |
+
std::is_reference<typename c10::detail::
|
72 |
+
ivalue_to_const_ref_overload_return<T>::type>::value,
|
73 |
+
const T&,
|
74 |
+
T>() const;
|
75 |
+
|
76 |
+
ListElementReference& operator=(T&& new_value) &&;
|
77 |
+
|
78 |
+
ListElementReference& operator=(const T& new_value) &&;
|
79 |
+
|
80 |
+
// assigning another ref to this assigns the underlying value
|
81 |
+
ListElementReference& operator=(ListElementReference&& rhs) && noexcept;
|
82 |
+
|
83 |
+
const IValue& get() const& {
|
84 |
+
return *iterator_;
|
85 |
+
}
|
86 |
+
|
87 |
+
friend void swap<T, Iterator>(ListElementReference&& lhs, ListElementReference&& rhs);
|
88 |
+
|
89 |
+
ListElementReference(const ListElementReference&) = delete;
|
90 |
+
ListElementReference& operator=(const ListElementReference&) = delete;
|
91 |
+
|
92 |
+
private:
|
93 |
+
ListElementReference(Iterator iter)
|
94 |
+
: iterator_(iter) {}
|
95 |
+
|
96 |
+
// allow moving, but only our friends (i.e. the List class) can move us
|
97 |
+
ListElementReference(ListElementReference&&) noexcept = default;
|
98 |
+
ListElementReference& operator=(ListElementReference&& rhs) & noexcept {
|
99 |
+
iterator_ = std::move(rhs.iterator_);
|
100 |
+
return *this;
|
101 |
+
}
|
102 |
+
|
103 |
+
friend class List<T>;
|
104 |
+
friend class ListIterator<T, Iterator>;
|
105 |
+
|
106 |
+
Iterator iterator_;
|
107 |
+
};
|
108 |
+
|
109 |
+
// this wraps vector::iterator to make sure user code can't rely
|
110 |
+
// on it being the type of the underlying vector.
|
111 |
+
template <class T, class Iterator>
|
112 |
+
class ListIterator final {
|
113 |
+
public:
|
114 |
+
// C++17 friendly std::iterator implementation
|
115 |
+
using iterator_category = std::random_access_iterator_tag;
|
116 |
+
using value_type = T;
|
117 |
+
using difference_type = std::ptrdiff_t;
|
118 |
+
using pointer = T*;
|
119 |
+
using reference = ListElementReference<T, Iterator>;
|
120 |
+
|
121 |
+
explicit ListIterator() = default;
|
122 |
+
~ListIterator() = default;
|
123 |
+
|
124 |
+
ListIterator(const ListIterator&) = default;
|
125 |
+
ListIterator(ListIterator&&) noexcept = default;
|
126 |
+
ListIterator& operator=(const ListIterator&) = default;
|
127 |
+
ListIterator& operator=(ListIterator&&) noexcept = default;
|
128 |
+
|
129 |
+
ListIterator& operator++() {
|
130 |
+
++iterator_;
|
131 |
+
return *this;
|
132 |
+
}
|
133 |
+
|
134 |
+
ListIterator operator++(int) {
|
135 |
+
ListIterator copy(*this);
|
136 |
+
++*this;
|
137 |
+
return copy;
|
138 |
+
}
|
139 |
+
|
140 |
+
ListIterator& operator--() {
|
141 |
+
--iterator_;
|
142 |
+
return *this;
|
143 |
+
}
|
144 |
+
|
145 |
+
ListIterator operator--(int) {
|
146 |
+
ListIterator copy(*this);
|
147 |
+
--*this;
|
148 |
+
return copy;
|
149 |
+
}
|
150 |
+
|
151 |
+
ListIterator& operator+=(typename List<T>::size_type offset) {
|
152 |
+
iterator_ += offset;
|
153 |
+
return *this;
|
154 |
+
}
|
155 |
+
|
156 |
+
ListIterator& operator-=(typename List<T>::size_type offset) {
|
157 |
+
iterator_ -= offset;
|
158 |
+
return *this;
|
159 |
+
}
|
160 |
+
|
161 |
+
ListIterator operator+(typename List<T>::size_type offset) const {
|
162 |
+
return ListIterator{iterator_ + offset};
|
163 |
+
}
|
164 |
+
|
165 |
+
ListIterator operator-(typename List<T>::size_type offset) const {
|
166 |
+
return ListIterator{iterator_ - offset};
|
167 |
+
}
|
168 |
+
|
169 |
+
friend difference_type operator-(const ListIterator& lhs, const ListIterator& rhs) {
|
170 |
+
return lhs.iterator_ - rhs.iterator_;
|
171 |
+
}
|
172 |
+
|
173 |
+
ListElementReference<T, Iterator> operator*() const {
|
174 |
+
return {iterator_};
|
175 |
+
}
|
176 |
+
|
177 |
+
ListElementReference<T, Iterator> operator[](typename List<T>::size_type offset) const {
|
178 |
+
return {iterator_ + offset};
|
179 |
+
}
|
180 |
+
|
181 |
+
private:
|
182 |
+
explicit ListIterator(Iterator iterator): iterator_(std::move(iterator)) {}
|
183 |
+
|
184 |
+
Iterator iterator_;
|
185 |
+
|
186 |
+
friend bool operator==(const ListIterator& lhs, const ListIterator& rhs) {
|
187 |
+
return lhs.iterator_ == rhs.iterator_;
|
188 |
+
}
|
189 |
+
|
190 |
+
friend bool operator!=(const ListIterator& lhs, const ListIterator& rhs) {
|
191 |
+
return !(lhs == rhs);
|
192 |
+
}
|
193 |
+
|
194 |
+
friend bool operator<(const ListIterator& lhs, const ListIterator& rhs) {
|
195 |
+
return lhs.iterator_ < rhs.iterator_;
|
196 |
+
}
|
197 |
+
|
198 |
+
friend bool operator<=(const ListIterator& lhs, const ListIterator& rhs) {
|
199 |
+
return lhs.iterator_ <= rhs.iterator_;
|
200 |
+
}
|
201 |
+
|
202 |
+
friend bool operator>(const ListIterator& lhs, const ListIterator& rhs) {
|
203 |
+
return lhs.iterator_ > rhs.iterator_;
|
204 |
+
}
|
205 |
+
|
206 |
+
friend bool operator>=(const ListIterator& lhs, const ListIterator& rhs) {
|
207 |
+
return lhs.iterator_ >= rhs.iterator_;
|
208 |
+
}
|
209 |
+
|
210 |
+
friend class ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
|
211 |
+
friend class List<T>;
|
212 |
+
};
|
213 |
+
|
214 |
+
template<class T> List<T> toTypedList(List<IValue> list);
|
215 |
+
template<class T> List<IValue> toList(List<T>&& list);
|
216 |
+
template<class T> List<IValue> toList(const List<T>& list);
|
217 |
+
const IValue* ptr_to_first_element(const List<IValue>& list);
|
218 |
+
}
|
219 |
+
|
220 |
+
/**
|
221 |
+
* An object of this class stores a list of values of type T.
|
222 |
+
*
|
223 |
+
* This is a pointer type. After a copy, both Lists
|
224 |
+
* will share the same storage:
|
225 |
+
*
|
226 |
+
* > List<int> a;
|
227 |
+
* > List<int> b = a;
|
228 |
+
* > b.push_back("three");
|
229 |
+
* > ASSERT("three" == a.get(0));
|
230 |
+
*
|
231 |
+
* We use this class in the PyTorch kernel API instead of
|
232 |
+
* std::vector<T>, because that allows us to do optimizations
|
233 |
+
* and switch out the underlying list implementation without
|
234 |
+
* breaking backwards compatibility for the kernel API.
|
235 |
+
*/
|
236 |
+
template<class T>
|
237 |
+
class List final {
|
238 |
+
private:
|
239 |
+
// This is an intrusive_ptr because List is a pointer type.
|
240 |
+
// Invariant: This will never be a nullptr, there will always be a valid
|
241 |
+
// ListImpl.
|
242 |
+
c10::intrusive_ptr<c10::detail::ListImpl> impl_;
|
243 |
+
|
244 |
+
using internal_reference_type = impl::ListElementReference<T, typename c10::detail::ListImpl::list_type::iterator>;
|
245 |
+
using internal_const_reference_type = typename impl::ListElementConstReferenceTraits<T>::const_reference;
|
246 |
+
|
247 |
+
public:
|
248 |
+
using value_type = T;
|
249 |
+
using size_type = typename c10::detail::ListImpl::list_type::size_type;
|
250 |
+
using iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
|
251 |
+
using const_iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
|
252 |
+
using reverse_iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::reverse_iterator>;
|
253 |
+
|
254 |
+
/**
|
255 |
+
* Constructs an empty list.
|
256 |
+
*/
|
257 |
+
explicit List();
|
258 |
+
|
259 |
+
/**
|
260 |
+
* Constructs a list with some initial values.
|
261 |
+
* Example:
|
262 |
+
* List<int> a({2, 3, 4});
|
263 |
+
*/
|
264 |
+
List(std::initializer_list<T> initial_values);
|
265 |
+
explicit List(ArrayRef<T> initial_values);
|
266 |
+
|
267 |
+
/**
|
268 |
+
* Create a generic list with runtime type information.
|
269 |
+
* This only works for c10::impl::GenericList and is not part of the public API
|
270 |
+
* but only supposed to be used internally by PyTorch.
|
271 |
+
*/
|
272 |
+
explicit List(TypePtr elementType);
|
273 |
+
|
274 |
+
List(const List&) = default;
|
275 |
+
List& operator=(const List&) = default;
|
276 |
+
|
277 |
+
/**
|
278 |
+
* Create a new List pointing to a deep copy of the same data.
|
279 |
+
* The List returned is a new list with separate storage.
|
280 |
+
* Changes in it are not reflected in the original list or vice versa.
|
281 |
+
*/
|
282 |
+
List copy() const;
|
283 |
+
|
284 |
+
/**
|
285 |
+
* Returns the element at specified location pos, with bounds checking.
|
286 |
+
* If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
|
287 |
+
*/
|
288 |
+
value_type get(size_type pos) const;
|
289 |
+
|
290 |
+
/**
|
291 |
+
* Moves out the element at the specified location pos and returns it, with bounds checking.
|
292 |
+
* If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
|
293 |
+
* The list contains an invalid element at position pos afterwards. Any operations
|
294 |
+
* on it before re-setting it are invalid.
|
295 |
+
*/
|
296 |
+
value_type extract(size_type pos) const;
|
297 |
+
|
298 |
+
/**
|
299 |
+
* Returns a reference to the element at specified location pos, with bounds checking.
|
300 |
+
* If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
|
301 |
+
*
|
302 |
+
* You cannot store the reference, but you can read it and assign new values to it:
|
303 |
+
*
|
304 |
+
* List<int64_t> list = ...;
|
305 |
+
* list[2] = 5;
|
306 |
+
* int64_t v = list[1];
|
307 |
+
*/
|
308 |
+
internal_const_reference_type operator[](size_type pos) const;
|
309 |
+
|
310 |
+
internal_reference_type operator[](size_type pos);
|
311 |
+
|
312 |
+
/**
|
313 |
+
* Assigns a new value to the element at location pos.
|
314 |
+
*/
|
315 |
+
void set(size_type pos, const value_type& value) const;
|
316 |
+
|
317 |
+
/**
|
318 |
+
* Assigns a new value to the element at location pos.
|
319 |
+
*/
|
320 |
+
void set(size_type pos, value_type&& value) const;
|
321 |
+
|
322 |
+
/**
|
323 |
+
* Returns an iterator to the first element of the container.
|
324 |
+
* If the container is empty, the returned iterator will be equal to end().
|
325 |
+
*/
|
326 |
+
iterator begin() const;
|
327 |
+
|
328 |
+
/**
|
329 |
+
* Returns an iterator to the element following the last element of the container.
|
330 |
+
* This element acts as a placeholder; attempting to access it results in undefined behavior.
|
331 |
+
*/
|
332 |
+
iterator end() const;
|
333 |
+
|
334 |
+
/**
|
335 |
+
* Checks if the container has no elements.
|
336 |
+
*/
|
337 |
+
bool empty() const;
|
338 |
+
|
339 |
+
/**
|
340 |
+
* Returns the number of elements in the container
|
341 |
+
*/
|
342 |
+
size_type size() const;
|
343 |
+
|
344 |
+
/**
|
345 |
+
* Increase the capacity of the vector to a value that's greater or equal to new_cap.
|
346 |
+
*/
|
347 |
+
void reserve(size_type new_cap) const;
|
348 |
+
|
349 |
+
/**
|
350 |
+
* Erases all elements from the container. After this call, size() returns zero.
|
351 |
+
* Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated.
|
352 |
+
*/
|
353 |
+
void clear() const;
|
354 |
+
|
355 |
+
/**
|
356 |
+
* Inserts value before pos.
|
357 |
+
* May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
|
358 |
+
*/
|
359 |
+
iterator insert(iterator pos, const T& value) const;
|
360 |
+
|
361 |
+
/**
|
362 |
+
* Inserts value before pos.
|
363 |
+
* May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
|
364 |
+
*/
|
365 |
+
iterator insert(iterator pos, T&& value) const;
|
366 |
+
|
367 |
+
/**
|
368 |
+
* Inserts a new element into the container directly before pos.
|
369 |
+
* The new element is constructed with the given arguments.
|
370 |
+
* May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
|
371 |
+
*/
|
372 |
+
template<class... Args>
|
373 |
+
iterator emplace(iterator pos, Args&&... value) const;
|
374 |
+
|
375 |
+
/**
|
376 |
+
* Appends the given element value to the end of the container.
|
377 |
+
* May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
|
378 |
+
*/
|
379 |
+
void push_back(const T& value) const;
|
380 |
+
|
381 |
+
/**
|
382 |
+
* Appends the given element value to the end of the container.
|
383 |
+
* May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
|
384 |
+
*/
|
385 |
+
  void push_back(T&& value) const;

  /**
   * Appends the given list to the end of the container. Uses at most one memory allocation.
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  void append(List<T> lst) const;

  /**
   * Appends the given element value to the end of the container.
   * The new element is constructed with the given arguments.
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  template<class... Args>
  void emplace_back(Args&&... args) const;

  /**
   * Removes the element at pos.
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  iterator erase(iterator pos) const;

  /**
   * Removes the elements in the range [first, last).
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  iterator erase(iterator first, iterator last) const;

  /**
   * Removes the last element of the container.
   * Calling pop_back on an empty container is undefined.
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  void pop_back() const;

  /**
   * Resizes the container to contain count elements.
   * If the current size is less than count, additional default-inserted elements are appended.
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  void resize(size_type count) const;

  /**
   * Resizes the container to contain count elements.
   * If the current size is less than count, additional copies of value are appended.
   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
   */
  void resize(size_type count, const T& value) const;

  /**
   * Value equality comparison. This function implements Python-like semantics for
   * equality: two lists with the same identity (e.g. same pointer) trivially
   * compare equal, otherwise each element is compared for equality.
   */
  template <class T_>
  friend bool operator==(const List<T_>& lhs, const List<T_>& rhs);

  template <class T_>
  friend bool operator!=(const List<T_>& lhs, const List<T_>& rhs);

  /**
   * Identity comparison. Returns true if and only if `rhs` represents the same
   * List object as `this`.
   */
  bool is(const List<T>& rhs) const;

  std::vector<T> vec() const;

  /**
   * Returns the number of Lists currently pointing to this same list.
   * If this is the only instance pointing to this list, returns 1.
   */
  // TODO Test use_count
  size_t use_count() const;

  TypePtr elementType() const;

  // See [unsafe set type] for why this exists.
  void unsafeSetElementType(TypePtr t);

private:
  explicit List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements);
  explicit List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements);
  friend struct IValue;
  template<class T_> friend List<T_> impl::toTypedList(List<IValue>);
  template<class T_> friend List<IValue> impl::toList(List<T_>&&);
  template<class T_> friend List<IValue> impl::toList(const List<T_>&);
  friend const IValue* impl::ptr_to_first_element(const List<IValue>& list);
};

namespace impl {
// GenericList is how IValue stores lists. It is, however, not part of the
// public API. Kernels should use Lists with concrete types instead
// (maybe except for some internal prim ops).
using GenericList = List<IValue>;

const IValue* ptr_to_first_element(const GenericList& list);

}
}

namespace torch {
  template<class T> using List = c10::List<T>;
}

#include <ATen/core/List_inl.h>  // IWYU pragma: keep
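Note: a minimal usage sketch for the c10::List API declared above (not part of the diff; assumes a working PyTorch C++ build, with names chosen for illustration only):

#include <ATen/core/List.h>
#include <vector>

void list_example() {
  c10::List<int64_t> a({1, 2, 3});         // construct from an initializer list
  a.push_back(4);                          // append a single element
  a.emplace_back(5);                       // construct the new element in place

  c10::List<int64_t> b({6, 7});
  a.append(b);                             // append another list; at most one allocation

  a.resize(10);                            // grow with default-inserted elements
  std::vector<int64_t> as_vec = a.vec();   // materialize as a std::vector

  bool same_object = a.is(a);              // identity comparison (always true here)
  bool equal_values = (a == a);            // Python-like value comparison
  (void)as_vec; (void)same_object; (void)equal_values;
}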
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h
ADDED
@@ -0,0 +1,360 @@
#pragma once

#include <ATen/core/jit_type_base.h>
#include <ATen/core/ivalue.h>

namespace c10 {

template<class T> decltype(auto) getTypePtr();
std::string toString(const Type& type);

template<class T>
List<T>::List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements)
: impl_(std::move(elements)) {}

template<class T>
List<T>::List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements)
: impl_(elements) {}

template<class T>
List<T>::List()
: List(make_intrusive<c10::detail::ListImpl>(
  typename c10::detail::ListImpl::list_type(),
  getTypePtr<T>())) {
  static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType) instead.");
}

template<class T>
List<T>::List(ArrayRef<T> values)
: List(make_intrusive<c10::detail::ListImpl>(
  typename c10::detail::ListImpl::list_type(),
  getTypePtr<T>())) {
  static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
  impl_->list.reserve(values.size());
  for (const T& element : values) {
    impl_->list.push_back(element);
  }
}

template<class T>
List<T>::List(std::initializer_list<T> initial_values)
: List(ArrayRef<T>(initial_values)) {
  static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
}

template<class T>
List<T>::List(TypePtr elementType)
: List(make_intrusive<c10::detail::ListImpl>(
    typename c10::detail::ListImpl::list_type(),
    std::move(elementType))) {
  static_assert(std::is_same<T, IValue>::value || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
                "This constructor is only valid for c10::impl::GenericList or List<Future>.");
}

namespace impl {
template<class T>
List<T> toTypedList(impl::GenericList list) {
  // If there's other instances of the list (i.e. list.use_count() > 1), then we have to be invariant
  // because upcasting would allow people to add types into the new list that would break the old list.
  // However, if there aren't any other instances of this list (i.e. list.use_count() == 1), then we can
  // allow upcasting. This can be a perf improvement since we can cast List<T> to List<optional<T>>
  // without having to copy it. This is also used to provide backwards compatibility with some old models
  // that serialized the index arguments to aten::index, aten::index_put, aten::index_put_ and aten::index_put_impl_
  // as List<Tensor> before we changed that argument to be List<optional<Tensor>>. When deserializing, we
  // have list.use_count() == 1 and can deserialize the List<Tensor> directly as List<optional<Tensor>>.
  TORCH_CHECK(*list.impl_->elementType == *getTypePtr<T>()
    || (list.use_count() == 1 && list.impl_->elementType->isSubtypeOf(*getTypePtr<T>()))
    , "Tried to cast a List<", toString(*list.impl_->elementType), "> to a List<", toString(*getTypePtr<T>()), ">. Types mismatch.");
  return List<T>(std::move(list.impl_));
}

template<class T>
impl::GenericList toList(List<T>&& list) {
  return GenericList(std::move(list.impl_));
}
template<class T>
impl::GenericList toList(const List<T>& list) {
  return GenericList(list.impl_);
}
}

template<class T>
List<T> List<T>::copy() const {
  return List<T>(impl_->copy());
}

namespace detail {
  template<class T>
  T list_element_to(T element) {
    return element;
  }
  template<class T>
  T list_element_to(const IValue& element) {
    return element.template to<T>();
  }
  template<class T>
  T list_element_to(IValue&& element) {
    return std::move(element).template to<T>();
  }
  template<class T>
  struct ListElementFrom {
    static IValue from(const T& element) {
      return element;
    }
    static IValue from(T&& element) {
      return std::move(element);
    }
  };
  template<>
  struct ListElementFrom<IValue> {
    static const IValue& from(const IValue& element) {
      return element;
    }
    static IValue&& from(IValue&& element) {
      return std::move(element);
    }
  };
}

namespace impl {

template <class T, class Iterator>
ListElementReference<T, Iterator>::operator std::conditional_t<
    std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<
        T>::type>::value,
    const T&,
    T>() const {
  return iterator_->template to<T>();
}

template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(T&& new_value) && {
  *iterator_ = c10::detail::ListElementFrom<T>::from(std::move(new_value));
  return *this;
}

template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(const T& new_value) && {
  *iterator_ = c10::detail::ListElementFrom<T>::from(new_value);
  return *this;
}

template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference<T, Iterator>&& rhs) && noexcept {
  *iterator_ = *rhs.iterator_;
  return *this;
}

template<class T, class Iterator>
void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs) {
  std::swap(*lhs.iterator_, *rhs.iterator_);
}

template<class T, class Iterator>
bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs) {
  const T& lhs_tmp = lhs;
  return lhs_tmp == rhs;
}

template<class T, class Iterator>
inline bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs) {
  return rhs == lhs;
}

template<class T>
inline typename ListElementConstReferenceTraits<T>::const_reference
list_element_to_const_ref(const IValue& element) {
  return element.template to<T>();
}

template<>
inline typename ListElementConstReferenceTraits<c10::optional<std::string>>::const_reference
list_element_to_const_ref<c10::optional<std::string>>(const IValue& element) {
  return element.toOptionalStringRef();
}

} // namespace impl

template<class T>
void List<T>::set(size_type pos, const value_type& value) const {
  impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(value);
}

template<class T>
void List<T>::set(size_type pos, value_type&& value) const {
  impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(std::move(value));
}

template<class T>
typename List<T>::value_type List<T>::get(size_type pos) const {
  return c10::detail::list_element_to<T>(impl_->list.at(pos));
}

template<class T>
typename List<T>::internal_const_reference_type List<T>::operator[](size_type pos) const {
  return c10::impl::list_element_to_const_ref<T>(impl_->list.at(pos));
}

template<class T>
typename List<T>::internal_reference_type List<T>::operator[](size_type pos) {
  static_cast<void>(impl_->list.at(pos)); // Throw the exception if it is out of range.
  return {impl_->list.begin() + static_cast<typename decltype(impl_->list)::difference_type>(pos)};
}

template<class T>
typename List<T>::value_type List<T>::extract(size_type pos) const {
  auto& elem = impl_->list.at(pos);
  auto result = c10::detail::list_element_to<T>(std::move(elem));
  // Reset the list element to a T() instead of None to keep it correctly typed
  elem = c10::detail::ListElementFrom<T>::from(T{});
  return result;
}

template<class T>
typename List<T>::iterator List<T>::begin() const {
  return iterator(impl_->list.begin());
}

template<class T>
typename List<T>::iterator List<T>::end() const {
  return iterator(impl_->list.end());
}

template<class T>
bool List<T>::empty() const {
  return impl_->list.empty();
}

template<class T>
typename List<T>::size_type List<T>::size() const {
  return impl_->list.size();
}

template<class T>
void List<T>::reserve(size_type new_cap) const {
  impl_->list.reserve(new_cap);
}

template<class T>
void List<T>::clear() const {
  impl_->list.clear();
}

template<class T>
typename List<T>::iterator List<T>::insert(iterator pos, const T& value) const {
  return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(value)) };
}

template<class T>
typename List<T>::iterator List<T>::insert(iterator pos, T&& value) const {
  return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(std::move(value))) };
}

template<class T>
template<class... Args>
typename List<T>::iterator List<T>::emplace(iterator pos, Args&&... value) const {
  // TODO Use list_element_from?
  return iterator { impl_->list.emplace(pos.iterator_, std::forward<Args>(value)...) };
}

template<class T>
void List<T>::push_back(const T& value) const {
  impl_->list.push_back(c10::detail::ListElementFrom<T>::from(value));
}

template<class T>
void List<T>::push_back(T&& value) const {
  impl_->list.push_back(c10::detail::ListElementFrom<T>::from(std::move(value)));
}

template<class T>
void List<T>::append(List<T> b) const {
  if (b.use_count() == 1) {
    impl_->list.insert(impl_->list.end(), make_move_iterator(b.impl_->list.begin()), make_move_iterator(b.impl_->list.end()));
  } else {
    impl_->list.insert(impl_->list.end(), b.impl_->list.begin(), b.impl_->list.end());
  }
}

template<class T>
template<class... Args>
void List<T>::emplace_back(Args&&... args) const {
  // TODO Use list_element_from?
  impl_->list.push_back(T(std::forward<Args>(args)...));
}

template<class T>
typename List<T>::iterator List<T>::erase(iterator pos) const {
  return iterator { impl_->list.erase(pos.iterator_) };
}

template<class T>
typename List<T>::iterator List<T>::erase(iterator first, iterator last) const {
  return iterator { impl_->list.erase(first.iterator_, last.iterator_) };
}

template<class T>
void List<T>::pop_back() const {
  impl_->list.pop_back();
}

template<class T>
void List<T>::resize(size_type count) const {
  impl_->list.resize(count, T{});
}

template<class T>
void List<T>::resize(size_type count, const T& value) const {
  impl_->list.resize(count, value);
}

template<class T>
bool operator==(const List<T>& lhs, const List<T>& rhs) {
  // Lists with the same identity trivially compare equal.
  if (lhs.impl_ == rhs.impl_) {
    return true;
  }

  // Otherwise, just compare values directly.
  return *lhs.impl_ == *rhs.impl_;
}

template<class T>
bool operator!=(const List<T>& lhs, const List<T>& rhs) {
  return !(lhs == rhs);
}

template<class T>
bool List<T>::is(const List<T>& rhs) const {
  return this->impl_ == rhs.impl_;
}

template<class T>
std::vector<T> List<T>::vec() const {
  std::vector<T> result(begin(), end());
  return result;
}

template<class T>
size_t List<T>::use_count() const {
  return impl_.use_count();
}

template <class T>
TypePtr List<T>::elementType() const {
  return impl_->elementType;
}

template <class T>
void List<T>::unsafeSetElementType(TypePtr t) {
  impl_->elementType = std::move(t);
}

namespace impl {

inline const IValue* ptr_to_first_element(const GenericList& list) {
  return &list.impl_->list[0];
}

}
}
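Note: a small sketch of the reference semantics implemented above; copies of a List share one ListImpl, while copy() clones the storage (not part of the diff; illustrative only):

#include <ATen/core/List.h>

void list_semantics_example() {
  c10::List<double> a({1.0, 2.0});
  c10::List<double> alias = a;          // shares the same ListImpl: a.is(alias) == true
  alias.set(0, 42.0);                   // visible through `a` as well: a.get(0) == 42.0

  c10::List<double> deep = a.copy();    // copy() clones the underlying storage
  deep.set(0, 7.0);                     // does not affect `a`

  double moved_out = a.extract(1);      // moves the element out, leaving a default-constructed value
  (void)moved_out;
}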
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h
ADDED
@@ -0,0 +1,194 @@
#pragma once

#include <c10/util/irange.h>

// define constants like M_PI and C keywords for MSVC
#ifdef _MSC_VER
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#endif

#include <array>
#include <cmath>
#include <cstdint>

namespace at {

constexpr int MERSENNE_STATE_N = 624;
constexpr int MERSENNE_STATE_M = 397;
constexpr uint32_t MATRIX_A = 0x9908b0df;
constexpr uint32_t UMASK = 0x80000000;
constexpr uint32_t LMASK = 0x7fffffff;

/**
 * Note [Mt19937 Engine implementation]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Originally implemented in:
 * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c
 * and modified with C++ constructs. Moreover the state array of the engine
 * has been modified to hold 32 bit uints instead of 64 bits.
 *
 * Note that we reimplemented mt19937 instead of using std::mt19937 because,
 * at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2
 * by default and following are the benchmark numbers (benchmark code can be found at
 * https://github.com/syed-ahmed/benchmark-rngs):
 *
 * with -O2
 * Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s
 * Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s
 * Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s
 * Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s
 *
 * std::mt19937 is faster when used in conjunction with std::uniform_real_distribution,
 * however we can't use std::uniform_real_distribution because of this bug:
 * http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used
 * std::uniform_real_distribution and filtered out the 1's, it is a different algorithm
 * than what's in pytorch currently and that messes up the tests in tests_distributions.py.
 * The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower
 * than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter.
 *
 * Copyright notice:
 * A C-program for MT19937, with initialization improved 2002/2/10.
 * Coded by Takuji Nishimura and Makoto Matsumoto.
 * This is a faster version by taking Shawn Cokus's optimization,
 * Matthe Bellew's simplification, Isaku Wada's real version.
 *
 * Before using, initialize the state by using init_genrand(seed)
 * or init_by_array(init_key, key_length).
 *
 * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. The names of its contributors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Any feedback is very welcome.
 * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
 * email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
 */

/**
 * mt19937_data_pod is used to get POD data in and out
 * of mt19937_engine. Used in torch.get_rng_state and
 * torch.set_rng_state functions.
 */
struct mt19937_data_pod {
  uint64_t seed_;
  int left_;
  bool seeded_;
  uint32_t next_;
  std::array<uint32_t, MERSENNE_STATE_N> state_;
};

class mt19937_engine {
public:

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  inline explicit mt19937_engine(uint64_t seed = 5489) {
    init_with_uint32(seed);
  }

  inline mt19937_data_pod data() const {
    return data_;
  }

  inline void set_data(const mt19937_data_pod& data) {
    data_ = data;
  }

  inline uint64_t seed() const {
    return data_.seed_;
  }

  inline bool is_valid() {
    if ((data_.seeded_ == true)
      && (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N)
      && (data_.next_ <= MERSENNE_STATE_N)) {
      return true;
    }
    return false;
  }

  inline uint32_t operator()() {
    if (--(data_.left_) == 0) {
      next_state();
    }
    uint32_t y = *(data_.state_.data() + data_.next_++);
    y ^= (y >> 11);
    y ^= (y << 7) & 0x9d2c5680;
    y ^= (y << 15) & 0xefc60000;
    y ^= (y >> 18);

    return y;
  }

private:
  mt19937_data_pod data_;

  inline void init_with_uint32(uint64_t seed) {
    data_.seed_ = seed;
    data_.seeded_ = true;
    data_.state_[0] = seed & 0xffffffff;
    for (const auto j : c10::irange(1, MERSENNE_STATE_N)) {
      data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j);
    }
    data_.left_ = 1;
    data_.next_ = 0;
  }

  inline uint32_t mix_bits(uint32_t u, uint32_t v) {
    return (u & UMASK) | (v & LMASK);
  }

  inline uint32_t twist(uint32_t u, uint32_t v) {
    return (mix_bits(u,v) >> 1) ^ (v & 1 ? MATRIX_A : 0);
  }

  inline void next_state() {
    uint32_t* p = data_.state_.data();
    data_.left_ = MERSENNE_STATE_N;
    data_.next_ = 0;

    for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) {
      *p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]);
    }

    for(int j = MERSENNE_STATE_M; --j; p++) {
      *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]);
    }

    *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]);
  }

};

typedef mt19937_engine mt19937;

} // namespace at
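Note: a minimal sketch of driving the engine directly; in practice it is owned by the CPU generator and reached through the Generator API (not part of the diff; illustrative only):

#include <ATen/core/MT19937RNGEngine.h>
#include <cstdint>

void mt19937_example() {
  at::mt19937 gen(5489);                         // seed the engine
  uint32_t first = gen();                        // draw 32-bit values
  uint32_t second = gen();

  at::mt19937_data_pod snapshot = gen.data();    // POD state, as used by torch.get_rng_state
  gen.set_data(snapshot);                        // restore the captured state later
  (void)first; (void)second;
}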
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h
ADDED
@@ -0,0 +1,140 @@
#pragma once

#include <ATen/core/Dimname.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/C++17.h>

namespace at {

class TensorBase;

// XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen.
// Due to the c10/ATen library split, TensorImpl cannot depend on Dimname,
// so we have a couple of workarounds.
//
// In the long term, we'll move Dimname to c10 and everything in this file
// can be refactored out. The main blocker for that is that "c10::Symbol"
// actually exists outside of c10 and needs to be moved in.

// TensorImpl has a unique_ptr<NamedTensorMetaInterface> field.
// XXX: Ideally we would just put optional<vector<Dimname>> into TensorImpl.
//
// This class has an important invariant: there must be at least ONE
// non-wildcard
struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
  // This enum is to remind people that the invariant on constructors is that
  // the list of dimnames must have at least one non-wildcard
  enum HAS_NON_WILDCARD {
    HasNonWildcard
  };

  explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
    : names_(names.vec()) {
    check_invariants();
  }
  explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names)
    : names_(std::move(names)) {
    check_invariants();
  }

  std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override {
    return std::make_unique<NamedTensorMeta>(HasNonWildcard, names_);
  }

  DimnameList names() const { return names_; }

  // Used for an assertion in TensorImpl.h
  int64_t slow_dim() const override {
    return names_.size();
  }

  void check_invariants() const {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
  }

  void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
    TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
    std::copy(new_names.begin(), new_names.end(), names_.begin());
    check_invariants();
  }

  void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) {
    TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
    names_ = std::move(new_names);
    check_invariants();
  }

  // INVARIANT: at least one Dimname is non-WILDCARD
  std::vector<Dimname> names_;
};

// When NamesMode is disabled, then all operations ignore tensors' names fields.
// Concretely speaking, all tensors are treated as having nullopt names.
struct TORCH_API NamesMode {
  static bool is_enabled();
  static void set_enabled(bool enabled);
};


// A RAII, thread local (!) guard that enables or disables names upon
// construction, and sets it back to the original value upon destruction.
struct TORCH_API NoNamesGuard {
  NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) {
    NamesMode::set_enabled(false);
  }
  ~NoNamesGuard() {
    if (initialized) {
      reset();
    }
  }
  void reset() {
    TORCH_INTERNAL_ASSERT(initialized);
    NamesMode::set_enabled(prev_mode);
  }
 private:
  bool prev_mode;
  bool initialized;
};

void check_names_valid_for(const TensorBase& tensor, DimnameList names);
void check_names_valid_for(size_t tensor_dim, DimnameList names);

// Sets the names of `tensor` to be `names`.
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names);
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);

constexpr size_t kMaxNamedTensorDim = 64;

DimnameList default_names(size_t len);

namespace impl {

// Some helper functions on TensorImpl. Useful for working with names in TH.
// XXX: Ideally these would exist as methods on TensorImpl
TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);

void check_names_valid_for(TensorImpl* impl, DimnameList names);

// Returns true if the tensor's names exist and are not all 'None'.
// Returns false if the tensor's names don't exist (were not allocated),
// or if all names are 'None'.
// We treat not-allocated-names the same as allocated names that are all 'None'.
TORCH_API bool has_names(const TensorImpl* impl);

// Returns the names of the tensor's dimensions.
// Unnamed tensors are treated as having 'None' in all dimensions; this method
// would return a DimnameList of all 'None's for an unnamed tensor.
TORCH_API DimnameList get_names(const TensorImpl* impl);

// This is more of an implementation detail; one should use impl::get_names /
// Tensor::names() whenever possible because it provides a cleaner API.
// Returns the names of the tensor if they have been allocated; returns nullopt
// instead if they haven't been. The names of a tensor are not allocated if a
// tensor is constructed with names=None.
TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);

} // namespace impl

} // namespace at
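Note: a small sketch of the NoNamesGuard RAII guard declared above; inside its scope, dimension-name propagation is disabled for the current thread (not part of the diff; illustrative only):

#include <ATen/core/NamedTensor.h>

void no_names_example() {
  bool before = at::NamesMode::is_enabled();
  {
    at::NoNamesGuard guard;                  // disables names on construction
    // ... run ops here without name propagation ...
    TORCH_INTERNAL_ASSERT(!at::NamesMode::is_enabled());
  }                                          // destructor restores the previous mode
  TORCH_INTERNAL_ASSERT(at::NamesMode::is_enabled() == before);
}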
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h
ADDED
@@ -0,0 +1,242 @@
#pragma once

// define constants like M_PI and C keywords for MSVC
#ifdef _MSC_VER
#define _USE_MATH_DEFINES
#include <math.h>
#endif


#ifdef __CUDACC__
#include <cuda.h>
#endif

#include <ATen/core/Array.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Half.h>
#include <cmath>
#include <cstdint>

namespace at {

// typedefs for holding vector data
namespace detail {

typedef at::detail::Array<uint32_t, 4> UINT4;
typedef at::detail::Array<uint32_t, 2> UINT2;
typedef at::detail::Array<double, 2> DOUBLE2;
typedef at::detail::Array<float, 2> FLOAT2;

} // namespace detail

/**
 * Note [Philox Engine implementation]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Originally implemented in PyTorch's fusion compiler
 * Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
 * for details regarding the engine.
 *
 * Note that currently this implementation of the philox engine is not used
 * anywhere except for tests in cpu_generator_test.cpp. However, this engine
 * will replace curandStatePhilox4_32_10_t in the future.
 *
 * The philox engine takes a seed value, a subsequence
 * for starting the generation and an offset for the subsequence.
 * Think of this engine as an algorithm producing a huge array. We are
 * parallelizing this array by partitioning the huge array and assigning
 * a thread index to each partition. In other words, each seed value
 * (there are 2^64 possible seed values) gives a sub array of size
 * 2^128 (each element in that array is a 128 bit number). Reasoning
 * behind the array being of size 2^128 is, there are 2^64 possible
 * thread index values and there is an array of size 2^64 for each of
 * those thread indices. Hence 2^64 * 2^64 = 2^128 for each seed value.
 *
 * In short, this generator can produce 2^64 (seed values) * 2^128 (number
 * of elements in an array given by a seed value) = 2^192 values.
 *
 * Arguments:
 * seed: Seed values could be any number from 0 to 2^64-1.
 * subsequence: Subsequence is just the cuda thread indexing with:
 *              - blockIdx.x * blockDim.x + threadIdx.x
 * offset: The offset variable in PhiloxEngine decides how many 128-bit
 *         random numbers to skip (i.e. how many groups of 4, 32-bit numbers to skip)
 *         and hence really decides the total number of randoms that can be achieved
 *         for the given subsequence.
 */

class philox_engine {
public:

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  C10_HOST_DEVICE inline explicit philox_engine(uint64_t seed = 67280421310721,
                                                uint64_t subsequence = 0,
                                                uint64_t offset = 0) {

    reset_state(seed, subsequence);
    incr_n(offset);
  }

  C10_HOST_DEVICE inline void reset_state(uint64_t seed = 67280421310721,
                                          uint64_t subsequence = 0) {
    key_[0] = static_cast<uint32_t>(seed);
    key_[1] = static_cast<uint32_t>(seed >> 32);
    counter_ = detail::UINT4(0);
    counter_[2] = static_cast<uint32_t>(subsequence);
    counter_[3] = static_cast<uint32_t>(subsequence >> 32);
    STATE = 0;
  }

  /**
   * Set the offset field of Philox Generator to the desired offset.
   */
  C10_HOST_DEVICE inline void set_offset(uint64_t offset) {
    counter_[0] = static_cast<uint32_t>(offset);
    counter_[1] = static_cast<uint32_t>(offset >> 32);
  }

  /**
   * Gets the current offset of the Philox Generator.
   */
  C10_HOST_DEVICE uint64_t get_offset() const {
    uint64_t lo = static_cast<uint64_t>(counter_[0]);
    uint64_t hi = static_cast<uint64_t>(counter_[1]) << 32;
    return lo | hi;
  }

  /**
   * Produces a unique 32-bit pseudo random number on every invocation. Bookkeeps state to avoid waste.
   */
  C10_HOST_DEVICE inline uint32_t operator()(int32_t n_rounds = 10) { // 10 here to preserve back-compat behavior
    if(STATE == 0) {
      detail::UINT4 counter = counter_;
      detail::UINT2 key = key_;
      output_ = rand(counter, key, n_rounds);
      incr();
    }
    uint32_t ret = output_[static_cast<int>(STATE)];
    STATE = (STATE + 1) & 3;
    return ret;
  }

  inline float randn(uint32_t n_rounds) {
#ifdef __CUDA_ARCH__
    AT_ASSERT(false, "Unsupported invocation of randn on CUDA");
#endif
    if(STATE == 0) {
      detail::UINT4 counter = counter_;
      detail::UINT2 key = key_;
      output_ = rand(counter, key, n_rounds);
      incr();
    }
    // TODO(min-jean-cho) change to Polar method, a more efficient version of Box-Muller method
    // TODO(voz) We use std:: below, and thus need a separate impl for CUDA.
    float u1 = 1 - uint32_to_uniform_float(output_[0]); // uint32_to_uniform_float returns [0,1), we need (0,1] to avoid passing 0 to log.
    float u2 = 1 - uint32_to_uniform_float(output_[1]);
    return static_cast<float>(std::sqrt(-2.0 * std::log(u1)) * std::cos(2.0 * M_PI * u2));
  }

  /**
   * Function that Skips N 128 bit numbers in a subsequence
   */
  C10_HOST_DEVICE inline void incr_n(uint64_t n) {
    uint32_t nlo = static_cast<uint32_t>(n);
    uint32_t nhi = static_cast<uint32_t>(n >> 32);
    counter_[0] += nlo;
    // if overflow in x has occurred, carry over to nhi
    if (counter_[0] < nlo) {
      nhi++;
      // if overflow in nhi has occurred during carry over,
      // propagate that overflow to y and exit to increment z
      // otherwise return
      counter_[1] += nhi;
      if(nhi != 0) {
        if (nhi <= counter_[1]) {
          return;
        }
      }
    } else {
      // if overflow in y has occurred during addition,
      // exit to increment z
      // otherwise return
      counter_[1] += nhi;
      if (nhi <= counter_[1]) {
        return;
      }
    }
    if (++counter_[2])
      return;
    ++counter_[3];
  }

  /**
   * Function that Skips one 128 bit number in a subsequence
   */
  C10_HOST_DEVICE inline void incr() {
    if (++counter_[0])
      return;
    if (++counter_[1])
      return;
    if (++counter_[2]) {
      return;
    }
    ++counter_[3];
  }

private:
  detail::UINT4 counter_;
  detail::UINT4 output_;
  detail::UINT2 key_;
  uint32_t STATE;

  C10_HOST_DEVICE inline uint32_t mulhilo32(uint32_t a, uint32_t b,
                                            uint32_t *result_high) {
#ifdef __CUDA_ARCH__
    *result_high = __umulhi(a, b);
    return a*b;
#else
    const uint64_t product = static_cast<uint64_t>(a) * b;
    *result_high = static_cast<uint32_t>(product >> 32);
    return static_cast<uint32_t>(product);
#endif
  }

  C10_HOST_DEVICE inline detail::UINT4 single_round(detail::UINT4 ctr, detail::UINT2 in_key) {
    uint32_t hi0 = 0;
    uint32_t hi1 = 0;
    uint32_t lo0 = mulhilo32(kPhiloxSA, ctr[0], &hi0);
    uint32_t lo1 = mulhilo32(kPhiloxSB, ctr[2], &hi1);
    detail::UINT4 ret;
    ret[0] = hi1 ^ ctr[1] ^ in_key[0];
    ret[1] = lo1;
    ret[2] = hi0 ^ ctr[3] ^ in_key[1];
    ret[3] = lo0;
    return ret;
  }

  C10_HOST_DEVICE constexpr float uint32_to_uniform_float(uint32_t value) {
    // maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
    constexpr float scale = 4.6566127342e-10;
    return static_cast<float>(value & 0x7FFFFFFF) * scale;
  }



  C10_HOST_DEVICE inline detail::UINT4 rand(detail::UINT4& counter, detail::UINT2& key, uint32_t n_rounds) {
    for (uint32_t round = 0; round < (n_rounds - 1); round++) {
      counter = single_round(counter, key);
      key[0] += (kPhilox10A); key[1] += (kPhilox10B);
    }
    return single_round(counter, key);
  }


  static const uint32_t kPhilox10A = 0x9E3779B9;
  static const uint32_t kPhilox10B = 0xBB67AE85;
  static const uint32_t kPhiloxSA = 0xD2511F53;
  static const uint32_t kPhiloxSB = 0xCD9E8D57;
};

typedef philox_engine Philox4_32;

} // namespace at
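Note: a minimal sketch of the counter-based engine above; engines built with the same (seed, subsequence, offset) reproduce the same stream, and distinct subsequences give independent streams (not part of the diff; illustrative only):

#include <ATen/core/PhiloxRNGEngine.h>
#include <cstdint>

void philox_example() {
  at::Philox4_32 a(/*seed=*/42, /*subsequence=*/0, /*offset=*/0);
  at::Philox4_32 b(/*seed=*/42, /*subsequence=*/0, /*offset=*/0);
  uint32_t x = a();
  uint32_t y = b();                      // x == y: identical streams

  at::Philox4_32 other(/*seed=*/42, /*subsequence=*/1);
  uint32_t z = other();                  // a statistically independent stream
  (void)x; (void)y; (void)z;
}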
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h
ADDED
@@ -0,0 +1,28 @@
#pragma once
#include <ATen/core/TorchDispatchUtils.h>

namespace at {
namespace impl {

struct TORCH_API RestorePythonTLSSnapshot {
  RestorePythonTLSSnapshot();
  ~RestorePythonTLSSnapshot();

private:
  c10::impl::LocalDispatchKeySet saved_;
  c10::impl::ForceDispatchKeyGuard guard_;
};


// RAII guard to make working with the above TLS safer.
struct TORCH_API MaybeSetTLSOnEntryGuard {
public:
  MaybeSetTLSOnEntryGuard();
  ~MaybeSetTLSOnEntryGuard();

private:
  bool value_set_;
};

} // namespace impl
} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h
ADDED
@@ -0,0 +1,23 @@
#pragma once

#include <ATen/core/dispatch/Dispatcher.h>

// TODO: this can probably live in c10

namespace at {
namespace impl {

class TORCH_API PythonOpRegistrationTrampoline final {
  static std::atomic<c10::impl::PyInterpreter*> interpreter_;

public:
  // Returns true if you successfully registered yourself (that means
  // you are in the hot seat for doing the operator registrations!)
  static bool registerInterpreter(c10::impl::PyInterpreter*);

  // Returns nullptr if no interpreter has been registered yet.
  static c10::impl::PyInterpreter* getInterpreter();
};

} // namespace impl
} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h
ADDED
@@ -0,0 +1,83 @@
#pragma once

#include <c10/core/ScalarType.h>
#include <c10/core/QScheme.h>
#include <c10/util/intrusive_ptr.h>

namespace at {

class Tensor;
struct QTensorImpl;
struct Quantizer;
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
using QuantizerPtr = c10::intrusive_ptr<Quantizer>;

/**
 * Quantizer is the class for storing all the information
 * that's necessary to perform quantize and dequantize
 * operation.
 *
 * We might have different types of quantization schemes and this is
 * the base class for all quantizers.
 *
 * QTensorImpl will hold a pointer to Quantizer so that we can support
 * different quantization schemes on Tensor.
 *
 * For example, the most common quantization scheme, Affine Quantization,
 * requires scale and zero_point as parameters, we'll store scale and zero_point
 * inside the instance and we can use it to quantize a float Tensor or
 * dequantize a quantized Tensor.
 *
 * When you add new types of leaf Quantizer class, please also
 * make sure to add a corresponding QScheme enum since
 * they should have one to one mapping.
 *
 * Note about intrusive_ptr:
 * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can
 * share the same Quantizer. Quantizer should be immutable.
 */
struct TORCH_API Quantizer : public c10::intrusive_ptr_target {
  const ScalarType scalar_type_;
  explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {}
  ~Quantizer() override;

  // Copied from torch/csrc/jit/ir/scope.h
  QuantizerPtr intrusive_from_this() {
    c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
                                           // from a raw `this` pointer
                                           // so we need to bump the refcount
                                           // to account for this ownership
    return c10::intrusive_ptr<Quantizer>::reclaim(this);
  }

  /**
   * Each concrete Quantizer type should have a unique QScheme type.
   */
  virtual QScheme qscheme() const = 0;

  ScalarType scalar_type() const {
    return scalar_type_;
  }

  /**
   * quantize a float Tensor into a quantized Tensor.
   */
  virtual Tensor quantize(const Tensor& t) = 0;

  /**
   * dequantize a quantized Tensor into a float Tensor.
   */
  virtual Tensor dequantize(const Tensor& t) = 0;

  /**
   * dequantize a quantized Tensor into a float Tensor, out= variant
   */
  virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0;

  /**
   * Compare against `other` for equality.
   */
  virtual bool equalTo(QuantizerPtr other) const = 0;
};

} // namespace at
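Note: a sketch of the affine scheme described in the Quantizer comment, going through public quantization entry points rather than constructing a Quantizer directly; at::quantize_per_tensor and Tensor::dequantize are assumed ATen APIs and are not declared in this header (not part of the diff; illustrative only):

#include <ATen/ATen.h>

void quantizer_example() {
  at::Tensor x = at::rand({4});
  // Affine quantization: q = round(x / scale) + zero_point, stored as quint8;
  // the (scale, zero_point) pair is held by the tensor's Quantizer.
  at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/10, at::kQUInt8);
  at::Tensor back = q.dequantize();      // (q - zero_point) * scale
  (void)back;
}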
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Range.h
ADDED
@@ -0,0 +1,25 @@
#pragma once

#include <cstdint>
#include <iosfwd>

namespace at {

struct Range {
  Range(int64_t begin, int64_t end)
    : begin(begin)
    , end(end) {}

  int64_t size() const { return end - begin; }

  Range operator/(int64_t divisor) {
    return Range(begin / divisor, end / divisor);
  }

  int64_t begin;
  int64_t end;
};

std::ostream& operator<<(std::ostream& out, const Range& range);

} // namespace at
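Note: Range is a plain [begin, end) pair; a small sketch of carving a range into chunks of work (not part of the diff; illustrative only):

#include <ATen/core/Range.h>

void range_example() {
  at::Range whole(0, 100);
  at::Range first_half(whole.begin, whole.begin + whole.size() / 2);
  at::Range second_half(first_half.end, whole.end);
  // first_half covers [0, 50), second_half covers [50, 100).
  (void)second_half;
}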
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h
ADDED
@@ -0,0 +1,16 @@
#pragma once

namespace at {
namespace Reduction {

// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py
// These constants control the reduction behavior of loss functions.
// Ideally, this would be a scoped enum, but jit doesn't support that
enum Reduction {
  None,             // Do not reduce
  Mean,             // (Possibly weighted) mean of losses
  Sum,              // Sum losses
  END
};
} // namespace Reduction
} // namespace at
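Note: a sketch of how the enum is consumed; loss functions accept the reduction mode as an integer argument. at::mse_loss is used here as an assumed example of such a function and is not declared in this header (not part of the diff; illustrative only):

#include <ATen/ATen.h>
#include <ATen/core/Reduction.h>

void reduction_example() {
  at::Tensor input  = at::rand({3});
  at::Tensor target = at::rand({3});
  at::Tensor mean_loss = at::mse_loss(input, target, at::Reduction::Mean);  // scalar result
  at::Tensor per_elem  = at::mse_loss(input, target, at::Reduction::None);  // elementwise result
  (void)mean_loss; (void)per_elem;
}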
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h
ADDED
@@ -0,0 +1 @@
#include <c10/core/Scalar.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h
ADDED
@@ -0,0 +1 @@
#include <c10/core/ScalarType.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h
ADDED
@@ -0,0 +1,92 @@
#pragma once

#include <ATen/core/TensorBody.h>
#include <c10/util/Exception.h>

namespace at {
class TORCH_API OptionalTensorRef {
 public:
  OptionalTensorRef() = default;

  ~OptionalTensorRef() {
    ref_.unsafeReleaseTensorImpl();
  }

  OptionalTensorRef(const TensorBase& src)
      : ref_(Tensor::unsafe_borrow_t{}, src) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined());
  }

  OptionalTensorRef(const OptionalTensorRef& rhs)
      : ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {}

  OptionalTensorRef& operator=(OptionalTensorRef rhs) {
    std::swap(ref_, rhs.ref_);
    return *this;
  }

  bool has_value() const {
    return ref_.defined();
  }

  const Tensor& getTensorRef() const & {
    return ref_;
  }

  const Tensor& operator*() const & {
    return ref_;
  }

  const Tensor* operator->() const & {
    return &ref_;
  }

  operator bool() const {
    return ref_.defined();
  }

 private:
  Tensor ref_;
};

// Use to convert a TensorBase (that may be undefined) to an at::Tensor
// without bumping refcount.
class TORCH_API TensorRef {
 public:
  ~TensorRef() {
    ref_.unsafeReleaseTensorImpl();
  }

  TensorRef(const TensorBase& src)
      : ref_(Tensor::unsafe_borrow_t{}, src) {}

  const Tensor& operator*() const & {
    return ref_;
  }
 private:
  Tensor ref_;
};

template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> {
  // Return the grad argument in case of a hook with void return type to have an
  // std::function with Tensor return type
  static_assert(std::is_same<decltype(hook(Tensor())), void>::value,
                "Expected hook to return void");
  return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
    TensorRef grad(grad_base);
    fn(*grad);
    return Tensor();
  });
}

template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t<T> {
  return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
    TensorRef grad(grad_base);
    Tensor ret = fn(*grad);
    return TensorBase(std::move(ret));
  });
}

} // namespace at
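Note: a sketch of the two register_hook overloads defined above; a void-returning hook observes the gradient, while a Tensor-returning hook may replace it. Assumes a libtorch build with autograd enabled (not part of the diff; illustrative only):

#include <torch/torch.h>
#include <iostream>

void hook_example() {
  torch::Tensor x = torch::ones({2, 2}, torch::requires_grad());

  // void-returning hook: inspects the incoming gradient.
  x.register_hook([](const torch::Tensor& grad) {
    std::cout << "grad norm: " << grad.norm().item<double>() << "\n";
  });

  // Tensor-returning hook: rescales the gradient before it is accumulated.
  x.register_hook([](torch::Tensor grad) { return grad * 2; });

  x.sum().backward();
}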
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h
ADDED
@@ -0,0 +1,276 @@
#pragma once

#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <cstddef>
#include <cstdint>

namespace at {

// The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor
// is used to enable the __restrict__ keyword/modifier for the data
// passed to cuda.
template <typename T>
struct DefaultPtrTraits {
  typedef T* PtrType;
};

#if defined(__CUDACC__) || defined(__HIPCC__)
template <typename T>
struct RestrictPtrTraits {
  typedef T* __restrict__ PtrType;
};
#endif

// TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors.
// For CUDA tensors it is used in device code (only). This means that we restrict ourselves
// to functions and types available there (e.g. IntArrayRef isn't).

// The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
class TensorAccessorBase {
public:
  typedef typename PtrTraits<T>::PtrType PtrType;

  C10_HOST_DEVICE TensorAccessorBase(
      PtrType data_,
      const index_t* sizes_,
      const index_t* strides_)
      : data_(data_), sizes_(sizes_), strides_(strides_) {}
  C10_HOST IntArrayRef sizes() const {
    return IntArrayRef(sizes_,N);
  }
  C10_HOST IntArrayRef strides() const {
    return IntArrayRef(strides_,N);
  }
  C10_HOST_DEVICE index_t stride(index_t i) const {
    return strides_[i];
  }
  C10_HOST_DEVICE index_t size(index_t i) const {
    return sizes_[i];
  }
  C10_HOST_DEVICE PtrType data() {
    return data_;
  }
  C10_HOST_DEVICE const PtrType data() const {
    return data_;
  }
protected:
  PtrType data_;
  const index_t* sizes_;
  const index_t* strides_;
};

// The `TensorAccessor` is typically instantiated for CPU `Tensor`s using
// `Tensor.accessor<T, N>()`.
// For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only
// indexing on the device uses `TensorAccessor`s.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
class TensorAccessor : public TensorAccessorBase<T,N,PtrTraits,index_t> {
public:
  typedef typename PtrTraits<T>::PtrType PtrType;

  C10_HOST_DEVICE TensorAccessor(
      PtrType data_,
      const index_t* sizes_,
      const index_t* strides_)
      : TensorAccessorBase<T, N, PtrTraits, index_t>(data_,sizes_,strides_) {}

  C10_HOST_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
    return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
  }

  C10_HOST_DEVICE const TensorAccessor<T, N-1, PtrTraits, index_t> operator[](index_t i) const {
    return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
  }
};

template<typename T, template <typename U> class PtrTraits, typename index_t>
class TensorAccessor<T,1,PtrTraits,index_t> : public TensorAccessorBase<T,1,PtrTraits,index_t> {
public:
  typedef typename PtrTraits<T>::PtrType PtrType;

  C10_HOST_DEVICE TensorAccessor(
      PtrType data_,
      const index_t* sizes_,
      const index_t* strides_)
      : TensorAccessorBase<T, 1, PtrTraits, index_t>(data_,sizes_,strides_) {}
  C10_HOST_DEVICE T & operator[](index_t i) {
    // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
    return this->data_[this->strides_[0]*i];
  }
  C10_HOST_DEVICE const T & operator[](index_t i) const {
    return this->data_[this->strides_[0]*i];
  }
};


// GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used for CUDA `Tensor`s on the host.
// In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host)
// in order to transfer them on the device when calling kernels.
// On the device, indexing of multidimensional tensors gives to `TensorAccessor`s.
// Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__.
// Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available
// on the device, so those functions are host only.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
class GenericPackedTensorAccessorBase {
public:
  typedef typename PtrTraits<T>::PtrType PtrType;
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  C10_HOST GenericPackedTensorAccessorBase(
      PtrType data_,
      const index_t* sizes_,
      const index_t* strides_)
      : data_(data_) {
    std::copy(sizes_, sizes_ + N, std::begin(this->sizes_));
    std::copy(strides_, strides_ + N, std::begin(this->strides_));
|
131 |
+
}
|
132 |
+
|
133 |
+
// if index_t is not int64_t, we want to have an int64_t constructor
|
134 |
+
template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
|
135 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
136 |
+
C10_HOST GenericPackedTensorAccessorBase(
|
137 |
+
PtrType data_,
|
138 |
+
const source_index_t* sizes_,
|
139 |
+
const source_index_t* strides_)
|
140 |
+
: data_(data_) {
|
141 |
+
for (const auto i : c10::irange(N)) {
|
142 |
+
this->sizes_[i] = sizes_[i];
|
143 |
+
this->strides_[i] = strides_[i];
|
144 |
+
}
|
145 |
+
}
|
146 |
+
|
147 |
+
C10_HOST_DEVICE index_t stride(index_t i) const {
|
148 |
+
return strides_[i];
|
149 |
+
}
|
150 |
+
C10_HOST_DEVICE index_t size(index_t i) const {
|
151 |
+
return sizes_[i];
|
152 |
+
}
|
153 |
+
C10_HOST_DEVICE PtrType data() {
|
154 |
+
return data_;
|
155 |
+
}
|
156 |
+
C10_HOST_DEVICE const PtrType data() const {
|
157 |
+
return data_;
|
158 |
+
}
|
159 |
+
protected:
|
160 |
+
PtrType data_;
|
161 |
+
// NOLINTNEXTLINE(*c-arrays*)
|
162 |
+
index_t sizes_[N];
|
163 |
+
// NOLINTNEXTLINE(*c-arrays*)
|
164 |
+
index_t strides_[N];
|
165 |
+
C10_HOST void bounds_check_(index_t i) const {
|
166 |
+
TORCH_CHECK_INDEX(
|
167 |
+
0 <= i && i < index_t{N},
|
168 |
+
"Index ",
|
169 |
+
i,
|
170 |
+
" is not within bounds of a tensor of dimension ",
|
171 |
+
N);
|
172 |
+
}
|
173 |
+
};
|
174 |
+
|
175 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
176 |
+
class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase<T,N,PtrTraits,index_t> {
|
177 |
+
public:
|
178 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
179 |
+
|
180 |
+
C10_HOST GenericPackedTensorAccessor(
|
181 |
+
PtrType data_,
|
182 |
+
const index_t* sizes_,
|
183 |
+
const index_t* strides_)
|
184 |
+
: GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
185 |
+
|
186 |
+
// if index_t is not int64_t, we want to have an int64_t constructor
|
187 |
+
template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
|
188 |
+
C10_HOST GenericPackedTensorAccessor(
|
189 |
+
PtrType data_,
|
190 |
+
const source_index_t* sizes_,
|
191 |
+
const source_index_t* strides_)
|
192 |
+
: GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
193 |
+
|
194 |
+
C10_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
|
195 |
+
index_t* new_sizes = this->sizes_ + 1;
|
196 |
+
index_t* new_strides = this->strides_ + 1;
|
197 |
+
return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
|
198 |
+
}
|
199 |
+
|
200 |
+
C10_DEVICE const TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) const {
|
201 |
+
const index_t* new_sizes = this->sizes_ + 1;
|
202 |
+
const index_t* new_strides = this->strides_ + 1;
|
203 |
+
return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
|
204 |
+
}
|
205 |
+
|
206 |
+
/// Returns a PackedTensorAccessor of the same dimension after transposing the
|
207 |
+
/// two dimensions given. Does not actually move elements; transposition is
|
208 |
+
/// made by permuting the size/stride arrays. If the dimensions are not valid,
|
209 |
+
/// asserts.
|
210 |
+
C10_HOST GenericPackedTensorAccessor<T, N, PtrTraits, index_t> transpose(
|
211 |
+
index_t dim1,
|
212 |
+
index_t dim2) const {
|
213 |
+
this->bounds_check_(dim1);
|
214 |
+
this->bounds_check_(dim2);
|
215 |
+
GenericPackedTensorAccessor<T, N, PtrTraits, index_t> result(
|
216 |
+
this->data_, this->sizes_, this->strides_);
|
217 |
+
std::swap(result.strides_[dim1], result.strides_[dim2]);
|
218 |
+
std::swap(result.sizes_[dim1], result.sizes_[dim2]);
|
219 |
+
return result;
|
220 |
+
}
|
221 |
+
};
|
222 |
+
|
223 |
+
template<typename T, template <typename U> class PtrTraits, typename index_t>
|
224 |
+
class GenericPackedTensorAccessor<T,1,PtrTraits,index_t> : public GenericPackedTensorAccessorBase<T,1,PtrTraits,index_t> {
|
225 |
+
public:
|
226 |
+
typedef typename PtrTraits<T>::PtrType PtrType;
|
227 |
+
C10_HOST GenericPackedTensorAccessor(
|
228 |
+
PtrType data_,
|
229 |
+
const index_t* sizes_,
|
230 |
+
const index_t* strides_)
|
231 |
+
: GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
232 |
+
|
233 |
+
// if index_t is not int64_t, we want to have an int64_t constructor
|
234 |
+
template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
|
235 |
+
C10_HOST GenericPackedTensorAccessor(
|
236 |
+
PtrType data_,
|
237 |
+
const source_index_t* sizes_,
|
238 |
+
const source_index_t* strides_)
|
239 |
+
: GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
|
240 |
+
|
241 |
+
C10_DEVICE T & operator[](index_t i) {
|
242 |
+
return this->data_[this->strides_[0] * i];
|
243 |
+
}
|
244 |
+
C10_DEVICE const T& operator[](index_t i) const {
|
245 |
+
return this->data_[this->strides_[0]*i];
|
246 |
+
}
|
247 |
+
|
248 |
+
// Same as in the general N-dimensional case, but note that in the
|
249 |
+
// 1-dimensional case the returned PackedTensorAccessor will always be an
|
250 |
+
// identical copy of the original
|
251 |
+
C10_HOST GenericPackedTensorAccessor<T, 1, PtrTraits, index_t> transpose(
|
252 |
+
index_t dim1,
|
253 |
+
index_t dim2) const {
|
254 |
+
this->bounds_check_(dim1);
|
255 |
+
this->bounds_check_(dim2);
|
256 |
+
return GenericPackedTensorAccessor<T, 1, PtrTraits, index_t>(
|
257 |
+
this->data_, this->sizes_, this->strides_);
|
258 |
+
}
|
259 |
+
};
|
260 |
+
|
261 |
+
|
262 |
+
// Can't put this directly into the macro function args because of commas
|
263 |
+
#define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t>
|
264 |
+
|
265 |
+
// Old name for `GenericPackedTensorAccessor`
|
266 |
+
template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
267 |
+
C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X)
|
268 |
+
|
269 |
+
#undef AT_X
|
270 |
+
|
271 |
+
template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
272 |
+
using PackedTensorAccessor32 = GenericPackedTensorAccessor<T, N, PtrTraits, int32_t>;
|
273 |
+
|
274 |
+
template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
275 |
+
using PackedTensorAccessor64 = GenericPackedTensorAccessor<T, N, PtrTraits, int64_t>;
|
276 |
+
} // namespace at
|
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h
ADDED
@@ -0,0 +1,1039 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/core/Device.h>
|
4 |
+
#include <c10/core/Layout.h>
|
5 |
+
#include <c10/core/MemoryFormat.h>
|
6 |
+
#include <c10/core/ScalarType.h>
|
7 |
+
#include <c10/core/ScalarTypeToTypeMeta.h>
|
8 |
+
#include <c10/core/Storage.h>
|
9 |
+
#include <c10/core/SymIntArrayRef.h>
|
10 |
+
#include <c10/core/TensorImpl.h>
|
11 |
+
#include <c10/core/TensorOptions.h>
|
12 |
+
#include <c10/core/UndefinedTensorImpl.h>
|
13 |
+
#include <c10/core/WrapDimMinimal.h>
|
14 |
+
#include <c10/util/Exception.h>
|
15 |
+
#include <c10/util/ExclusivelyOwned.h>
|
16 |
+
#include <c10/util/ExclusivelyOwnedTensorTraits.h>
|
17 |
+
#include <c10/util/MaybeOwned.h>
|
18 |
+
#include <c10/util/Optional.h>
|
19 |
+
#include <c10/util/intrusive_ptr.h>
|
20 |
+
|
21 |
+
#include <ATen/core/NamedTensor.h>
|
22 |
+
#include <ATen/core/QuantizerBase.h>
|
23 |
+
#include <ATen/core/TensorAccessor.h>
|
24 |
+
#include <ATen/StorageUtils.h>
|
25 |
+
|
26 |
+
namespace c10 {
|
27 |
+
class Scalar;
|
28 |
+
}
|
29 |
+
|
30 |
+
namespace torch { namespace autograd {
|
31 |
+
|
32 |
+
struct Node;
|
33 |
+
|
34 |
+
}} // namespace torch::autograd
|
35 |
+
|
36 |
+
namespace at {
|
37 |
+
|
38 |
+
class Tensor;
|
39 |
+
class TensorBase;
|
40 |
+
|
41 |
+
// Convert Tensor to TensorBase without any need to include Tensor.h
|
42 |
+
TORCH_API const TensorBase& get_tensor_base(const Tensor& t);
|
43 |
+
|
44 |
+
namespace impl {
|
45 |
+
inline bool variable_excluded_from_dispatch() {
|
46 |
+
#ifdef C10_MOBILE
|
47 |
+
// Please read the comment in `VariableFallbackKernel.cpp` about the background of this change.
|
48 |
+
return true;
|
49 |
+
#else
|
50 |
+
return c10::impl::tls_local_dispatch_key_set().excluded_.isSupersetOf(c10::autograd_dispatch_keyset);
|
51 |
+
#endif
|
52 |
+
}
|
53 |
+
|
54 |
+
}
|
55 |
+
|
56 |
+
// NOTE: [Tensor vs. TensorBase]
|
57 |
+
//
|
58 |
+
// Tensor, being the central data structure in PyTorch, gets used and
|
59 |
+
// it's header included almost everywhere. Unfortunately this means
|
60 |
+
// every time an operator signature is updated or changed in
|
61 |
+
// native_functions.yaml, you (and every other PyTorch developer) need
|
62 |
+
// to recompile all of ATen and it's dependencies.
|
63 |
+
//
|
64 |
+
// TensorBase aims to break up these header dependencies, and improve
|
65 |
+
// incremental build times for all PyTorch developers. TensorBase
|
66 |
+
// represents a reference counted handle to TensorImpl, exactly the
|
67 |
+
// same as Tensor. However, TensorBase doesn't have code generated
|
68 |
+
// methods in it's API and thus no dependence on native_functions.yaml.
|
69 |
+
//
|
70 |
+
// Usage tips
|
71 |
+
// ----------
|
72 |
+
// - You can `#define TORCH_ASSERT_NO_OPERATORS` at the top of a .cpp
|
73 |
+
// or .cu file to ensure it has no header dependencies on
|
74 |
+
// native_functions.yaml (direct or indirect).
|
75 |
+
// - Tensor inherits from TensorBase, so functions taking
|
76 |
+
// `const TensorBase &` are callable with Tensor as well.
|
77 |
+
// - TensorBase can be converted to tensor with `Tensor(tensor_base)`,
|
78 |
+
// but this requires a reference-count bump. OptionalTensorRef on
|
79 |
+
// the other hand can materialize a `const Tensor &` without
|
80 |
+
// touching the reference-count.
|
81 |
+
class TORCH_API TensorBase {
|
82 |
+
public:
|
83 |
+
struct unsafe_borrow_t { explicit unsafe_borrow_t() = default; };
|
84 |
+
|
85 |
+
protected:
|
86 |
+
// Create a Tensor with a +0 reference count. Special care must be
|
87 |
+
// taken to avoid decrementing this reference count at destruction
|
88 |
+
// time. Intended to support MaybeOwnedTraits<Tensor>.
|
89 |
+
explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs)
|
90 |
+
: impl_(c10::intrusive_ptr<at::TensorImpl, UndefinedTensorImpl>::reclaim(rhs.impl_.get())) {}
|
91 |
+
friend MaybeOwnedTraits<TensorBase>;
|
92 |
+
|
93 |
+
public:
|
94 |
+
TensorBase() = default;
|
95 |
+
// This constructor should not be used by end users and is an implementation
|
96 |
+
// detail invoked by autogenerated code.
|
97 |
+
explicit TensorBase(
|
98 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
|
99 |
+
: impl_(std::move(tensor_impl)) {
|
100 |
+
if (impl_.get() == nullptr) {
|
101 |
+
throw std::runtime_error("TensorImpl with nullptr is not supported");
|
102 |
+
}
|
103 |
+
}
|
104 |
+
TensorBase(const TensorBase&) = default;
|
105 |
+
TensorBase(TensorBase&&) noexcept = default;
|
106 |
+
|
107 |
+
public:
|
108 |
+
// Creates a new wrapper from TensorImpl. Intentionally a free method because
|
109 |
+
// it should be used with care. Checks necessary invariants
|
110 |
+
static TensorBase wrap_tensor_impl(
|
111 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
|
112 |
+
TensorBase r(std::move(tensor_impl));
|
113 |
+
r.enforce_invariants();
|
114 |
+
return r;
|
115 |
+
}
|
116 |
+
|
117 |
+
int64_t dim() const {
|
118 |
+
return impl_->dim();
|
119 |
+
}
|
120 |
+
int64_t storage_offset() const {
|
121 |
+
return impl_->storage_offset();
|
122 |
+
}
|
123 |
+
|
124 |
+
TensorBase contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
|
125 |
+
if (is_contiguous(memory_format)) {
|
126 |
+
return *this;
|
127 |
+
} else {
|
128 |
+
return __dispatch_contiguous(memory_format);
|
129 |
+
}
|
130 |
+
}
|
131 |
+
|
132 |
+
/// Should be used if *this can reasonably be expected to be contiguous and
|
133 |
+
/// performance is important.
|
134 |
+
/// Compared to contiguous, it saves a reference count
|
135 |
+
/// increment/decrement if *this is already contiguous, at the cost
|
136 |
+
/// in all cases of an extra pointer of stack usage, an extra branch
|
137 |
+
/// to access, and an extra branch at destruction time.
|
138 |
+
c10::MaybeOwned<TensorBase> expect_contiguous(
|
139 |
+
MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
|
140 |
+
|
141 |
+
// Use .contiguous() instead. Trying to borrow from a prvalue
|
142 |
+
// will only lead to trouble and dangling references.
|
143 |
+
c10::MaybeOwned<TensorBase> expect_contiguous(
|
144 |
+
MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
|
145 |
+
|
146 |
+
const TensorBase& fill_(const c10::Scalar& scalar) const;
|
147 |
+
const TensorBase& zero_() const;
|
148 |
+
|
149 |
+
TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
|
150 |
+
|
151 |
+
bool is_complex() const {
|
152 |
+
return at::isComplexType(this->scalar_type());
|
153 |
+
}
|
154 |
+
|
155 |
+
bool is_floating_point() const {
|
156 |
+
return at::isFloatingType(this->scalar_type());
|
157 |
+
}
|
158 |
+
|
159 |
+
bool is_signed() const {
|
160 |
+
return at::isSignedType(this->scalar_type());
|
161 |
+
}
|
162 |
+
|
163 |
+
c10::SymInt sym_size(int64_t dim) const {
|
164 |
+
return impl_->sym_size(dim);
|
165 |
+
}
|
166 |
+
|
167 |
+
c10::SymInt sym_stride(int64_t dim) const {
|
168 |
+
const auto sizes = this->sym_strides();
|
169 |
+
const auto ndim = static_cast<int64_t>(sizes.size());
|
170 |
+
// false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
|
171 |
+
return sizes[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)];
|
172 |
+
|
173 |
+
}
|
174 |
+
|
175 |
+
int64_t size(int64_t dim) const {
|
176 |
+
return impl_->size(dim);
|
177 |
+
}
|
178 |
+
|
179 |
+
int64_t stride(int64_t dim) const {
|
180 |
+
const auto strides = this->strides();
|
181 |
+
const auto ndim = static_cast<int64_t>(strides.size());
|
182 |
+
// false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
|
183 |
+
return strides[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)];
|
184 |
+
}
|
185 |
+
|
186 |
+
TensorImpl * unsafeGetTensorImpl() const {
|
187 |
+
return impl_.get();
|
188 |
+
}
|
189 |
+
TensorImpl * unsafeReleaseTensorImpl() {
|
190 |
+
return impl_.release();
|
191 |
+
}
|
192 |
+
const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr() const {
|
193 |
+
return impl_;
|
194 |
+
}
|
195 |
+
|
196 |
+
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> unsafeReleaseIntrusivePtr() {
|
197 |
+
return std::move(impl_);
|
198 |
+
}
|
199 |
+
|
200 |
+
bool defined() const {
|
201 |
+
return impl_;
|
202 |
+
}
|
203 |
+
|
204 |
+
void reset() {
|
205 |
+
impl_.reset();
|
206 |
+
}
|
207 |
+
|
208 |
+
#if defined (_MSC_VER)
|
209 |
+
TensorBase& operator=(const TensorBase& x) & {
|
210 |
+
impl_ = x.impl_;
|
211 |
+
return *this;
|
212 |
+
};
|
213 |
+
TensorBase& operator=(TensorBase&& x) & noexcept {
|
214 |
+
impl_ = std::move(x.impl_);
|
215 |
+
return *this;
|
216 |
+
}
|
217 |
+
#else
|
218 |
+
TensorBase& operator=(const TensorBase& x) & = default;
|
219 |
+
TensorBase& operator=(TensorBase&& x) & noexcept = default;
|
220 |
+
#endif
|
221 |
+
|
222 |
+
// Ban assignment to rvalues, since at::Tensor (weirdly) performs a deep copy here
|
223 |
+
TensorBase& operator=(const TensorBase&) && = delete;
|
224 |
+
TensorBase& operator=(TensorBase&&) && noexcept = delete;
|
225 |
+
|
226 |
+
bool is_same(const TensorBase& other) const noexcept {
|
227 |
+
return impl_ == other.impl_;
|
228 |
+
}
|
229 |
+
size_t use_count() const noexcept {
|
230 |
+
return impl_.use_count();
|
231 |
+
}
|
232 |
+
size_t weak_use_count() const noexcept {
|
233 |
+
return impl_.weak_use_count();
|
234 |
+
}
|
235 |
+
|
236 |
+
std::string toString() const;
|
237 |
+
|
238 |
+
IntArrayRef sizes() const {
|
239 |
+
return impl_->sizes();
|
240 |
+
}
|
241 |
+
c10::SymIntArrayRef sym_sizes() const {
|
242 |
+
return impl_->sym_sizes();
|
243 |
+
}
|
244 |
+
c10::SymIntArrayRef sym_strides() const {
|
245 |
+
return impl_->sym_strides();
|
246 |
+
}
|
247 |
+
IntArrayRef strides() const {
|
248 |
+
return impl_->strides();
|
249 |
+
}
|
250 |
+
// See impl::get_opt_names in ATen/NamedTensor.h for docs.
|
251 |
+
c10::optional<DimnameList> opt_names() const {
|
252 |
+
return impl::get_opt_names(unsafeGetTensorImpl());
|
253 |
+
}
|
254 |
+
// See impl::get_names in ATen/NamedTensor.h for docs.
|
255 |
+
DimnameList names() const {
|
256 |
+
return impl::get_names(unsafeGetTensorImpl());
|
257 |
+
}
|
258 |
+
int64_t ndimension() const {
|
259 |
+
return dim();
|
260 |
+
}
|
261 |
+
|
262 |
+
bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const {
|
263 |
+
return impl_->is_contiguous(memory_format);
|
264 |
+
}
|
265 |
+
|
266 |
+
bool is_non_overlapping_and_dense() const {
|
267 |
+
return impl_->is_non_overlapping_and_dense();
|
268 |
+
}
|
269 |
+
|
270 |
+
at::MemoryFormat suggest_memory_format(
|
271 |
+
bool channels_last_strides_exact_match = false) const {
|
272 |
+
// Setting channels_last_strides_exact_match to true forces function to
|
273 |
+
// check 0,1 - sized dimension strides.
|
274 |
+
if (layout() == at::kStrided) {
|
275 |
+
if (impl_->is_strides_like_channels_last()) {
|
276 |
+
if (!channels_last_strides_exact_match ||
|
277 |
+
get_channels_last_strides_2d(sizes()) == strides()) {
|
278 |
+
return at::MemoryFormat::ChannelsLast;
|
279 |
+
}
|
280 |
+
}
|
281 |
+
else if (impl_->is_strides_like_channels_last_3d()) {
|
282 |
+
if (!channels_last_strides_exact_match ||
|
283 |
+
get_channels_last_strides_3d(sizes()) == strides()) {
|
284 |
+
return at::MemoryFormat::ChannelsLast3d;
|
285 |
+
}
|
286 |
+
}
|
287 |
+
}
|
288 |
+
return at::MemoryFormat::Contiguous;
|
289 |
+
}
|
290 |
+
|
291 |
+
// Total bytes consumed by the "view" of elements of the array. Does not
|
292 |
+
// include size of metadata. The number reported here does not necessarily
|
293 |
+
// correspond to the true physical memory consumed by a tensor; instead,
|
294 |
+
// it reports the memory the tensor would take *if* it were contiguous.
|
295 |
+
// Defined to be numel() * itemsize()
|
296 |
+
size_t nbytes() const {
|
297 |
+
TORCH_CHECK(layout () != at::kSparse,
|
298 |
+
"nbytes is not defined for sparse tensors. If you want the size of the constituent " \
|
299 |
+
"tensors, add the nbytes of the indices and values. If you want the size of the " \
|
300 |
+
"equivalent dense tensor, multiply numel() by element_size()");
|
301 |
+
return impl_->numel() * impl_->itemsize();
|
302 |
+
}
|
303 |
+
|
304 |
+
c10::SymInt sym_nbytes() const {
|
305 |
+
TORCH_CHECK(layout () != at::kSparse,
|
306 |
+
"nbytes is not defined for sparse tensors. If you want the size of the constituent " \
|
307 |
+
"tensors, add the nbytes of the indices and values. If you want the size of the " \
|
308 |
+
"equivalent dense tensor, multiply numel() by element_size()");
|
309 |
+
return impl_->sym_numel() * impl_->itemsize();
|
310 |
+
}
|
311 |
+
|
312 |
+
int64_t numel() const {
|
313 |
+
return impl_->numel();
|
314 |
+
}
|
315 |
+
|
316 |
+
c10::SymInt sym_numel() const {
|
317 |
+
return impl_->sym_numel();
|
318 |
+
}
|
319 |
+
|
320 |
+
c10::SymInt sym_storage_offset() const {
|
321 |
+
return impl_->sym_storage_offset();
|
322 |
+
}
|
323 |
+
|
324 |
+
// Length of one array element in bytes. This is the traditional
|
325 |
+
// Numpy naming.
|
326 |
+
size_t itemsize() const {
|
327 |
+
return impl_->itemsize();
|
328 |
+
}
|
329 |
+
|
330 |
+
// Same as itemsize(). This is the PyTorch naming.
|
331 |
+
int64_t element_size() const {
|
332 |
+
return static_cast<int64_t>(impl_->itemsize());
|
333 |
+
}
|
334 |
+
|
335 |
+
DispatchKeySet key_set() const {
|
336 |
+
return impl_->key_set();
|
337 |
+
}
|
338 |
+
ScalarType scalar_type() const {
|
339 |
+
return typeMetaToScalarType(impl_->dtype());
|
340 |
+
}
|
341 |
+
bool has_storage() const {
|
342 |
+
return defined() && impl_->has_storage();
|
343 |
+
}
|
344 |
+
const Storage& storage() const {
|
345 |
+
return impl_->storage();
|
346 |
+
}
|
347 |
+
bool is_alias_of(const at::TensorBase& other) const{
|
348 |
+
return impl_->storage().is_alias_of(other.storage());
|
349 |
+
}
|
350 |
+
|
351 |
+
// Move the storage backend to shm based
|
352 |
+
// to enable memory sharing across processes.
|
353 |
+
//
|
354 |
+
// NB1: the ideal behavior of this API still requires further discussion
|
355 |
+
// but for now we are inclined to keep it consistent with existing THP behavior
|
356 |
+
// https://github.com/pytorch/pytorch/blob/4dca9bde0552afc67b5b74f4a0696fe6055709c4/torch/storage.py#L196-L212
|
357 |
+
// so we don't assert on anything here and rely on caller knowing
|
358 |
+
// what it's doing.
|
359 |
+
//
|
360 |
+
// NB2: this currently provides Linux fd based shm support only
|
361 |
+
// to simplify the storage lifetime management logic in ATen
|
362 |
+
// and similarly for now we are not adding support for file system based
|
363 |
+
// shm support like in THP due to additional GC manager support needed
|
364 |
+
// to prevent leaks.
|
365 |
+
// As such, calling this from non supported systems (e.g. Windows) would fail.
|
366 |
+
void share_memory_() {
|
367 |
+
at::share_memory_(*this);
|
368 |
+
}
|
369 |
+
|
370 |
+
inline bool _is_zerotensor() const {
|
371 |
+
return impl_->_is_zerotensor();
|
372 |
+
}
|
373 |
+
|
374 |
+
inline void _set_zero(bool zero) const {
|
375 |
+
impl_->_set_zero(zero);
|
376 |
+
}
|
377 |
+
|
378 |
+
inline bool is_conj() const {
|
379 |
+
return impl_->is_conj();
|
380 |
+
}
|
381 |
+
|
382 |
+
// sets the conjugate bit of a tensor.
|
383 |
+
// NOTE: Conjugate bit is supposed to be a read-only field. Only change this, if you are sure
|
384 |
+
// that's what you want. Changing this might lead to incorrect behavior since conjugation is
|
385 |
+
// a lazy operation and we rely on this bit to determine if a conjugation needs to be materialized.
|
386 |
+
inline void _set_conj(bool conjugate) const {
|
387 |
+
impl_->_set_conj(conjugate);
|
388 |
+
}
|
389 |
+
|
390 |
+
inline bool is_neg() const {
|
391 |
+
return impl_->is_neg();
|
392 |
+
}
|
393 |
+
|
394 |
+
// sets the negative bit of a tensor.
|
395 |
+
// NOTE: Negative bit is supposed to be a read-only field. Only change this, if you are sure
|
396 |
+
// that's what you want. Changing this might lead to incorrect behavior since we rely on this
|
397 |
+
// bit to determine if a negation needs to be materialized.
|
398 |
+
inline void _set_neg(bool negative) const {
|
399 |
+
impl_->_set_neg(negative);
|
400 |
+
}
|
401 |
+
|
402 |
+
/// Returns a `Tensor`'s layout.
|
403 |
+
Layout layout() const {
|
404 |
+
return impl_->layout();
|
405 |
+
}
|
406 |
+
|
407 |
+
/// Returns a `Tensor`'s dtype (`TypeMeta`).
|
408 |
+
caffe2::TypeMeta dtype() const {
|
409 |
+
return impl_->dtype();
|
410 |
+
}
|
411 |
+
|
412 |
+
/// Returns a `Tensor`'s device.
|
413 |
+
inline Device device() const {
|
414 |
+
return impl_->device();
|
415 |
+
}
|
416 |
+
|
417 |
+
/// Returns a `Tensor`'s device index.
|
418 |
+
int64_t get_device() const {
|
419 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
420 |
+
return impl_->get_device();
|
421 |
+
}
|
422 |
+
|
423 |
+
/// Returns if a `Tensor` has CPU backend.
|
424 |
+
bool is_cpu() const {
|
425 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
426 |
+
return impl_->is_cpu();
|
427 |
+
}
|
428 |
+
|
429 |
+
/// Returns if a `Tensor` has CUDA backend.
|
430 |
+
bool is_cuda() const {
|
431 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
432 |
+
return impl_->is_cuda();
|
433 |
+
}
|
434 |
+
|
435 |
+
/// Returns if a `Tensor` has IPU backend.
|
436 |
+
bool is_ipu() const {
|
437 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
438 |
+
return impl_->is_ipu();
|
439 |
+
}
|
440 |
+
|
441 |
+
/// Returns if a `Tensor` has XPU backend.
|
442 |
+
bool is_xpu() const {
|
443 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
444 |
+
return impl_->is_xpu();
|
445 |
+
}
|
446 |
+
|
447 |
+
/// Returns if a `Tensor` has XLA backend.
|
448 |
+
bool is_xla() const {
|
449 |
+
return impl_->is_xla();
|
450 |
+
}
|
451 |
+
|
452 |
+
/// Returns if a `Tensor` has MTIA backend.
|
453 |
+
bool is_mtia() const {
|
454 |
+
return impl_->is_mtia();
|
455 |
+
}
|
456 |
+
|
457 |
+
/// Returns if a `Tensor` has HPU backend.
|
458 |
+
bool is_hpu() const {
|
459 |
+
return impl_->is_hpu();
|
460 |
+
}
|
461 |
+
|
462 |
+
/// Returns if a `Tensor` has Lazy backend.
|
463 |
+
bool is_lazy() const {
|
464 |
+
return impl_->is_lazy();
|
465 |
+
}
|
466 |
+
|
467 |
+
/// Returns if a `Tensor` has HIP backend.
|
468 |
+
bool is_hip() const {
|
469 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
470 |
+
return impl_->is_hip();
|
471 |
+
}
|
472 |
+
|
473 |
+
/// Returns if a `Tensor` has VE backend.
|
474 |
+
bool is_ve() const {
|
475 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
476 |
+
return impl_->is_ve();
|
477 |
+
}
|
478 |
+
|
479 |
+
/// Returns if a `Tensor` has PrivateUse1 backend.
|
480 |
+
bool is_privateuseone() const {
|
481 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
482 |
+
return impl_->is_privateuseone();
|
483 |
+
}
|
484 |
+
|
485 |
+
/// Returns if a `Tensor` has sparse backend.
|
486 |
+
bool is_sparse() const {
|
487 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
488 |
+
return impl_->is_sparse();
|
489 |
+
}
|
490 |
+
|
491 |
+
/// Returns is a `Tensor` has a sparse CSR backend.
|
492 |
+
bool is_sparse_csr() const {
|
493 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
494 |
+
return impl_->is_sparse_csr();
|
495 |
+
}
|
496 |
+
|
497 |
+
/// Returns if a `Tensor` is mkldnn tensor.
|
498 |
+
bool is_mkldnn() const {
|
499 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
500 |
+
return impl_->is_mkldnn();
|
501 |
+
}
|
502 |
+
|
503 |
+
/// Returns if a `Tensor` is mps tensor.
|
504 |
+
bool is_mps() const {
|
505 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
506 |
+
return impl_->is_mps();
|
507 |
+
}
|
508 |
+
|
509 |
+
/// Returns if a `Tensor` is ort tensor.
|
510 |
+
bool is_ort() const {
|
511 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
512 |
+
return impl_->is_ort();
|
513 |
+
}
|
514 |
+
|
515 |
+
/// Returns if a `Tensor` is vulkan tensor.
|
516 |
+
bool is_vulkan() const {
|
517 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
518 |
+
return impl_->is_vulkan();
|
519 |
+
}
|
520 |
+
|
521 |
+
/// Returns if a `Tensor` is metal tensor.
|
522 |
+
bool is_metal() const {
|
523 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
524 |
+
return impl_->is_metal();
|
525 |
+
}
|
526 |
+
|
527 |
+
/// Returns if a `Tensor` has quantized backend.
|
528 |
+
bool is_quantized() const {
|
529 |
+
// NB: this is not a native function to avoid dispatching overhead.
|
530 |
+
return impl_->is_quantized();
|
531 |
+
}
|
532 |
+
|
533 |
+
/// Returns if a `Tensor` is a meta tensor. Meta tensors can
|
534 |
+
/// also have other designations.
|
535 |
+
bool is_meta() const {
|
536 |
+
return impl_->is_meta();
|
537 |
+
}
|
538 |
+
|
539 |
+
/// Returns if a `Tensor` is an inference tensor.
|
540 |
+
bool is_inference() const {
|
541 |
+
return impl_->is_inference();
|
542 |
+
}
|
543 |
+
|
544 |
+
// Returns if a `Tensor` is a NestedTensor.
|
545 |
+
bool is_nested() const {
|
546 |
+
return impl_->is_nested();
|
547 |
+
}
|
548 |
+
|
549 |
+
/// If a tensor is a quantized tensor, returns its quantizer
|
550 |
+
/// TODO: it's not in native_functions.yaml yet as it's not exposed to python
|
551 |
+
QuantizerPtr quantizer() const;
|
552 |
+
|
553 |
+
/// Returns if a `Tensor` has any dimension names
|
554 |
+
bool has_names() const {
|
555 |
+
// If a user is using unnamed tensors, then we can short-circuit right here.
|
556 |
+
// Otherwise, impl::has_names attempts to retrieve names.
|
557 |
+
if (!impl_->has_named_tensor_meta()) {
|
558 |
+
return false;
|
559 |
+
}
|
560 |
+
return impl::has_names(unsafeGetTensorImpl());
|
561 |
+
}
|
562 |
+
|
563 |
+
/// Returns a `Tensor`'s dimension names data structure
|
564 |
+
const NamedTensorMeta* get_named_tensor_meta() const {
|
565 |
+
return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
|
566 |
+
}
|
567 |
+
|
568 |
+
NamedTensorMeta* get_named_tensor_meta() {
|
569 |
+
return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
|
570 |
+
}
|
571 |
+
|
572 |
+
/// Returns the `TensorOptions` corresponding to this `Tensor`. Defined in
|
573 |
+
/// TensorOptions.h.
|
574 |
+
TensorOptions options() const {
|
575 |
+
return TensorOptions().dtype(dtype())
|
576 |
+
.device(device())
|
577 |
+
.layout(layout());
|
578 |
+
}
|
579 |
+
|
580 |
+
const void* const_data_ptr() const {
|
581 |
+
return this->unsafeGetTensorImpl()->data();
|
582 |
+
}
|
583 |
+
|
584 |
+
void* mutable_data_ptr() const {
|
585 |
+
return this->unsafeGetTensorImpl()->mutable_data();
|
586 |
+
}
|
587 |
+
|
588 |
+
// TODO(#97856) Make this return a const pointer. This currently
|
589 |
+
// returns a non-const pointer because of the large
|
590 |
+
// number of clients that we still want to audit before
|
591 |
+
// migrating to mutable_data_ptr().
|
592 |
+
void* data_ptr() const {
|
593 |
+
return mutable_data_ptr();
|
594 |
+
}
|
595 |
+
|
596 |
+
template <typename T>
|
597 |
+
const T* const_data_ptr() const;
|
598 |
+
|
599 |
+
template <typename T>
|
600 |
+
T* mutable_data_ptr() const;
|
601 |
+
|
602 |
+
// Legacy interface during the migration to indicate that a callsite
|
603 |
+
// has not been audited for mutability.
|
604 |
+
//
|
605 |
+
// Do not add new uses of this, use const_data_ptr() if possible,
|
606 |
+
// mutable_data_ptr() otherwise.
|
607 |
+
//
|
608 |
+
// TODO(#97856) Make this return a const pointer. This is currently
|
609 |
+
// const because of the vast number of clients that
|
610 |
+
// rely on this.
|
611 |
+
template <typename T>
|
612 |
+
T* data_ptr() const;
|
613 |
+
|
614 |
+
// Purposely not defined here to avoid inlining
|
615 |
+
void print() const;
|
616 |
+
|
617 |
+
// Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and
|
618 |
+
// dimension.
|
619 |
+
template<typename T, size_t N>
|
620 |
+
TensorAccessor<T,N> accessor() const& {
|
621 |
+
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
|
622 |
+
TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim());
|
623 |
+
return TensorAccessor<T,N>(data_ptr<T>(),sizes().data(),strides().data());
|
624 |
+
}
|
625 |
+
template<typename T, size_t N>
|
626 |
+
TensorAccessor<T,N> accessor() && = delete;
|
627 |
+
|
628 |
+
// Return a `GenericPackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and
|
629 |
+
// dimension. You can optionally specify RestrictPtrTraits as a template parameter to
|
630 |
+
// cast the data pointer to a __restrict__ pointer.
|
631 |
+
// In order to use this, your CUDA kernel has to take a corresponding GenericPackedTensorAccessor
|
632 |
+
// as an argument.
|
633 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
634 |
+
GenericPackedTensorAccessor<T,N,PtrTraits,index_t> generic_packed_accessor() const& {
|
635 |
+
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
|
636 |
+
TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim());
|
637 |
+
return GenericPackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(data_ptr<T>()),sizes().data(),strides().data());
|
638 |
+
}
|
639 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
640 |
+
GenericPackedTensorAccessor<T,N> generic_packed_accessor() && = delete;
|
641 |
+
|
642 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
643 |
+
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() const& {
|
644 |
+
TORCH_CHECK(
|
645 |
+
impl_->numel() <=
|
646 |
+
static_cast<int64_t>(std::numeric_limits<int32_t>::max()),
|
647 |
+
"numel needs to be smaller than int32_t max; otherwise, please use packed_accessor64");
|
648 |
+
return generic_packed_accessor<T,N,PtrTraits,int32_t>();
|
649 |
+
}
|
650 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
651 |
+
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() && = delete;
|
652 |
+
|
653 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
654 |
+
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() const& {
|
655 |
+
return generic_packed_accessor<T,N,PtrTraits,int64_t>();
|
656 |
+
}
|
657 |
+
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
658 |
+
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() && = delete;
|
659 |
+
|
660 |
+
// ~~~~~ Autograd API ~~~~~
|
661 |
+
|
662 |
+
/// \fn bool is_leaf() const;
|
663 |
+
///
|
664 |
+
/// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
|
665 |
+
///
|
666 |
+
/// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
|
667 |
+
/// created by the user. This means that they are not the result of an operation and so
|
668 |
+
/// `grad_fn()` is `nullptr`.
|
669 |
+
///
|
670 |
+
/// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
|
671 |
+
/// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
|
672 |
+
///
|
673 |
+
/// Example:
|
674 |
+
/// @code
|
675 |
+
/// auto a = torch::rand(10, torch::requires_grad());
|
676 |
+
/// std::cout << a.is_leaf() << std::endl; // prints `true`
|
677 |
+
///
|
678 |
+
/// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
|
679 |
+
/// std::cout << b.is_leaf() << std::endl; // prints `false`
|
680 |
+
/// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
|
681 |
+
///
|
682 |
+
/// auto c = torch::rand(10, torch::requires_grad()) + 2;
|
683 |
+
/// std::cout << c.is_leaf() << std::endl; // prints `false`
|
684 |
+
/// // c was created by the addition operation
|
685 |
+
///
|
686 |
+
/// auto d = torch::rand(10).cuda();
|
687 |
+
/// std::cout << d.is_leaf() << std::endl; // prints `true`
|
688 |
+
/// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
|
689 |
+
///
|
690 |
+
/// auto e = torch::rand(10).cuda().requires_grad_();
|
691 |
+
/// std::cout << e.is_leaf() << std::endl; // prints `true`
|
692 |
+
/// // e requires gradients and has no operations creating it
|
693 |
+
///
|
694 |
+
/// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
|
695 |
+
/// std::cout << f.is_leaf() << std::endl; // prints `true`
|
696 |
+
/// // f requires grad, has no operation creating it
|
697 |
+
/// @endcode
|
698 |
+
|
699 |
+
/// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
|
700 |
+
///
|
701 |
+
/// Computes the gradient of current tensor with respect to graph leaves.
|
702 |
+
///
|
703 |
+
/// The graph is differentiated using the chain rule. If the tensor is
|
704 |
+
/// non-scalar (i.e. its data has more than one element) and requires
|
705 |
+
/// gradient, the function additionally requires specifying ``gradient``.
|
706 |
+
/// It should be a tensor of matching type and location, that contains
|
707 |
+
/// the gradient of the differentiated function w.r.t. this Tensor.
|
708 |
+
///
|
709 |
+
/// This function accumulates gradients in the leaves - you might need to
|
710 |
+
/// zero them before calling it.
|
711 |
+
///
|
712 |
+
/// \param gradient Gradient w.r.t. the
|
713 |
+
/// tensor. If it is a tensor, it will be automatically converted
|
714 |
+
/// to a Tensor that does not require grad unless ``create_graph`` is True.
|
715 |
+
/// None values can be specified for scalar Tensors or ones that
|
716 |
+
/// don't require grad. If a None value would be acceptable then
|
717 |
+
/// this argument is optional.
|
718 |
+
/// \param retain_graph If ``false``, the graph used to compute
|
719 |
+
/// the grads will be freed. Note that in nearly all cases setting
|
720 |
+
/// this option to True is not needed and often can be worked around
|
721 |
+
/// in a much more efficient way. Defaults to the value of
|
722 |
+
/// ``create_graph``.
|
723 |
+
/// \param create_graph If ``true``, graph of the derivative will
|
724 |
+
/// be constructed, allowing to compute higher order derivative
|
725 |
+
/// products. Defaults to ``false``.
|
726 |
+
/// \param inputs Inputs w.r.t. which the gradient will be accumulated into
|
727 |
+
/// ``at::Tensor::grad``. All other Tensors will be ignored. If not
|
728 |
+
/// provided, the gradient is accumulated into all the leaf Tensors
|
729 |
+
/// that were used to compute the current tensor.
|
730 |
+
/// When inputs are provided and a given input is not a leaf,
|
731 |
+
/// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
|
732 |
+
/// It is an implementation detail on which the user should not rely.
|
733 |
+
/// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
|
734 |
+
|
735 |
+
/// \fn Tensor detach() const;
|
736 |
+
///
|
737 |
+
/// Returns a new Tensor, detached from the current graph.
|
738 |
+
/// The result will never require gradient.
|
739 |
+
|
740 |
+
/// \fn Tensor & detach_() const;
|
741 |
+
///
|
742 |
+
/// Detaches the Tensor from the graph that created it, making it a leaf.
|
743 |
+
/// Views cannot be detached in-place.
|
744 |
+
|
745 |
+
/// \fn void retain_grad() const;
|
746 |
+
///
|
747 |
+
/// Enables this Tensor to have their :attr:`grad` populated during
|
748 |
+
/// :func:`backward`. This is a no-op for leaf tensors.
|
749 |
+
|
750 |
+
/// \fn bool retains_grad() const;
|
751 |
+
///
|
752 |
+
/// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
|
753 |
+
/// populated during :func:`backward`, ``false`` otherwise.
|
754 |
+
|
755 |
+
const TensorBase& set_requires_grad(bool requires_grad) const {
|
756 |
+
impl_->set_requires_grad(requires_grad);
|
757 |
+
return *this;
|
758 |
+
}
|
759 |
+
bool requires_grad() const {
|
760 |
+
return impl_->requires_grad();
|
761 |
+
}
|
762 |
+
|
763 |
+
// The Forward AD API functions below are low level and are not to be used by end
|
764 |
+
// users who should use the API provided in torch/csrc/autograd.h
|
765 |
+
|
766 |
+
/// This function returns the forward gradient for this Tensor at the given level.
|
767 |
+
const Tensor& _fw_grad(uint64_t level) const {
|
768 |
+
return impl_->_fw_grad(level, *this);
|
769 |
+
}
|
770 |
+
|
771 |
+
/// This function can be used to set the value of the forward grad.
|
772 |
+
/// Note that the given new_grad might not be used directly if it has different
|
773 |
+
/// metadata (size/stride/storage offset) compared to this Tensor. In that case,
|
774 |
+
/// new_grad content will be copied into a new Tensor
|
775 |
+
void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
|
776 |
+
impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
|
777 |
+
}
|
778 |
+
|
779 |
+
/// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
|
780 |
+
/// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
|
781 |
+
/// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
|
782 |
+
///
|
783 |
+
/// One notable difference with the legacy `.data()` function is that changes to the
|
784 |
+
/// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
|
785 |
+
/// will not update the original `Variable`, due to the fact that this function
|
786 |
+
/// shallow-copies the `Variable`'s underlying TensorImpl.
|
787 |
+
at::TensorBase tensor_data() const;
|
788 |
+
|
789 |
+
/// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
|
790 |
+
/// in Python, which create a new `Variable` that shares the same storage and
|
791 |
+
/// tensor metadata with the original `Variable`, but with a completely new
|
792 |
+
/// autograd history.
|
793 |
+
///
|
794 |
+
/// NOTE: If we change the tensor metadata (e.g. sizes / strides /
|
795 |
+
/// storage / storage_offset) of a variable created from `var.variable_data()`, those
|
796 |
+
/// changes will not update the original variable `var`. In `.variable_data()`, we set
|
797 |
+
/// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
|
798 |
+
/// in order to prevent users from changing metadata of `var.variable_data()`
|
799 |
+
/// and expecting the original variable `var` to also be updated.
|
800 |
+
at::TensorBase variable_data() const;
|
801 |
+
|
802 |
+
// Gradient Node and Edges
|
803 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
804 |
+
|
805 |
+
/// Gets the gradient function of the `Variable`. If this is a leaf variable,
|
806 |
+
/// the pointer returned will be null.
|
807 |
+
///
|
808 |
+
/// For View Variables:
|
809 |
+
/// Gets the up-to-date grad_fn. If the shared data or base was modified, we
|
810 |
+
/// re-create the grad_fn to express the up-to-date view relationship between
|
811 |
+
/// this and the base Variable.
|
812 |
+
const std::shared_ptr<torch::autograd::Node>& grad_fn() const;
|
813 |
+
|
814 |
+
// Hooks
|
815 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
816 |
+
|
817 |
+
template <typename T>
|
818 |
+
using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, TensorBase>>::value, unsigned>;
|
819 |
+
template <typename T>
|
820 |
+
using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, TensorBase>, TensorBase>::value, unsigned>;
|
821 |
+
|
822 |
+
/// Registers a backward hook.
|
823 |
+
///
|
824 |
+
/// The hook will be called every time a gradient with respect to the Tensor is computed.
|
825 |
+
/// The hook should have one of the following signature:
|
826 |
+
/// ```
|
827 |
+
/// hook(TensorBase grad) -> TensorBase
|
828 |
+
/// ```
|
829 |
+
/// ```
|
830 |
+
/// hook(TensorBase grad) -> void
|
831 |
+
/// ```
|
832 |
+
/// The hook should not modify its argument, but it can optionally return a new gradient
|
833 |
+
/// which will be used in place of `grad`.
|
834 |
+
///
|
835 |
+
/// This function returns the index of the hook in the list which can be used to remove hook.
|
836 |
+
///
|
837 |
+
/// Example:
|
838 |
+
/// @code
|
839 |
+
/// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
|
840 |
+
/// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
|
841 |
+
/// v.backward(torch::tensor({1., 2., 3.}));
|
842 |
+
/// // This prints:
|
843 |
+
/// // ```
|
844 |
+
/// // 2
|
845 |
+
/// // 4
|
846 |
+
/// // 6
|
847 |
+
/// // [ CPUFloatType{3} ]
|
848 |
+
/// // ```
|
849 |
+
/// std::cout << v.grad() << std::endl;
|
850 |
+
/// v.remove_hook(h); // removes the hook
|
851 |
+
/// @endcode
|
852 |
+
template <typename T>
|
853 |
+
hook_return_void_t<T> register_hook(T&& hook) const;
|
854 |
+
template <typename T>
|
855 |
+
hook_return_var_t<T> register_hook(T&& hook) const;
|
856 |
+
|
857 |
+
protected:
|
858 |
+
unsigned _register_hook(std::function<TensorBase(const TensorBase&)> hook) const;
|
859 |
+
|
860 |
+
public:
|
861 |
+
|
862 |
+
/// Remove hook at given position
|
863 |
+
void remove_hook(unsigned pos) const;
|
864 |
+
|
865 |
+
// Variable methods
|
866 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
867 |
+
|
868 |
+
bool is_leaf() const;
|
869 |
+
|
870 |
+
int64_t output_nr() const;
|
871 |
+
|
872 |
+
void set_data(const TensorBase & new_data) const;
|
873 |
+
|
874 |
+
TensorBase data() const;
|
875 |
+
|
876 |
+
int64_t _version() const;
|
877 |
+
|
878 |
+
void retain_grad() const;
|
879 |
+
|
880 |
+
bool retains_grad() const;
|
881 |
+
|
882 |
+
const TensorBase& requires_grad_(bool _requires_grad=true) const;
|
883 |
+
|
884 |
+
// View Variables
|
885 |
+
  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  /// Returns true if this `Variable` is a view of another `Variable`.
  bool is_view() const;

  /// Returns the `Variable` that this `Variable` is a view of. If this
  /// `Variable` is not a view, throw a `std::runtime_error`.
  const TensorBase& _base() const;

  // Miscellaneous
  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  const std::string& name() const;

 protected:
  void enforce_invariants();
  c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;

 private:
  TensorBase __dispatch_contiguous(c10::MemoryFormat) const;
};

inline int64_t get_device(const TensorBase& self) {
  return self.get_device();
}

template <typename T>
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t<T> {
  // Return the grad argument in case of a hook with void return type to have an
  // std::function with Tensor return type
  static_assert(std::is_same<decltype(hook(TensorBase())), void>::value,
                "Expected hook to return void");
  return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad) {
    fn(grad);
    return TensorBase();
  });
}

template <typename T>
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t<T> {
  return _register_hook(std::forward<T>(hook));
}

namespace detail {
// Helper creator for the Tensor class which doesn't require the user to pass
// in an intrusive_ptr; instead it just converts the argument passed to the
// requested intrusive_ptr type.
template <typename T, typename... Args>
TensorBase make_tensor_base(Args&&... args) {
  return TensorBase(c10::make_intrusive<T>(std::forward<Args>(args)...));
}

} // namespace detail

static inline DispatchKey legacyExtractDispatchKey(const TensorBase& t) {
  return legacyExtractDispatchKey(t.key_set());
}

} // namespace at

namespace c10 {
template <>
struct MaybeOwnedTraits<at::TensorBase> {
  using owned_type = at::TensorBase;
  using borrow_type = at::TensorBase;

  static borrow_type createBorrow(const owned_type& from) {
    // NOTE: this can be implemented without the special
    // unsafe_borrow_t Tensor constructor as
    //
    // return borrow_type(c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
    //
    // but that hurts inlining due to the nullptr check in the
    // Tensor(c10::intrusive_ptr<...>) constructor. We already know
    // that from.impl_ isn't null because from is a valid Tensor, so
    // we needn't do the check again. (using __builtin_assume can
    // avoid this, but wouldn't be portable to MSVC.)
    return borrow_type(borrow_type::unsafe_borrow_t{}, from);
  }

  static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
    lhs.unsafeReleaseTensorImpl();
    // See above note: this can be implemented with public API
    // similarly to createBorrow(), but that would hurt inlining.
    lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
  }

  static void destroyBorrow(borrow_type& toDestroy) {
    toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
  }

  static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
    return borrow;
  }

  static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
    return &borrow;
  }

  static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
    return true;
  }
};

template <>
struct ExclusivelyOwnedTraits<at::TensorBase> : public c10::ExclusivelyOwnedTensorTraits<at::TensorBase> {};
} // namespace c10

namespace at {

inline c10::MaybeOwned<TensorBase> borrow_from_optional_tensor(
    const c10::optional<TensorBase>& opt) {
  return opt.has_value()
    ? c10::MaybeOwned<TensorBase>::borrowed(*opt)
    : c10::MaybeOwned<TensorBase>::owned(c10::in_place);
}

inline c10::MaybeOwned<TensorBase> TensorBase::expect_contiguous(MemoryFormat memory_format) const & {
  if (is_contiguous(memory_format)) {
    return c10::MaybeOwned<TensorBase>::borrowed(*this);
  } else {
    return c10::MaybeOwned<TensorBase>::owned(__dispatch_contiguous(memory_format));
  }
}

namespace symint {

template <typename T>
using enable_if_symint = std::enable_if_t<std::is_same<T, c10::SymInt>::value>;
template <typename T>
using enable_if_int = std::enable_if_t<std::is_same<T, int64_t>::value>;

template <typename T, typename = enable_if_symint<T>>
c10::SymIntArrayRef sizes(const TensorBase& t) { return t.sym_sizes(); }
template <typename T, typename = enable_if_int<T>>
IntArrayRef sizes(const TensorBase& t) { return t.sizes(); }

template <typename T, typename = enable_if_symint<T>>
c10::SymInt size(const TensorBase& t, int64_t dim) { return t.sym_size(dim); }
template <typename T, typename = enable_if_int<T>>
int64_t size(const TensorBase& t, int64_t dim) { return t.size(dim); }

template <typename T, typename = enable_if_symint<T>>
c10::SymIntArrayRef strides(const TensorBase& t) { return t.sym_strides(); }
template <typename T, typename = enable_if_int<T>>
IntArrayRef strides(const TensorBase& t) { return t.strides(); }

template <typename T, typename = enable_if_symint<T>>
c10::SymInt numel(const TensorBase& t) { return t.sym_numel(); }
template <typename T, typename = enable_if_int<T>>
int64_t numel(const TensorBase& t) { return t.numel(); }

} // namespace symint

} // namespace at
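A minimal sketch of how the MaybeOwned-based helpers above are typically used inside a kernel; `my_kernel` and its arguments are illustrative names, not part of the header:

// Hypothetical caller: borrow the input when it is already contiguous,
// otherwise own a freshly materialized contiguous copy.
void my_kernel(const at::TensorBase& input,
               const c10::optional<at::TensorBase>& maybe_bias) {
  c10::MaybeOwned<at::TensorBase> input_contig =
      input.expect_contiguous(at::MemoryFormat::Contiguous);  // borrowed or owned
  c10::MaybeOwned<at::TensorBase> bias =
      at::borrow_from_optional_tensor(maybe_bias);  // undefined tensor if absent
  // Both dereference like pointers; the borrow path does no refcount bump.
  int64_t n = input_contig->numel();
  bool has_bias = bias->defined();
  (void)n; (void)has_bias;
}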
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h
ADDED
@@ -0,0 +1,173 @@
#include <c10/macros/Macros.h>
#include <c10/util/Half.h>
#include <c10/util/BFloat16.h>
#include <c10/util/MathConstants.h>
#include <ATen/NumericUtils.h>
#include <limits>
#include <cstdint>
#include <cassert>

namespace at {

// Using DistAccumType in accumulate types for distributions.
// Note: Ideally we'd be using ATen/AccumulateType.h but it looks
// like there is some inconsistency in how accumulate types
// are mapped currently, e.g. for the cpu side, float is mapped
// to double.
template <typename T>
struct DistAccumType { };

#if defined(__CUDACC__) || defined(__HIPCC__)
template <> struct DistAccumType<half> { using type = float; };
#endif
template <> struct DistAccumType<BFloat16> { using type = float; };
template <> struct DistAccumType<Half> { using type = float; };
template <> struct DistAccumType<float> { using type = float; };
template <> struct DistAccumType<double> { using type = double; };

template <typename T>
using dist_acctype = typename DistAccumType<T>::type;

namespace transformation {

/**
 * A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified.
 * `range` is `to - from`
 * `base` is `from`
 */
template <typename T, typename V>
C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) {
  return static_cast<T>(static_cast<int64_t>((val % range) + base));
}

/**
 * A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None
 */
template <typename T, typename V>
C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
  return static_cast<T>(static_cast<int64_t>(val));
}

/**
 * A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`.
 * In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double
 * in this overloaded version
 */
template <typename T, typename V>
C10_HOST_DEVICE inline typename std::enable_if<!(std::is_floating_point<T>::value), T>::type uniform_int(V val) {
  if constexpr (std::is_same_v<T, bool>) {
    return static_cast<bool>(val & 1);
  } else if constexpr (std::is_same_v<T, int64_t>) {
    return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
  } else if constexpr (std::is_same_v<T, at::Half> || std::is_same<T, at::BFloat16>::value) {
    return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
  } else if constexpr (std::is_integral_v<T>) {
    return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
  } else {
    assert(false);
    return 0;
  }
}

/**
 * An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`,
 * added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version.
 */
template<typename T, typename V>
C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type uniform_int(V val) {
  return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
}

template <typename T, typename V>
C10_HOST_DEVICE inline dist_acctype<T> uniform_real(V val, T from, T to) {
  constexpr auto MASK = static_cast<V>((static_cast<uint64_t>(1) << std::numeric_limits<T>::digits) - 1);
  constexpr auto DIVISOR = static_cast<dist_acctype<T>>(1) / (static_cast<uint64_t>(1) << std::numeric_limits<T>::digits);
  dist_acctype<T> x = (val & MASK) * DIVISOR;
  return (x * (to - from) + from);
}

/**
 * Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to
 * normally distributed with `mean` and standard deviation `std`.
 */
template <typename T>
C10_HOST_DEVICE inline T normal(T val, T mean, T std) {
  return val * std + mean;
}

/**
 * Transforms uniformly distributed `val` between 0.0 and 1.0 to
 * Cauchy distribution with location parameter `median` and scale parameter `sigma`.
 */
template <typename T>
C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) {
  // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
  // __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps),
  // thus we clip those values.
  constexpr T eps = std::numeric_limits<T>::epsilon();
  constexpr T one_minus_eps = 1 - eps;
  constexpr T zero_plus_eps = 0 + eps;
  val = (val > one_minus_eps ? one_minus_eps : val);
  val = (val < zero_plus_eps ? zero_plus_eps : val);
  return median + sigma * at::tan(c10::pi<T> * (val - static_cast<T>(0.5)));
}

template <>
C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) {
  // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
  return median + sigma * at::tan(c10::pi<double> * (val - static_cast<double>(0.5)));
}

/**
 * Transforms uniformly distributed `val` between 0.0 and 1.0 to
 * exponentially distributed with `lambda` parameter of the distribution.
 */
template <typename T>
C10_HOST_DEVICE inline T exponential(T val, T lambda) {
  // https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates
  // Different implementations for CUDA and CPU to preserve original logic
  // TODO: must be investigated and unified!!!
  // https://github.com/pytorch/pytorch/issues/38662
#if defined(__CUDACC__) || defined(__HIPCC__)
  // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
  // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
  // we need log to be not 0, and not underflow when converted to half
  // fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args
  auto log = val >= static_cast<T>(1.) - std::numeric_limits<T>::epsilon() / 2
      ? -std::numeric_limits<T>::epsilon() / 2
      : at::log(val);
  return static_cast<T>(-1.0) / lambda * log;
#else
  return static_cast<T>(-1.0) / lambda * at::log1p(-val);
#endif
}

/**
 * Transforms uniformly distributed `val` between 0.0 and 1.0 to
 * geometrically distributed with success probability `p`.
 */
template <typename T>
C10_HOST_DEVICE inline T geometric(T val, T p) {
  // https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions
  return static_cast<T>(::ceil(at::log(val) / at::log1p(-p)));
}

/**
 * Transforms normally distributed `val` to log-normally distributed.
 */
template <typename T>
C10_HOST_DEVICE inline T log_normal(T val) {
  // https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles
  return at::exp(val);
}

/**
 * Transforms uniformly distributed `val` between 0.0 and 1.0 to
 * bernoulli distributed with success probability `p`.
 */
template <typename T>
C10_HOST_DEVICE inline T bernoulli(T val, T p) {
  return val < p;
}

}} // namespace at::transformation
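A small sketch of how these transformations compose with a raw generator draw; the `rng` value below is just a stand-in for the output of a Philox/MT19937 engine, and the snippet assumes the header above is included:

// Hypothetical host-side use: turn raw 64-bit engine output into samples.
uint64_t rng = 0x9e3779b97f4a7c15ULL;  // stand-in for a random engine draw
// uniform in [2.0, 5.0)
double u = at::transformation::uniform_real<double, uint64_t>(rng, 2.0, 5.0);
// uniform in [0, 1), then reshaped into other distributions
double v = at::transformation::uniform_real<double, uint64_t>(rng, 0.0, 1.0);
double e = at::transformation::exponential<double>(v, /*lambda=*/1.5);
double c = at::transformation::cauchy<double>(v, /*median=*/0.0, /*sigma=*/1.0);
bool coin = at::transformation::bernoulli<double>(v, /*p=*/0.25);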
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h
ADDED
@@ -0,0 +1 @@
#include <c10/core/UndefinedTensorImpl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h
ADDED
@@ -0,0 +1,21 @@
#pragma once
#include <ATen/core/Tensor.h>

namespace at {

inline Tensor unsafeTensorFromTH(void * th_pointer, bool retain) {
  auto tensor_impl = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(static_cast<TensorImpl*>(th_pointer));
  if (retain && tensor_impl.get() != UndefinedTensorImpl::singleton()) {
    c10::raw::intrusive_ptr::incref(tensor_impl.get());
  }
  return Tensor(std::move(tensor_impl));
}

inline Storage unsafeStorageFromTH(void * th_pointer, bool retain) {
  if (retain && th_pointer) {
    c10::raw::intrusive_ptr::incref(static_cast<StorageImpl*>(th_pointer));
  }
  return Storage(c10::intrusive_ptr<StorageImpl>::reclaim(static_cast<StorageImpl*>(th_pointer)));
}

} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h
ADDED
@@ -0,0 +1,75 @@
#pragma once

#include <c10/macros/Export.h>
#include <ATen/core/Tensor.h>

// A little explanation about why this file exists at all. We have
// a few methods on Tensor class which require access to reified access to
// AutogradMeta. In open source, this isn't a big deal: we just access
// torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and
// we can put the definitions inline. This is because everything gets balled
// into a single dynamic library in the end.
//
// However, inside our Facebook internal version of our build system, we
// have a split between aten and torch/csrc. So we cannot simply just
// cross this boundary. "Now wait," you might say, "Why don't we just
// merge the libraries inside Facebook". Well, the problem is that there
// are some downstream applications which are at binary size limit, and
// incorporating all of the extra code from libtorch would push them
// over (admarket/adreview/service:adreviewservice, see also
// https://github.com/pytorch/pytorch/pull/29299) So if you want to do that,
// we have to fix all of the services like this.
//
// I didn't want to block eliminating Tensor-Variable on this work, so I
// had to introduce another dynamic dispatch to get to the variable
// implementations (which live in torch/csrc/autograd/variable.cpp, FYI).
//
// I also considered using our existing dynamic dispatch mechanism, c10
// dispatcher, to do this. However, (1) some of the functions on Tensor
// have weird signatures that are not supported by autograd, and (2)
// see this bug https://github.com/pytorch/pytorch/issues/30102

namespace torch { namespace autograd {

struct Node;

}} // namespace torch::autograd

namespace at {
namespace impl {

struct TORCH_API VariableHooksInterface {
  virtual ~VariableHooksInterface() = default;
  virtual TensorBase tensor_data(const TensorBase&) const = 0;
  virtual TensorBase variable_data(const TensorBase&) const = 0;
  virtual const std::shared_ptr<torch::autograd::Node>& grad_fn(const TensorBase&) const = 0;
  virtual unsigned _register_hook(
      const TensorBase&,
      std::function<TensorBase(const TensorBase&)> hook) const = 0;
  virtual void remove_hook(const TensorBase&, unsigned pos) const = 0;
  virtual bool is_view(const TensorBase&) const = 0;
  virtual const TensorBase& base(const TensorBase&) const = 0;
  virtual const std::string& name(const TensorBase&) const = 0;
  virtual bool is_leaf(const TensorBase&) const = 0;
  virtual int64_t output_nr(const TensorBase&) const = 0;
  virtual void set_data(const TensorBase&, const TensorBase&) const = 0;
  virtual TensorBase data(const TensorBase&) const = 0;
  virtual int64_t _version(const TensorBase&) const = 0;
  virtual void retain_grad(const TensorBase&) const = 0;
  virtual bool retains_grad(const TensorBase&) const = 0;
  virtual void _backward(const Tensor&, TensorList, const c10::optional<Tensor>&, c10::optional<bool>, bool) const = 0;
  virtual void requires_grad_(const TensorBase&, bool) const = 0;
  virtual void basic_autograd_not_implemented_fallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatch_keys, torch::jit::Stack* stack) const = 0;
};

TORCH_API void SetVariableHooks(VariableHooksInterface* hooks);
TORCH_API VariableHooksInterface* GetVariableHooks();
TORCH_API bool HasVariableHooks();

struct TORCH_API VariableHooksRegisterer {
  explicit VariableHooksRegisterer(VariableHooksInterface* hooks) {
    SetVariableHooks(hooks);
  }
};

}} // namespace at::impl
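A hedged call-site sketch of the dynamic-dispatch indirection the comment above describes (the free function name is illustrative; the real callers are the Tensor methods themselves):

// Hypothetical: route an autograd query through the registered hooks, if any.
bool tensor_is_view(const at::TensorBase& t) {
  if (at::impl::HasVariableHooks()) {
    return at::impl::GetVariableHooks()->is_view(t);
  }
  return false;
}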
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h
ADDED
@@ -0,0 +1,95 @@
#pragma once

#include <cstdint>
#include <tuple>
#include <type_traits>
#include <utility>

#include <c10/util/ArrayRef.h>
#include <ATen/core/List.h>

namespace at {

// This class allows you to write variadic functions which
// call a (possibly overloaded) function on each argument,
// in order. This is most commonly used in autogenerated code,
// where it is convenient to have a function that can uniformly
// take arguments of different types. If your arguments
// are homogenous consider using a std::initializer_list instead.
//
// For examples of this in use, see torch/csrc/utils/variadic.h
template <typename F>
struct IterArgs {
  template <typename... Args>
  inline F& apply() {
    return self();
  }

  // NB: Use perfect forwarding here, otherwise we'll make value
  // copies of all arguments!
  template <typename T, typename... Args>
  inline F& apply(T&& arg, Args&&... args) {
    self()(std::forward<T>(arg));
    if (self().short_circuit()) {
      return self();
    } else {
      return apply(std::forward<Args>(args)...);
    }
  }

  // Here are some handy overloads which provide sensible
  // defaults for container-like structures that one might
  // be interested in recursing into. You can enable them
  // by adding:
  //
  //    using IterArgs<YourStructName>::operator()
  //
  // to your struct. These are not enabled by default because
  // you may be able to process these structures more efficiently
  // than handling them one-by-one.

  template <typename T>
  void operator()(c10::IListRef<T> args) {
    for (const auto& arg : args) {
      self()(arg);
      if (self().short_circuit())
        return;
    }
  }

  template <typename T>
  void operator()(at::ArrayRef<T> args) {
    for (const auto& arg : args) {
      self()(arg);
      if (self().short_circuit())
        return;
    }
  }

  template <typename T>
  void operator()(const torch::List<T>& args) {
    for (const auto& arg : args) {
      self()(arg);
      if (self().short_circuit())
        return;
    }
  }

  // NB: we need to specify std::vector manually as C++ won't
  // do an implicit conversion to make a template deduction go through.
  template <typename T>
  void operator()(const std::vector<T>& args) {
    self()(at::ArrayRef<T>{args});
  }

  constexpr bool short_circuit() const {
    return false;
  }

 private:
  inline F& self() {
    return *static_cast<F*>(this);
  }
};

} // namespace torch
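A minimal sketch of the CRTP pattern the comments above describe; the visitor type and helper function are illustrative names, not part of the header, and the snippet assumes <ATen/core/Tensor.h> is also included:

// Hypothetical visitor: counts defined tensors among heterogeneous arguments.
struct CountDefinedTensors : at::IterArgs<CountDefinedTensors> {
  int64_t count = 0;
  void operator()(const at::TensorBase& t) {
    if (t.defined()) {
      ++count;
    }
  }
  // Opt in to the ArrayRef/List/vector recursion overloads above.
  using at::IterArgs<CountDefinedTensors>::operator();
};

template <typename... Args>
int64_t count_defined_tensors(Args&&... args) {
  return CountDefinedTensors().apply(std::forward<Args>(args)...).count;
}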
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h
ADDED
@@ -0,0 +1,96 @@
#pragma once
#include <cstring>
#include <map>
#include <memory>
#include <ostream>
#include <sstream>
#include <unordered_map>

#include <c10/core/impl/LocalDispatchKeySet.h>

namespace at {
namespace vitals {

TORCH_API bool torchVitalEnabled();

struct TORCH_API TorchVitalAttr {
  // always initialized to empty
  std::string value = "";
  template <typename T>
  TorchVitalAttr& operator<<(const T& t) {
    if (torchVitalEnabled()) {
      std::stringstream ss;
      ss << t;
      value += ss.str();
    }
    return *this;
  }

  template <typename T>
  void write(const T& t, bool force) {
    if (force || torchVitalEnabled()) {
      std::stringstream ss;
      ss << t;
      value = ss.str();
    }
  }
};

struct TORCH_API TorchVital {
  std::string name;
  std::unordered_map<std::string, TorchVitalAttr> attrs;

  explicit TorchVital(std::string n) : name(std::move(n)) {}
  TorchVital(const TorchVital&) = default;
  TorchVital(TorchVital&&) = default;
  TorchVital() = delete;

  TorchVitalAttr& create(const std::string& attr);
  TorchVitalAttr& create(const std::string& attr, bool force);
  friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt);

  ~TorchVital();
};

std::ostream& operator<<(std::ostream& os, TorchVital const& tv);

// A way to access vitals by string names instead of by global reference.
// This enables access to vitals from the PythonAPI.
class TORCH_API APIVitals {
 public:
  bool vitals_enabled;

  // Set any vital sign that was added to the map.
  bool setVital(
      const std::string& vital_name,
      const std::string& attr_name,
      const std::string& value,
      bool force = false);
  std::string readVitals();

  APIVitals();

  // Ensure this stays a singleton
  APIVitals(APIVitals const& other) = delete;
  APIVitals(APIVitals&& other) = delete;
  APIVitals& operator=(const APIVitals&) = delete;
  APIVitals& operator=(APIVitals&&) = delete;

 private:
  std::unordered_map<std::string, TorchVital> name_map_;
};

extern TORCH_API APIVitals VitalsAPI;

} // namespace vitals
} // namespace at

#define TORCH_VITAL_DECLARE(name) \
  TORCH_API at::vitals::TorchVital TorchVital_##name;

#define TORCH_VITAL_DEFINE(name) \
  TORCH_API at::vitals::TorchVital TorchVital_##name(#name);

#define TORCH_VITAL_BASE(name) TorchVital_##name

#define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr)
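A short, illustrative sketch of the macros above, written as it would appear inside the library's own sources (the vital name DEMO and the function are hypothetical):

// Hypothetical: define a vital named "DEMO", then stream attributes to it.
TORCH_VITAL_DEFINE(DEMO)

void record_last_dtype(const std::string& dtype) {
  // Appends to the "dtype" attribute only when vitals are enabled.
  TORCH_VITAL(DEMO, dtype) << dtype;
  // Or set an attribute unconditionally via the force overloads.
  TORCH_VITAL_BASE(DEMO).create("last_dtype", /*force=*/true).write(dtype, /*force=*/true);
}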
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h
ADDED
@@ -0,0 +1,151 @@
#pragma once
#include <unordered_set>
#include <vector>
#include <ATen/core/symbol.h>
#include <c10/util/Exception.h>
#include <c10/util/hash.h>

namespace c10 {
/**
 * class AliasInfo
 *
 * Data structure to hold aliasing information for an `Argument`. They can be
 * nested to represent aliasing information on contained types.
 *
 * There is a `beforeSet` which describes the aliasing information before the
 * operator executes, and an `afterSet` that describes aliasing info
 * after execution.
 */
class AliasInfo {
 public:
  // Symbol for the set that can alias anything
  static Symbol wildcardSet() {
    static const Symbol wc = Symbol::fromQualString("alias::*");
    return wc;
  }

  void setIsWrite(bool isWrite) {
    isWrite_ = isWrite;
  }

  bool isWrite() const {
    return isWrite_;
  }

  void addBeforeSet(Symbol aliasSet) {
    beforeSets_.insert(aliasSet);
  }

  void addAfterSet(Symbol aliasSet) {
    afterSets_.insert(aliasSet);
  }

  const std::unordered_set<Symbol>& beforeSets() const {
    return beforeSets_;
  }

  const std::unordered_set<Symbol>& afterSets() const {
    return afterSets_;
  }

  Symbol beforeSet() const {
    AT_ASSERT(beforeSets_.size() == 1);
    return *beforeSets_.begin();
  }

  bool isWildcardBefore() const {
    return beforeSets_.count(wildcardSet()) != 0;
  }

  bool isWildcardAfter() const {
    return afterSets_.count(wildcardSet()) != 0;
  }

  // the alias info for the contained types of the type
  // e.g. if this is an annotation on List[T], `sets` refers to
  // the alias sets that the list may be in
  // while containedTypes()[0] refers to the sets that members of the list
  // may be in
  void addContainedType(AliasInfo aliasInfo) {
    containedTypes_.push_back(std::move(aliasInfo));
  }
  const std::vector<AliasInfo>& containedTypes() const {
    return containedTypes_;
  }

 private:
  std::unordered_set<Symbol> beforeSets_;
  std::unordered_set<Symbol> afterSets_;
  std::vector<AliasInfo> containedTypes_;
  bool isWrite_ = false;
};

inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) {
  return lhs.isWrite() == rhs.isWrite()
      && lhs.beforeSets() == rhs.beforeSets()
      && lhs.afterSets() == rhs.afterSets()
      && lhs.containedTypes() == rhs.containedTypes();
}

// this does match the way things are represented in the schema
inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) {
  out << "(";
  bool first = true;
  for (const auto& set : aliasInfo.beforeSets()) {
    if (first) {
      first = false;
    } else {
      out << "|";
    }
    out << set.toUnqualString();
  }
  if (aliasInfo.isWrite()) {
    out << "!";
  }
  if (aliasInfo.beforeSets() != aliasInfo.afterSets()) {
    out << " -> ";
    first = true;
    for (const auto& set : aliasInfo.afterSets()) {
      if (first) {
        first = false;
      } else {
        out << "|";
      }
      out << set.toUnqualString();
    }
  }
  out << ")";
  return out;
}
} // namespace c10

namespace std {
template <>
struct hash<c10::AliasInfo> {
  size_t operator()(const c10::AliasInfo& aliasInfo) const {
    auto hash = std::hash<bool>()(aliasInfo.isWrite());

    // NOTE: for unordered_set hashes, we couldn't use hash_combine
    // because hash_combine is order dependent. Instead, we choose to
    // use XOR as the combining function as XOR is commutative.
    size_t before_set_hash_seed = 0;
    for (auto &e: aliasInfo.beforeSets()) {
      auto symbol_hash = std::hash<c10::Symbol>()(e);
      before_set_hash_seed = before_set_hash_seed ^ symbol_hash;
    }
    size_t after_set_hash_seed = 0;
    for (auto &e: aliasInfo.afterSets()) {
      auto symbol_hash = std::hash<c10::Symbol>()(e);
      after_set_hash_seed = after_set_hash_seed ^ symbol_hash;
    }

    hash = c10::hash_combine(hash, before_set_hash_seed);
    hash = c10::hash_combine(hash, after_set_hash_seed);
    for (auto &e: aliasInfo.containedTypes()) {
      auto contained_type_hash = std::hash<c10::AliasInfo>()(e);
      hash = c10::hash_combine(hash, contained_type_hash);
    }
    return hash;
  }
};
}
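A small illustrative sketch (not part of the header) of building the alias annotation that appears in operator schemas as "Tensor(a!)", i.e. a written, single-set alias:

// Hypothetical: construct the AliasInfo for a schema argument "Tensor(a!)".
c10::AliasInfo make_written_alias_a() {
  c10::AliasInfo info;
  info.addBeforeSet(c10::Symbol::fromQualString("alias::a"));
  info.addAfterSet(c10::Symbol::fromQualString("alias::a"));
  info.setIsWrite(true);  // the "!" marker: the op writes to this alias set
  return info;
}
// Streaming it with the operator<< above prints "(a!)".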
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h
ADDED
@@ -0,0 +1,2180 @@
#pragma once

// @generated by torchgen/gen.py from aten_interned_strings.h

#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if including <ATen/core/symbol.h> for \
the c10::Symbol class would be sufficient, or if your change would be \
better placed in another file.
#endif

// ATen symbols correspond exactly to operators defined in ATen. Every
// symbol here corresponds exactly to an ATen operation defined in
// native_functions.yaml; attributes are in one-to-one correspondence
// with their ATen name.

#define FORALL_ATEN_BASE_SYMBOLS(_) \
_(aten, __and__) \
_(aten, __iand__) \
_(aten, __ilshift__) \
_(aten, __ior__) \
_(aten, __irshift__) \
_(aten, __ixor__) \
_(aten, __lshift__) \
_(aten, __or__) \
_(aten, __rshift__) \
_(aten, __xor__) \
_(aten, _adaptive_avg_pool2d) \
_(aten, _adaptive_avg_pool2d_backward) \
_(aten, _adaptive_avg_pool3d) \
_(aten, _adaptive_avg_pool3d_backward) \
_(aten, _add_batch_dim) \
_(aten, _add_relu) \
_(aten, _add_relu_) \
_(aten, _addmm_activation) \
_(aten, _aminmax) \
_(aten, _amp_foreach_non_finite_check_and_unscale) \
_(aten, _amp_foreach_non_finite_check_and_unscale_) \
_(aten, _amp_update_scale) \
_(aten, _amp_update_scale_) \
_(aten, _assert_async) \
_(aten, _assert_tensor_metadata) \
_(aten, _autocast_to_full_precision) \
_(aten, _autocast_to_reduced_precision) \
_(aten, _backward) \
_(aten, _batch_norm_impl_index) \
_(aten, _batch_norm_impl_index_backward) \
_(aten, _cast_Byte) \
_(aten, _cast_Char) \
_(aten, _cast_Double) \
_(aten, _cast_Float) \
_(aten, _cast_Half) \
_(aten, _cast_Int) \
_(aten, _cast_Long) \
_(aten, _cast_Short) \
_(aten, _cdist_backward) \
_(aten, _cdist_forward) \
_(aten, _cholesky_solve_helper) \
_(aten, _choose_qparams_per_tensor) \
_(aten, _coalesce) \
_(aten, _coalesced) \
_(aten, _coalesced_) \
_(aten, _compute_linear_combination) \
_(aten, _conj) \
_(aten, _conj_copy) \
_(aten, _conj_physical) \
_(aten, _conv_depthwise2d) \
_(aten, _convert_indices_from_coo_to_csr) \
_(aten, _convert_indices_from_csr_to_coo) \
_(aten, _convert_weight_to_int4pack) \
_(aten, _convolution) \
_(aten, _convolution_double_backward) \
_(aten, _convolution_mode) \
_(aten, _copy_from) \
_(aten, _copy_from_and_resize) \
_(aten, _cslt_compress) \
_(aten, _cslt_sparse_mm) \
_(aten, _ctc_loss) \
_(aten, _ctc_loss_backward) \
_(aten, _cudnn_ctc_loss) \
_(aten, _cudnn_init_dropout_state) \
_(aten, _cudnn_rnn) \
_(aten, _cudnn_rnn_backward) \
_(aten, _cudnn_rnn_flatten_weight) \
_(aten, _cufft_clear_plan_cache) \
_(aten, _cufft_get_plan_cache_max_size) \
_(aten, _cufft_get_plan_cache_size) \
_(aten, _cufft_set_plan_cache_max_size) \
_(aten, _cummax_helper) \
_(aten, _cummin_helper) \
_(aten, _debug_has_internal_overlap) \
_(aten, _dimI) \
_(aten, _dimV) \
_(aten, _dim_arange) \
_(aten, _dirichlet_grad) \
_(aten, _efficient_attention_backward) \
_(aten, _efficient_attention_forward) \
_(aten, _efficientzerotensor) \
_(aten, _embedding_bag) \
_(aten, _embedding_bag_backward) \
_(aten, _embedding_bag_dense_backward) \
_(aten, _embedding_bag_forward_only) \
_(aten, _embedding_bag_per_sample_weights_backward) \
_(aten, _embedding_bag_sparse_backward) \
_(aten, _empty_affine_quantized) \
_(aten, _empty_per_channel_affine_quantized) \
_(aten, _euclidean_dist) \
_(aten, _fake_quantize_learnable_per_channel_affine) \
_(aten, _fake_quantize_learnable_per_channel_affine_backward) \
_(aten, _fake_quantize_learnable_per_tensor_affine) \
_(aten, _fake_quantize_learnable_per_tensor_affine_backward) \
_(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) \
_(aten, _fft_c2c) \
_(aten, _fft_c2r) \
_(aten, _fft_r2c) \
_(aten, _fill_mem_eff_dropout_mask) \
_(aten, _fill_mem_eff_dropout_mask_) \
_(aten, _flash_attention_backward) \
_(aten, _flash_attention_forward) \
_(aten, _foobar) \
_(aten, _foreach_abs) \
_(aten, _foreach_abs_) \
_(aten, _foreach_acos) \
_(aten, _foreach_acos_) \
_(aten, _foreach_add) \
_(aten, _foreach_add_) \
_(aten, _foreach_addcdiv) \
_(aten, _foreach_addcdiv_) \
_(aten, _foreach_addcmul) \
_(aten, _foreach_addcmul_) \
_(aten, _foreach_asin) \
_(aten, _foreach_asin_) \
_(aten, _foreach_atan) \
_(aten, _foreach_atan_) \
_(aten, _foreach_ceil) \
_(aten, _foreach_ceil_) \
_(aten, _foreach_clamp_max) \
_(aten, _foreach_clamp_max_) \
_(aten, _foreach_clamp_min) \
_(aten, _foreach_clamp_min_) \
_(aten, _foreach_copy) \
_(aten, _foreach_copy_) \
_(aten, _foreach_cos) \
_(aten, _foreach_cos_) \
_(aten, _foreach_cosh) \
_(aten, _foreach_cosh_) \
_(aten, _foreach_div) \
_(aten, _foreach_div_) \
_(aten, _foreach_erf) \
_(aten, _foreach_erf_) \
_(aten, _foreach_erfc) \
_(aten, _foreach_erfc_) \
_(aten, _foreach_exp) \
_(aten, _foreach_exp_) \
_(aten, _foreach_expm1) \
_(aten, _foreach_expm1_) \
_(aten, _foreach_floor) \
_(aten, _foreach_floor_) \
_(aten, _foreach_frac) \
_(aten, _foreach_frac_) \
_(aten, _foreach_lerp) \
_(aten, _foreach_lerp_) \
_(aten, _foreach_lgamma) \
_(aten, _foreach_lgamma_) \
_(aten, _foreach_log) \
_(aten, _foreach_log10) \
_(aten, _foreach_log10_) \
_(aten, _foreach_log1p) \
_(aten, _foreach_log1p_) \
_(aten, _foreach_log2) \
_(aten, _foreach_log2_) \
_(aten, _foreach_log_) \
_(aten, _foreach_maximum) \
_(aten, _foreach_maximum_) \
_(aten, _foreach_minimum) \
_(aten, _foreach_minimum_) \
_(aten, _foreach_mul) \
_(aten, _foreach_mul_) \
_(aten, _foreach_neg) \
_(aten, _foreach_neg_) \
_(aten, _foreach_norm) \
_(aten, _foreach_pow) \
_(aten, _foreach_pow_) \
_(aten, _foreach_reciprocal) \
_(aten, _foreach_reciprocal_) \
_(aten, _foreach_round) \
_(aten, _foreach_round_) \
_(aten, _foreach_sigmoid) \
_(aten, _foreach_sigmoid_) \
_(aten, _foreach_sign) \
_(aten, _foreach_sign_) \
_(aten, _foreach_sin) \
_(aten, _foreach_sin_) \
_(aten, _foreach_sinh) \
_(aten, _foreach_sinh_) \
_(aten, _foreach_sqrt) \
_(aten, _foreach_sqrt_) \
_(aten, _foreach_sub) \
_(aten, _foreach_sub_) \
_(aten, _foreach_tan) \
_(aten, _foreach_tan_) \
_(aten, _foreach_tanh) \
_(aten, _foreach_tanh_) \
_(aten, _foreach_trunc) \
_(aten, _foreach_trunc_) \
_(aten, _foreach_zero) \
_(aten, _foreach_zero_) \
_(aten, _functional_assert_async) \
_(aten, _functional_sym_constrain_range) \
_(aten, _functional_sym_constrain_range_for_size) \
_(aten, _fused_adam) \
_(aten, _fused_adam_) \
_(aten, _fused_adamw) \
_(aten, _fused_adamw_) \
_(aten, _fused_dropout) \
_(aten, _fused_moving_avg_obs_fq_helper) \
_(aten, _fused_moving_avg_obs_fq_helper_functional) \
_(aten, _fused_sdp_choice) \
_(aten, _fw_primal) \
_(aten, _fw_primal_copy) \
_(aten, _gather_sparse_backward) \
_(aten, _grid_sampler_2d_cpu_fallback) \
_(aten, _grid_sampler_2d_cpu_fallback_backward) \
_(aten, _has_compatible_shallow_copy_type) \
_(aten, _has_same_storage_numel) \
_(aten, _histogramdd_bin_edges) \
_(aten, _histogramdd_from_bin_cts) \
_(aten, _histogramdd_from_bin_tensors) \
_(aten, _index_put_impl) \
_(aten, _index_put_impl_) \
_(aten, _indices) \
_(aten, _indices_copy) \
_(aten, _int_mm) \
_(aten, _is_all_true) \
_(aten, _is_any_true) \
_(aten, _is_zerotensor) \
_(aten, _linalg_check_errors) \
_(aten, _linalg_det) \
_(aten, _linalg_eigh) \
_(aten, _linalg_slogdet) \
_(aten, _linalg_solve_ex) \
_(aten, _linalg_svd) \
_(aten, _local_scalar_dense) \
_(aten, _log_softmax) \
_(aten, _log_softmax_backward_data) \
_(aten, _logcumsumexp) \
_(aten, _lstm_mps) \
_(aten, _lu_with_info) \
_(aten, _make_dep_token) \
_(aten, _make_dual) \
_(aten, _make_dual_copy) \
_(aten, _make_per_channel_quantized_tensor) \
_(aten, _make_per_tensor_quantized_tensor) \
_(aten, _masked_scale) \
_(aten, _masked_softmax) \
_(aten, _masked_softmax_backward) \
_(aten, _mixed_dtypes_linear) \
_(aten, _mkldnn_reshape) \
_(aten, _mkldnn_transpose) \
_(aten, _mkldnn_transpose_) \
_(aten, _mps_convolution) \
_(aten, _mps_convolution_transpose) \
_(aten, _native_batch_norm_legit) \
_(aten, _native_batch_norm_legit_functional) \
_(aten, _native_batch_norm_legit_no_training) \
_(aten, _native_multi_head_attention) \
_(aten, _neg_view) \
_(aten, _neg_view_copy) \
_(aten, _nested_from_padded) \
_(aten, _nested_from_padded_and_nested_example) \
_(aten, _nested_select_backward) \
_(aten, _nested_sum_backward) \
_(aten, _nested_tensor_from_mask) \
_(aten, _nested_tensor_from_mask_left_aligned) \
_(aten, _nested_tensor_from_tensor_list) \
_(aten, _nested_tensor_size) \
_(aten, _nested_tensor_softmax_with_shape) \
_(aten, _nested_tensor_storage_offsets) \
_(aten, _nested_tensor_strides) \
_(aten, _nested_view_from_buffer) \
_(aten, _nested_view_from_buffer_copy) \
_(aten, _new_zeros_with_same_feature_meta) \
_(aten, _nnpack_available) \
_(aten, _nnpack_spatial_convolution) \
_(aten, _nnz) \
_(aten, _pack_padded_sequence) \
_(aten, _pack_padded_sequence_backward) \
_(aten, _pad_circular) \
_(aten, _pad_enum) \
_(aten, _pad_packed_sequence) \
_(aten, _pdist_backward) \
_(aten, _pdist_forward) \
_(aten, _pin_memory) \
_(aten, _prelu_kernel) \
_(aten, _prelu_kernel_backward) \
_(aten, _propagate_xla_data) \
_(aten, _remove_batch_dim) \
_(aten, _reshape_alias) \
_(aten, _reshape_alias_copy) \
_(aten, _reshape_copy) \
_(aten, _reshape_from_tensor) \
_(aten, _resize_output) \
_(aten, _resize_output_) \
_(aten, _rowwise_prune) \
_(aten, _sample_dirichlet) \
_(aten, _saturate_weight_to_fp16) \
_(aten, _scaled_dot_product_attention_math) \
_(aten, _scaled_dot_product_efficient_attention) \
_(aten, _scaled_dot_product_efficient_attention_backward) \
_(aten, _scaled_dot_product_flash_attention) \
_(aten, _scaled_dot_product_flash_attention_backward) \
_(aten, _scaled_mm) \
_(aten, _segment_reduce_backward) \
_(aten, _shape_as_tensor) \
_(aten, _slow_conv2d_backward) \
_(aten, _slow_conv2d_forward) \
_(aten, _sobol_engine_draw) \
_(aten, _sobol_engine_ff) \
_(aten, _sobol_engine_ff_) \
_(aten, _sobol_engine_initialize_state) \
_(aten, _sobol_engine_initialize_state_) \
_(aten, _sobol_engine_scramble) \
_(aten, _sobol_engine_scramble_) \
_(aten, _softmax) \
_(aten, _softmax_backward_data) \
_(aten, _sparse_addmm) \
_(aten, _sparse_broadcast_to) \
_(aten, _sparse_broadcast_to_copy) \
_(aten, _sparse_bsc_tensor_unsafe) \
_(aten, _sparse_bsr_tensor_unsafe) \
_(aten, _sparse_compressed_tensor_unsafe) \
_(aten, _sparse_coo_tensor_unsafe) \
_(aten, _sparse_coo_tensor_with_dims) \
_(aten, _sparse_coo_tensor_with_dims_and_tensors) \
_(aten, _sparse_csc_tensor_unsafe) \
_(aten, _sparse_csr_prod) \
_(aten, _sparse_csr_sum) \
_(aten, _sparse_csr_tensor_unsafe) \
_(aten, _sparse_log_softmax) \
_(aten, _sparse_log_softmax_backward_data) \
_(aten, _sparse_mask_projection) \
_(aten, _sparse_mm) \
_(aten, _sparse_mm_reduce_impl) \
_(aten, _sparse_mm_reduce_impl_backward) \
_(aten, _sparse_semi_structured_linear) \
_(aten, _sparse_softmax) \
_(aten, _sparse_softmax_backward_data) \
_(aten, _sparse_sparse_matmul) \
_(aten, _sparse_sum) \
_(aten, _sparse_sum_backward) \
_(aten, _spdiags) \
_(aten, _stack) \
_(aten, _standard_gamma) \
_(aten, _standard_gamma_grad) \
_(aten, _test_ambiguous_defaults) \
_(aten, _test_autograd_multiple_dispatch) \
_(aten, _test_autograd_multiple_dispatch_view) \
_(aten, _test_autograd_multiple_dispatch_view_copy) \
_(aten, _test_check_tensor) \
_(aten, _test_functorch_fallback) \
_(aten, _test_optional_filled_intlist) \
_(aten, _test_optional_floatlist) \
_(aten, _test_optional_intlist) \
_(aten, _test_serialization_subcmul) \
_(aten, _test_string_default) \
_(aten, _test_warn_in_autograd) \
_(aten, _thnn_differentiable_gru_cell_backward) \
_(aten, _thnn_differentiable_lstm_cell_backward) \
_(aten, _thnn_fused_gru_cell) \
_(aten, _thnn_fused_gru_cell_backward) \
_(aten, _thnn_fused_lstm_cell) \
_(aten, _thnn_fused_lstm_cell_backward) \
_(aten, _thnn_fused_lstm_cell_backward_impl) \
_(aten, _to_copy) \
_(aten, _to_cpu) \
_(aten, _to_dense) \
_(aten, _to_sparse) \
_(aten, _to_sparse_bsc) \
_(aten, _to_sparse_bsr) \
_(aten, _to_sparse_csc) \
_(aten, _to_sparse_csr) \
_(aten, _to_sparse_semi_structured) \
_(aten, _transform_bias_rescale_qkv) \
_(aten, _transformer_encoder_layer_fwd) \
_(aten, _trilinear) \
_(aten, _triton_multi_head_attention) \
_(aten, _triton_scaled_dot_attention) \
_(aten, _unique) \
_(aten, _unique2) \
_(aten, _unpack_dual) \
_(aten, _unsafe_index) \
_(aten, _unsafe_index_put) \
_(aten, _unsafe_view) \
_(aten, _upsample_bicubic2d_aa) \
_(aten, _upsample_bicubic2d_aa_backward) \
_(aten, _upsample_bilinear2d_aa) \
_(aten, _upsample_bilinear2d_aa_backward) \
_(aten, _upsample_nearest_exact1d) \
_(aten, _upsample_nearest_exact1d_backward) \
_(aten, _upsample_nearest_exact2d) \
_(aten, _upsample_nearest_exact2d_backward) \
_(aten, _upsample_nearest_exact3d) \
_(aten, _upsample_nearest_exact3d_backward) \
_(aten, _use_cudnn_ctc_loss) \
_(aten, _use_cudnn_rnn_flatten_weight) \
_(aten, _validate_compressed_sparse_indices) \
_(aten, _validate_sparse_bsc_tensor_args) \
_(aten, _validate_sparse_bsr_tensor_args) \
_(aten, _validate_sparse_compressed_tensor_args) \
_(aten, _validate_sparse_coo_tensor_args) \
_(aten, _validate_sparse_csc_tensor_args) \
_(aten, _validate_sparse_csr_tensor_args) \
_(aten, _values) \
_(aten, _values_copy) \
_(aten, _version) \
_(aten, _weight_int4pack_mm) \
_(aten, _weight_norm) \
_(aten, _weight_norm_differentiable_backward) \
_(aten, _weight_norm_interface) \
_(aten, _weight_norm_interface_backward) \
_(aten, abs) \
_(aten, abs_) \
_(aten, absolute) \
_(aten, absolute_) \
_(aten, acos) \
_(aten, acos_) \
_(aten, acosh) \
_(aten, acosh_) \
_(aten, adaptive_avg_pool1d) \
_(aten, adaptive_avg_pool2d) \
_(aten, adaptive_avg_pool3d) \
_(aten, adaptive_avg_pool3d_backward) \
_(aten, adaptive_max_pool1d) \
_(aten, adaptive_max_pool2d) \
_(aten, adaptive_max_pool2d_backward) \
_(aten, adaptive_max_pool3d) \
_(aten, adaptive_max_pool3d_backward) \
_(aten, add) \
_(aten, add_) \
_(aten, addbmm) \
_(aten, addbmm_) \
_(aten, addcdiv) \
_(aten, addcdiv_) \
_(aten, addcmul) \
_(aten, addcmul_) \
_(aten, addmm) \
_(aten, addmm_) \
_(aten, addmv) \
_(aten, addmv_) \
_(aten, addr) \
_(aten, addr_) \
_(aten, adjoint) \
_(aten, affine_grid_generator) \
_(aten, affine_grid_generator_backward) \
_(aten, alias) \
_(aten, alias_copy) \
_(aten, align_as) \
_(aten, align_tensors) \
_(aten, align_to) \
_(aten, all) \
_(aten, allclose) \
_(aten, alpha_dropout) \
_(aten, alpha_dropout_) \
_(aten, amax) \
_(aten, amin) \
_(aten, aminmax) \
_(aten, angle) \
_(aten, any) \
_(aten, arange) \
_(aten, arccos) \
_(aten, arccos_) \
_(aten, arccosh) \
_(aten, arccosh_) \
_(aten, arcsin) \
_(aten, arcsin_) \
_(aten, arcsinh) \
_(aten, arcsinh_) \
_(aten, arctan) \
_(aten, arctan2) \
_(aten, arctan2_) \
_(aten, arctan_) \
_(aten, arctanh) \
_(aten, arctanh_) \
_(aten, argmax) \
_(aten, argmin) \
_(aten, argsort) \
_(aten, argwhere) \
_(aten, as_strided) \
_(aten, as_strided_) \
_(aten, as_strided_copy) \
_(aten, as_strided_scatter) \
_(aten, asin) \
_(aten, asin_) \
_(aten, asinh) \
_(aten, asinh_) \
_(aten, atan) \
_(aten, atan2) \
_(aten, atan2_) \
_(aten, atan_) \
_(aten, atanh) \
_(aten, atanh_) \
_(aten, atleast_1d) \
_(aten, atleast_2d) \
_(aten, atleast_3d) \
_(aten, avg_pool1d) \
_(aten, avg_pool2d) \
_(aten, avg_pool2d_backward) \
_(aten, avg_pool3d) \
_(aten, avg_pool3d_backward) \
_(aten, baddbmm) \
_(aten, baddbmm_) \
_(aten, bartlett_window) \
_(aten, batch_norm) \
_(aten, batch_norm_backward_elemt) \
_(aten, batch_norm_backward_reduce) \
|
517 |
+
_(aten, batch_norm_elemt) \
|
518 |
+
_(aten, batch_norm_gather_stats) \
|
519 |
+
_(aten, batch_norm_gather_stats_with_counts) \
|
520 |
+
_(aten, batch_norm_stats) \
|
521 |
+
_(aten, batch_norm_update_stats) \
|
522 |
+
_(aten, bernoulli) \
|
523 |
+
_(aten, bernoulli_) \
|
524 |
+
_(aten, bilinear) \
|
525 |
+
_(aten, binary_cross_entropy) \
|
526 |
+
_(aten, binary_cross_entropy_backward) \
|
527 |
+
_(aten, binary_cross_entropy_with_logits) \
|
528 |
+
_(aten, bincount) \
|
529 |
+
_(aten, binomial) \
|
530 |
+
_(aten, bitwise_and) \
|
531 |
+
_(aten, bitwise_and_) \
|
532 |
+
_(aten, bitwise_left_shift) \
|
533 |
+
_(aten, bitwise_left_shift_) \
|
534 |
+
_(aten, bitwise_not) \
|
535 |
+
_(aten, bitwise_not_) \
|
536 |
+
_(aten, bitwise_or) \
|
537 |
+
_(aten, bitwise_or_) \
|
538 |
+
_(aten, bitwise_right_shift) \
|
539 |
+
_(aten, bitwise_right_shift_) \
|
540 |
+
_(aten, bitwise_xor) \
|
541 |
+
_(aten, bitwise_xor_) \
|
542 |
+
_(aten, blackman_window) \
|
543 |
+
_(aten, block_diag) \
|
544 |
+
_(aten, bmm) \
|
545 |
+
_(aten, broadcast_tensors) \
|
546 |
+
_(aten, broadcast_to) \
|
547 |
+
_(aten, bucketize) \
|
548 |
+
_(aten, can_cast) \
|
549 |
+
_(aten, cartesian_prod) \
|
550 |
+
_(aten, cat) \
|
551 |
+
_(aten, cauchy) \
|
552 |
+
_(aten, cauchy_) \
|
553 |
+
_(aten, ccol_indices) \
|
554 |
+
_(aten, ccol_indices_copy) \
|
555 |
+
_(aten, cdist) \
|
556 |
+
_(aten, ceil) \
|
557 |
+
_(aten, ceil_) \
|
558 |
+
_(aten, celu) \
|
559 |
+
_(aten, celu_) \
|
560 |
+
_(aten, chain_matmul) \
|
561 |
+
_(aten, chalf) \
|
562 |
+
_(aten, channel_shuffle) \
|
563 |
+
_(aten, cholesky) \
|
564 |
+
_(aten, cholesky_inverse) \
|
565 |
+
_(aten, cholesky_solve) \
|
566 |
+
_(aten, choose_qparams_optimized) \
|
567 |
+
_(aten, chunk) \
|
568 |
+
_(aten, clamp) \
|
569 |
+
_(aten, clamp_) \
|
570 |
+
_(aten, clamp_max) \
|
571 |
+
_(aten, clamp_max_) \
|
572 |
+
_(aten, clamp_min) \
|
573 |
+
_(aten, clamp_min_) \
|
574 |
+
_(aten, clip) \
|
575 |
+
_(aten, clip_) \
|
576 |
+
_(aten, clone) \
|
577 |
+
_(aten, coalesce) \
|
578 |
+
_(aten, col2im) \
|
579 |
+
_(aten, col_indices) \
|
580 |
+
_(aten, col_indices_copy) \
|
581 |
+
_(aten, column_stack) \
|
582 |
+
_(aten, combinations) \
|
583 |
+
_(aten, complex) \
|
584 |
+
_(aten, concat) \
|
585 |
+
_(aten, concatenate) \
|
586 |
+
_(aten, conj) \
|
587 |
+
_(aten, conj_physical) \
|
588 |
+
_(aten, conj_physical_) \
|
589 |
+
_(aten, constant_pad_nd) \
|
590 |
+
_(aten, contiguous) \
|
591 |
+
_(aten, conv1d) \
|
592 |
+
_(aten, conv2d) \
|
593 |
+
_(aten, conv3d) \
|
594 |
+
_(aten, conv_depthwise3d) \
|
595 |
+
_(aten, conv_tbc) \
|
596 |
+
_(aten, conv_tbc_backward) \
|
597 |
+
_(aten, conv_transpose1d) \
|
598 |
+
_(aten, conv_transpose2d) \
|
599 |
+
_(aten, conv_transpose3d) \
|
600 |
+
_(aten, convolution) \
|
601 |
+
_(aten, convolution_backward) \
|
602 |
+
_(aten, convolution_backward_overrideable) \
|
603 |
+
_(aten, convolution_overrideable) \
|
604 |
+
_(aten, copy) \
|
605 |
+
_(aten, copy_) \
|
606 |
+
_(aten, copy_sparse_to_sparse) \
|
607 |
+
_(aten, copy_sparse_to_sparse_) \
|
608 |
+
_(aten, copysign) \
|
609 |
+
_(aten, copysign_) \
|
610 |
+
_(aten, corrcoef) \
|
611 |
+
_(aten, cos) \
|
612 |
+
_(aten, cos_) \
|
613 |
+
_(aten, cosh) \
|
614 |
+
_(aten, cosh_) \
|
615 |
+
_(aten, cosine_embedding_loss) \
|
616 |
+
_(aten, cosine_similarity) \
|
617 |
+
_(aten, count_nonzero) \
|
618 |
+
_(aten, cov) \
|
619 |
+
_(aten, cross) \
|
620 |
+
_(aten, cross_entropy_loss) \
|
621 |
+
_(aten, crow_indices) \
|
622 |
+
_(aten, crow_indices_copy) \
|
623 |
+
_(aten, ctc_loss) \
|
624 |
+
_(aten, cudnn_affine_grid_generator) \
|
625 |
+
_(aten, cudnn_affine_grid_generator_backward) \
|
626 |
+
_(aten, cudnn_batch_norm) \
|
627 |
+
_(aten, cudnn_batch_norm_backward) \
|
628 |
+
_(aten, cudnn_convolution) \
|
629 |
+
_(aten, cudnn_convolution_add_relu) \
|
630 |
+
_(aten, cudnn_convolution_relu) \
|
631 |
+
_(aten, cudnn_convolution_transpose) \
|
632 |
+
_(aten, cudnn_grid_sampler) \
|
633 |
+
_(aten, cudnn_grid_sampler_backward) \
|
634 |
+
_(aten, cudnn_is_acceptable) \
|
635 |
+
_(aten, cummax) \
|
636 |
+
_(aten, cummaxmin_backward) \
|
637 |
+
_(aten, cummin) \
|
638 |
+
_(aten, cumprod) \
|
639 |
+
_(aten, cumprod_) \
|
640 |
+
_(aten, cumprod_backward) \
|
641 |
+
_(aten, cumsum) \
|
642 |
+
_(aten, cumsum_) \
|
643 |
+
_(aten, cumulative_trapezoid) \
|
644 |
+
_(aten, data) \
|
645 |
+
_(aten, deg2rad) \
|
646 |
+
_(aten, deg2rad_) \
|
647 |
+
_(aten, dense_dim) \
|
648 |
+
_(aten, dequantize) \
|
649 |
+
_(aten, det) \
|
650 |
+
_(aten, detach) \
|
651 |
+
_(aten, detach_) \
|
652 |
+
_(aten, detach_copy) \
|
653 |
+
_(aten, diag) \
|
654 |
+
_(aten, diag_embed) \
|
655 |
+
_(aten, diagflat) \
|
656 |
+
_(aten, diagonal) \
|
657 |
+
_(aten, diagonal_backward) \
|
658 |
+
_(aten, diagonal_copy) \
|
659 |
+
_(aten, diagonal_scatter) \
|
660 |
+
_(aten, diff) \
|
661 |
+
_(aten, digamma) \
|
662 |
+
_(aten, digamma_) \
|
663 |
+
_(aten, dist) \
|
664 |
+
_(aten, div) \
|
665 |
+
_(aten, div_) \
|
666 |
+
_(aten, divide) \
|
667 |
+
_(aten, divide_) \
|
668 |
+
_(aten, dot) \
|
669 |
+
_(aten, dropout) \
|
670 |
+
_(aten, dropout_) \
|
671 |
+
_(aten, dsplit) \
|
672 |
+
_(aten, dstack) \
|
673 |
+
_(aten, einsum) \
|
674 |
+
_(aten, elu) \
|
675 |
+
_(aten, elu_) \
|
676 |
+
_(aten, elu_backward) \
|
677 |
+
_(aten, embedding) \
|
678 |
+
_(aten, embedding_backward) \
|
679 |
+
_(aten, embedding_bag) \
|
680 |
+
_(aten, embedding_dense_backward) \
|
681 |
+
_(aten, embedding_renorm) \
|
682 |
+
_(aten, embedding_renorm_) \
|
683 |
+
_(aten, embedding_sparse_backward) \
|
684 |
+
_(aten, empty) \
|
685 |
+
_(aten, empty_like) \
|
686 |
+
_(aten, empty_permuted) \
|
687 |
+
_(aten, empty_quantized) \
|
688 |
+
_(aten, empty_strided) \
|
689 |
+
_(aten, eq) \
|
690 |
+
_(aten, eq_) \
|
691 |
+
_(aten, equal) \
|
692 |
+
_(aten, erf) \
|
693 |
+
_(aten, erf_) \
|
694 |
+
_(aten, erfc) \
|
695 |
+
_(aten, erfc_) \
|
696 |
+
_(aten, erfinv) \
|
697 |
+
_(aten, erfinv_) \
|
698 |
+
_(aten, exp) \
|
699 |
+
_(aten, exp2) \
|
700 |
+
_(aten, exp2_) \
|
701 |
+
_(aten, exp_) \
|
702 |
+
_(aten, expand) \
|
703 |
+
_(aten, expand_as) \
|
704 |
+
_(aten, expand_copy) \
|
705 |
+
_(aten, expm1) \
|
706 |
+
_(aten, expm1_) \
|
707 |
+
_(aten, exponential) \
|
708 |
+
_(aten, exponential_) \
|
709 |
+
_(aten, eye) \
|
710 |
+
_(aten, fake_quantize_per_channel_affine) \
|
711 |
+
_(aten, fake_quantize_per_channel_affine_cachemask) \
|
712 |
+
_(aten, fake_quantize_per_channel_affine_cachemask_backward) \
|
713 |
+
_(aten, fake_quantize_per_tensor_affine) \
|
714 |
+
_(aten, fake_quantize_per_tensor_affine_cachemask) \
|
715 |
+
_(aten, fake_quantize_per_tensor_affine_cachemask_backward) \
|
716 |
+
_(aten, fbgemm_linear_fp16_weight) \
|
717 |
+
_(aten, fbgemm_linear_fp16_weight_fp32_activation) \
|
718 |
+
_(aten, fbgemm_linear_int8_weight) \
|
719 |
+
_(aten, fbgemm_linear_int8_weight_fp32_activation) \
|
720 |
+
_(aten, fbgemm_linear_quantize_weight) \
|
721 |
+
_(aten, fbgemm_pack_gemm_matrix_fp16) \
|
722 |
+
_(aten, fbgemm_pack_quantized_matrix) \
|
723 |
+
_(aten, feature_alpha_dropout) \
|
724 |
+
_(aten, feature_alpha_dropout_) \
|
725 |
+
_(aten, feature_dropout) \
|
726 |
+
_(aten, feature_dropout_) \
|
727 |
+
_(aten, fft_fft) \
|
728 |
+
_(aten, fft_fft2) \
|
729 |
+
_(aten, fft_fftfreq) \
|
730 |
+
_(aten, fft_fftn) \
|
731 |
+
_(aten, fft_fftshift) \
|
732 |
+
_(aten, fft_hfft) \
|
733 |
+
_(aten, fft_hfft2) \
|
734 |
+
_(aten, fft_hfftn) \
|
735 |
+
_(aten, fft_ifft) \
|
736 |
+
_(aten, fft_ifft2) \
|
737 |
+
_(aten, fft_ifftn) \
|
738 |
+
_(aten, fft_ifftshift) \
|
739 |
+
_(aten, fft_ihfft) \
|
740 |
+
_(aten, fft_ihfft2) \
|
741 |
+
_(aten, fft_ihfftn) \
|
742 |
+
_(aten, fft_irfft) \
|
743 |
+
_(aten, fft_irfft2) \
|
744 |
+
_(aten, fft_irfftn) \
|
745 |
+
_(aten, fft_rfft) \
|
746 |
+
_(aten, fft_rfft2) \
|
747 |
+
_(aten, fft_rfftfreq) \
|
748 |
+
_(aten, fft_rfftn) \
|
749 |
+
_(aten, fill) \
|
750 |
+
_(aten, fill_) \
|
751 |
+
_(aten, fill_diagonal) \
|
752 |
+
_(aten, fill_diagonal_) \
|
753 |
+
_(aten, fix) \
|
754 |
+
_(aten, fix_) \
|
755 |
+
_(aten, flatten) \
|
756 |
+
_(aten, flatten_dense_tensors) \
|
757 |
+
_(aten, flip) \
|
758 |
+
_(aten, fliplr) \
|
759 |
+
_(aten, flipud) \
|
760 |
+
_(aten, float_power) \
|
761 |
+
_(aten, float_power_) \
|
762 |
+
_(aten, floor) \
|
763 |
+
_(aten, floor_) \
|
764 |
+
_(aten, floor_divide) \
|
765 |
+
_(aten, floor_divide_) \
|
766 |
+
_(aten, fmax) \
|
767 |
+
_(aten, fmin) \
|
768 |
+
_(aten, fmod) \
|
769 |
+
_(aten, fmod_) \
|
770 |
+
_(aten, frac) \
|
771 |
+
_(aten, frac_) \
|
772 |
+
_(aten, fractional_max_pool2d) \
|
773 |
+
_(aten, fractional_max_pool2d_backward) \
|
774 |
+
_(aten, fractional_max_pool3d) \
|
775 |
+
_(aten, fractional_max_pool3d_backward) \
|
776 |
+
_(aten, frexp) \
|
777 |
+
_(aten, frobenius_norm) \
|
778 |
+
_(aten, from_file) \
|
779 |
+
_(aten, full) \
|
780 |
+
_(aten, full_like) \
|
781 |
+
_(aten, fused_moving_avg_obs_fake_quant) \
|
782 |
+
_(aten, gather) \
|
783 |
+
_(aten, gather_backward) \
|
784 |
+
_(aten, gcd) \
|
785 |
+
_(aten, gcd_) \
|
786 |
+
_(aten, ge) \
|
787 |
+
_(aten, ge_) \
|
788 |
+
_(aten, gelu) \
|
789 |
+
_(aten, gelu_) \
|
790 |
+
_(aten, gelu_backward) \
|
791 |
+
_(aten, geometric) \
|
792 |
+
_(aten, geometric_) \
|
793 |
+
_(aten, geqrf) \
|
794 |
+
_(aten, ger) \
|
795 |
+
_(aten, glu) \
|
796 |
+
_(aten, glu_backward) \
|
797 |
+
_(aten, glu_backward_jvp) \
|
798 |
+
_(aten, glu_jvp) \
|
799 |
+
_(aten, gradient) \
|
800 |
+
_(aten, greater) \
|
801 |
+
_(aten, greater_) \
|
802 |
+
_(aten, greater_equal) \
|
803 |
+
_(aten, greater_equal_) \
|
804 |
+
_(aten, grid_sampler) \
|
805 |
+
_(aten, grid_sampler_2d) \
|
806 |
+
_(aten, grid_sampler_2d_backward) \
|
807 |
+
_(aten, grid_sampler_3d) \
|
808 |
+
_(aten, grid_sampler_3d_backward) \
|
809 |
+
_(aten, group_norm) \
|
810 |
+
_(aten, gru) \
|
811 |
+
_(aten, gru_cell) \
|
812 |
+
_(aten, gt) \
|
813 |
+
_(aten, gt_) \
|
814 |
+
_(aten, hamming_window) \
|
815 |
+
_(aten, hann_window) \
|
816 |
+
_(aten, hardshrink) \
|
817 |
+
_(aten, hardshrink_backward) \
|
818 |
+
_(aten, hardsigmoid) \
|
819 |
+
_(aten, hardsigmoid_) \
|
820 |
+
_(aten, hardsigmoid_backward) \
|
821 |
+
_(aten, hardswish) \
|
822 |
+
_(aten, hardswish_) \
|
823 |
+
_(aten, hardswish_backward) \
|
824 |
+
_(aten, hardtanh) \
|
825 |
+
_(aten, hardtanh_) \
|
826 |
+
_(aten, hardtanh_backward) \
|
827 |
+
_(aten, heaviside) \
|
828 |
+
_(aten, heaviside_) \
|
829 |
+
_(aten, hinge_embedding_loss) \
|
830 |
+
_(aten, histc) \
|
831 |
+
_(aten, histogram) \
|
832 |
+
_(aten, histogramdd) \
|
833 |
+
_(aten, hsplit) \
|
834 |
+
_(aten, hspmm) \
|
835 |
+
_(aten, hstack) \
|
836 |
+
_(aten, huber_loss) \
|
837 |
+
_(aten, huber_loss_backward) \
|
838 |
+
_(aten, hypot) \
|
839 |
+
_(aten, hypot_) \
|
840 |
+
_(aten, i0) \
|
841 |
+
_(aten, i0_) \
|
842 |
+
_(aten, igamma) \
|
843 |
+
_(aten, igamma_) \
|
844 |
+
_(aten, igammac) \
|
845 |
+
_(aten, igammac_) \
|
846 |
+
_(aten, im2col) \
|
847 |
+
_(aten, imag) \
|
848 |
+
_(aten, index) \
|
849 |
+
_(aten, index_add) \
|
850 |
+
_(aten, index_add_) \
|
851 |
+
_(aten, index_copy) \
|
852 |
+
_(aten, index_copy_) \
|
853 |
+
_(aten, index_fill) \
|
854 |
+
_(aten, index_fill_) \
|
855 |
+
_(aten, index_put) \
|
856 |
+
_(aten, index_put_) \
|
857 |
+
_(aten, index_reduce) \
|
858 |
+
_(aten, index_reduce_) \
|
859 |
+
_(aten, index_select) \
|
860 |
+
_(aten, index_select_backward) \
|
861 |
+
_(aten, indices) \
|
862 |
+
_(aten, indices_copy) \
|
863 |
+
_(aten, infinitely_differentiable_gelu_backward) \
|
864 |
+
_(aten, inner) \
|
865 |
+
_(aten, instance_norm) \
|
866 |
+
_(aten, int_repr) \
|
867 |
+
_(aten, inverse) \
|
868 |
+
_(aten, is_coalesced) \
|
869 |
+
_(aten, is_complex) \
|
870 |
+
_(aten, is_conj) \
|
871 |
+
_(aten, is_distributed) \
|
872 |
+
_(aten, is_floating_point) \
|
873 |
+
_(aten, is_inference) \
|
874 |
+
_(aten, is_leaf) \
|
875 |
+
_(aten, is_neg) \
|
876 |
+
_(aten, is_nonzero) \
|
877 |
+
_(aten, is_pinned) \
|
878 |
+
_(aten, is_same_size) \
|
879 |
+
_(aten, is_set_to) \
|
880 |
+
_(aten, is_signed) \
|
881 |
+
_(aten, is_vulkan_available) \
|
882 |
+
_(aten, isclose) \
|
883 |
+
_(aten, isfinite) \
|
884 |
+
_(aten, isin) \
|
885 |
+
_(aten, isinf) \
|
886 |
+
_(aten, isnan) \
|
887 |
+
_(aten, isneginf) \
|
888 |
+
_(aten, isposinf) \
|
889 |
+
_(aten, isreal) \
|
890 |
+
_(aten, istft) \
|
891 |
+
_(aten, item) \
|
892 |
+
_(aten, kaiser_window) \
|
893 |
+
_(aten, kl_div) \
|
894 |
+
_(aten, kron) \
|
895 |
+
_(aten, kthvalue) \
|
896 |
+
_(aten, l1_loss) \
|
897 |
+
_(aten, layer_norm) \
|
898 |
+
_(aten, lcm) \
|
899 |
+
_(aten, lcm_) \
|
900 |
+
_(aten, ldexp) \
|
901 |
+
_(aten, ldexp_) \
|
902 |
+
_(aten, le) \
|
903 |
+
_(aten, le_) \
|
904 |
+
_(aten, leaky_relu) \
|
905 |
+
_(aten, leaky_relu_) \
|
906 |
+
_(aten, leaky_relu_backward) \
|
907 |
+
_(aten, lerp) \
|
908 |
+
_(aten, lerp_) \
|
909 |
+
_(aten, less) \
|
910 |
+
_(aten, less_) \
|
911 |
+
_(aten, less_equal) \
|
912 |
+
_(aten, less_equal_) \
|
913 |
+
_(aten, lgamma) \
|
914 |
+
_(aten, lgamma_) \
|
915 |
+
_(aten, lift) \
|
916 |
+
_(aten, lift_fresh) \
|
917 |
+
_(aten, lift_fresh_copy) \
|
918 |
+
_(aten, linalg_cholesky) \
|
919 |
+
_(aten, linalg_cholesky_ex) \
|
920 |
+
_(aten, linalg_cond) \
|
921 |
+
_(aten, linalg_cross) \
|
922 |
+
_(aten, linalg_det) \
|
923 |
+
_(aten, linalg_diagonal) \
|
924 |
+
_(aten, linalg_eig) \
|
925 |
+
_(aten, linalg_eigh) \
|
926 |
+
_(aten, linalg_eigvals) \
|
927 |
+
_(aten, linalg_eigvalsh) \
|
928 |
+
_(aten, linalg_householder_product) \
|
929 |
+
_(aten, linalg_inv) \
|
930 |
+
_(aten, linalg_inv_ex) \
|
931 |
+
_(aten, linalg_ldl_factor) \
|
932 |
+
_(aten, linalg_ldl_factor_ex) \
|
933 |
+
_(aten, linalg_ldl_solve) \
|
934 |
+
_(aten, linalg_lstsq) \
|
935 |
+
_(aten, linalg_lu) \
|
936 |
+
_(aten, linalg_lu_factor) \
|
937 |
+
_(aten, linalg_lu_factor_ex) \
|
938 |
+
_(aten, linalg_lu_solve) \
|
939 |
+
_(aten, linalg_matmul) \
|
940 |
+
_(aten, linalg_matrix_exp) \
|
941 |
+
_(aten, linalg_matrix_norm) \
|
942 |
+
_(aten, linalg_matrix_power) \
|
943 |
+
_(aten, linalg_matrix_rank) \
|
944 |
+
_(aten, linalg_multi_dot) \
|
945 |
+
_(aten, linalg_norm) \
|
946 |
+
_(aten, linalg_pinv) \
|
947 |
+
_(aten, linalg_qr) \
|
948 |
+
_(aten, linalg_slogdet) \
|
949 |
+
_(aten, linalg_solve) \
|
950 |
+
_(aten, linalg_solve_ex) \
|
951 |
+
_(aten, linalg_solve_triangular) \
|
952 |
+
_(aten, linalg_svd) \
|
953 |
+
_(aten, linalg_svdvals) \
|
954 |
+
_(aten, linalg_tensorinv) \
|
955 |
+
_(aten, linalg_tensorsolve) \
|
956 |
+
_(aten, linalg_vander) \
|
957 |
+
_(aten, linalg_vecdot) \
|
958 |
+
_(aten, linalg_vector_norm) \
|
959 |
+
_(aten, linear) \
|
960 |
+
_(aten, linear_backward) \
|
961 |
+
_(aten, linspace) \
|
962 |
+
_(aten, log) \
|
963 |
+
_(aten, log10) \
|
964 |
+
_(aten, log10_) \
|
965 |
+
_(aten, log1p) \
|
966 |
+
_(aten, log1p_) \
|
967 |
+
_(aten, log2) \
|
968 |
+
_(aten, log2_) \
|
969 |
+
_(aten, log_) \
|
970 |
+
_(aten, log_normal) \
|
971 |
+
_(aten, log_normal_) \
|
972 |
+
_(aten, log_sigmoid) \
|
973 |
+
_(aten, log_sigmoid_backward) \
|
974 |
+
_(aten, log_sigmoid_forward) \
|
975 |
+
_(aten, log_softmax) \
|
976 |
+
_(aten, logaddexp) \
|
977 |
+
_(aten, logaddexp2) \
|
978 |
+
_(aten, logcumsumexp) \
|
979 |
+
_(aten, logdet) \
|
980 |
+
_(aten, logical_and) \
|
981 |
+
_(aten, logical_and_) \
|
982 |
+
_(aten, logical_not) \
|
983 |
+
_(aten, logical_not_) \
|
984 |
+
_(aten, logical_or) \
|
985 |
+
_(aten, logical_or_) \
|
986 |
+
_(aten, logical_xor) \
|
987 |
+
_(aten, logical_xor_) \
|
988 |
+
_(aten, logit) \
|
989 |
+
_(aten, logit_) \
|
990 |
+
_(aten, logit_backward) \
|
991 |
+
_(aten, logspace) \
|
992 |
+
_(aten, logsumexp) \
|
993 |
+
_(aten, lshift) \
|
994 |
+
_(aten, lstm) \
|
995 |
+
_(aten, lstm_cell) \
|
996 |
+
_(aten, lstm_mps_backward) \
|
997 |
+
_(aten, lt) \
|
998 |
+
_(aten, lt_) \
|
999 |
+
_(aten, lu_solve) \
|
1000 |
+
_(aten, lu_unpack) \
|
1001 |
+
_(aten, mH) \
|
1002 |
+
_(aten, mT) \
|
1003 |
+
_(aten, margin_ranking_loss) \
|
1004 |
+
_(aten, masked_fill) \
|
1005 |
+
_(aten, masked_fill_) \
|
1006 |
+
_(aten, masked_scatter) \
|
1007 |
+
_(aten, masked_scatter_) \
|
1008 |
+
_(aten, masked_scatter_backward) \
|
1009 |
+
_(aten, masked_select) \
|
1010 |
+
_(aten, masked_select_backward) \
|
1011 |
+
_(aten, matmul) \
|
1012 |
+
_(aten, matmul_backward) \
|
1013 |
+
_(aten, matrix_H) \
|
1014 |
+
_(aten, matrix_exp) \
|
1015 |
+
_(aten, matrix_exp_backward) \
|
1016 |
+
_(aten, matrix_power) \
|
1017 |
+
_(aten, max) \
|
1018 |
+
_(aten, max_pool1d) \
|
1019 |
+
_(aten, max_pool1d_with_indices) \
|
1020 |
+
_(aten, max_pool2d) \
|
1021 |
+
_(aten, max_pool2d_backward) \
|
1022 |
+
_(aten, max_pool2d_with_indices) \
|
1023 |
+
_(aten, max_pool2d_with_indices_backward) \
|
1024 |
+
_(aten, max_pool3d) \
|
1025 |
+
_(aten, max_pool3d_with_indices) \
|
1026 |
+
_(aten, max_pool3d_with_indices_backward) \
|
1027 |
+
_(aten, max_unpool2d) \
|
1028 |
+
_(aten, max_unpool3d) \
|
1029 |
+
_(aten, maximum) \
|
1030 |
+
_(aten, mean) \
|
1031 |
+
_(aten, median) \
|
1032 |
+
_(aten, meshgrid) \
|
1033 |
+
_(aten, min) \
|
1034 |
+
_(aten, minimum) \
|
1035 |
+
_(aten, miopen_batch_norm) \
|
1036 |
+
_(aten, miopen_batch_norm_backward) \
|
1037 |
+
_(aten, miopen_convolution) \
|
1038 |
+
_(aten, miopen_convolution_add_relu) \
|
1039 |
+
_(aten, miopen_convolution_relu) \
|
1040 |
+
_(aten, miopen_convolution_transpose) \
|
1041 |
+
_(aten, miopen_depthwise_convolution) \
|
1042 |
+
_(aten, miopen_rnn) \
|
1043 |
+
_(aten, miopen_rnn_backward) \
|
1044 |
+
_(aten, mish) \
|
1045 |
+
_(aten, mish_) \
|
1046 |
+
_(aten, mish_backward) \
|
1047 |
+
_(aten, mkldnn_adaptive_avg_pool2d) \
|
1048 |
+
_(aten, mkldnn_adaptive_avg_pool2d_backward) \
|
1049 |
+
_(aten, mkldnn_convolution) \
|
1050 |
+
_(aten, mkldnn_linear) \
|
1051 |
+
_(aten, mkldnn_linear_backward) \
|
1052 |
+
_(aten, mkldnn_linear_backward_input) \
|
1053 |
+
_(aten, mkldnn_linear_backward_weights) \
|
1054 |
+
_(aten, mkldnn_max_pool2d) \
|
1055 |
+
_(aten, mkldnn_max_pool2d_backward) \
|
1056 |
+
_(aten, mkldnn_max_pool3d) \
|
1057 |
+
_(aten, mkldnn_max_pool3d_backward) \
|
1058 |
+
_(aten, mkldnn_reorder_conv2d_weight) \
|
1059 |
+
_(aten, mkldnn_reorder_conv3d_weight) \
|
1060 |
+
_(aten, mkldnn_rnn_layer) \
|
1061 |
+
_(aten, mkldnn_rnn_layer_backward) \
|
1062 |
+
_(aten, mm) \
|
1063 |
+
_(aten, mode) \
|
1064 |
+
_(aten, moveaxis) \
|
1065 |
+
_(aten, movedim) \
|
1066 |
+
_(aten, mps_convolution_backward) \
|
1067 |
+
_(aten, mps_convolution_transpose_backward) \
|
1068 |
+
_(aten, mse_loss) \
|
1069 |
+
_(aten, mse_loss_backward) \
|
1070 |
+
_(aten, msort) \
|
1071 |
+
_(aten, mul) \
|
1072 |
+
_(aten, mul_) \
|
1073 |
+
_(aten, multi_margin_loss) \
|
1074 |
+
_(aten, multi_margin_loss_backward) \
|
1075 |
+
_(aten, multilabel_margin_loss) \
|
1076 |
+
_(aten, multilabel_margin_loss_backward) \
|
1077 |
+
_(aten, multilabel_margin_loss_forward) \
|
1078 |
+
_(aten, multinomial) \
|
1079 |
+
_(aten, multiply) \
|
1080 |
+
_(aten, multiply_) \
|
1081 |
+
_(aten, mv) \
|
1082 |
+
_(aten, mvlgamma) \
|
1083 |
+
_(aten, mvlgamma_) \
|
1084 |
+
_(aten, nan_to_num) \
|
1085 |
+
_(aten, nan_to_num_) \
|
1086 |
+
_(aten, nanmean) \
|
1087 |
+
_(aten, nanmedian) \
|
1088 |
+
_(aten, nanquantile) \
|
1089 |
+
_(aten, nansum) \
|
1090 |
+
_(aten, narrow) \
|
1091 |
+
_(aten, narrow_copy) \
|
1092 |
+
_(aten, native_batch_norm) \
|
1093 |
+
_(aten, native_batch_norm_backward) \
|
1094 |
+
_(aten, native_channel_shuffle) \
|
1095 |
+
_(aten, native_dropout) \
|
1096 |
+
_(aten, native_dropout_backward) \
|
1097 |
+
_(aten, native_group_norm) \
|
1098 |
+
_(aten, native_group_norm_backward) \
|
1099 |
+
_(aten, native_layer_norm) \
|
1100 |
+
_(aten, native_layer_norm_backward) \
|
1101 |
+
_(aten, native_norm) \
|
1102 |
+
_(aten, ne) \
|
1103 |
+
_(aten, ne_) \
|
1104 |
+
_(aten, neg) \
|
1105 |
+
_(aten, neg_) \
|
1106 |
+
_(aten, negative) \
|
1107 |
+
_(aten, negative_) \
|
1108 |
+
_(aten, nested_to_padded_tensor) \
|
1109 |
+
_(aten, new_empty) \
|
1110 |
+
_(aten, new_empty_strided) \
|
1111 |
+
_(aten, new_full) \
|
1112 |
+
_(aten, new_ones) \
|
1113 |
+
_(aten, new_zeros) \
|
1114 |
+
_(aten, nextafter) \
|
1115 |
+
_(aten, nextafter_) \
|
1116 |
+
_(aten, nll_loss) \
|
1117 |
+
_(aten, nll_loss2d) \
|
1118 |
+
_(aten, nll_loss2d_backward) \
|
1119 |
+
_(aten, nll_loss2d_forward) \
|
1120 |
+
_(aten, nll_loss_backward) \
|
1121 |
+
_(aten, nll_loss_forward) \
|
1122 |
+
_(aten, nll_loss_nd) \
|
1123 |
+
_(aten, nonzero) \
|
1124 |
+
_(aten, nonzero_numpy) \
|
1125 |
+
_(aten, nonzero_static) \
|
1126 |
+
_(aten, norm) \
|
1127 |
+
_(aten, norm_except_dim) \
|
1128 |
+
_(aten, normal) \
|
1129 |
+
_(aten, normal_) \
|
1130 |
+
_(aten, normal_functional) \
|
1131 |
+
_(aten, not_equal) \
|
1132 |
+
_(aten, not_equal_) \
|
1133 |
+
_(aten, nuclear_norm) \
|
1134 |
+
_(aten, numpy_T) \
|
1135 |
+
_(aten, one_hot) \
|
1136 |
+
_(aten, ones) \
|
1137 |
+
_(aten, ones_like) \
|
1138 |
+
_(aten, orgqr) \
|
1139 |
+
_(aten, ormqr) \
|
1140 |
+
_(aten, outer) \
|
1141 |
+
_(aten, output_nr) \
|
1142 |
+
_(aten, pad) \
|
1143 |
+
_(aten, pad_sequence) \
|
1144 |
+
_(aten, pairwise_distance) \
|
1145 |
+
_(aten, pdist) \
|
1146 |
+
_(aten, permute) \
|
1147 |
+
_(aten, permute_copy) \
|
1148 |
+
_(aten, pin_memory) \
|
1149 |
+
_(aten, pinverse) \
|
1150 |
+
_(aten, pixel_shuffle) \
|
1151 |
+
_(aten, pixel_unshuffle) \
|
1152 |
+
_(aten, poisson) \
|
1153 |
+
_(aten, poisson_nll_loss) \
|
1154 |
+
_(aten, polar) \
|
1155 |
+
_(aten, polygamma) \
|
1156 |
+
_(aten, polygamma_) \
|
1157 |
+
_(aten, positive) \
|
1158 |
+
_(aten, pow) \
|
1159 |
+
_(aten, pow_) \
|
1160 |
+
_(aten, prelu) \
|
1161 |
+
_(aten, prod) \
|
1162 |
+
_(aten, promote_types) \
|
1163 |
+
_(aten, put) \
|
1164 |
+
_(aten, put_) \
|
1165 |
+
_(aten, q_per_channel_axis) \
|
1166 |
+
_(aten, q_per_channel_scales) \
|
1167 |
+
_(aten, q_per_channel_zero_points) \
|
1168 |
+
_(aten, q_scale) \
|
1169 |
+
_(aten, q_zero_point) \
|
1170 |
+
_(aten, qr) \
|
1171 |
+
_(aten, qscheme) \
|
1172 |
+
_(aten, quantile) \
|
1173 |
+
_(aten, quantize_per_channel) \
|
1174 |
+
_(aten, quantize_per_tensor) \
|
1175 |
+
_(aten, quantize_per_tensor_dynamic) \
|
1176 |
+
_(aten, quantized_batch_norm) \
|
1177 |
+
_(aten, quantized_gru_cell) \
|
1178 |
+
_(aten, quantized_lstm_cell) \
|
1179 |
+
_(aten, quantized_max_pool1d) \
|
1180 |
+
_(aten, quantized_max_pool2d) \
|
1181 |
+
_(aten, quantized_max_pool3d) \
|
1182 |
+
_(aten, quantized_rnn_relu_cell) \
|
1183 |
+
_(aten, quantized_rnn_tanh_cell) \
|
1184 |
+
_(aten, rad2deg) \
|
1185 |
+
_(aten, rad2deg_) \
|
1186 |
+
_(aten, rand) \
|
1187 |
+
_(aten, rand_like) \
|
1188 |
+
_(aten, randint) \
|
1189 |
+
_(aten, randint_like) \
|
1190 |
+
_(aten, randn) \
|
1191 |
+
_(aten, randn_like) \
|
1192 |
+
_(aten, random) \
|
1193 |
+
_(aten, random_) \
|
1194 |
+
_(aten, randperm) \
|
1195 |
+
_(aten, range) \
|
1196 |
+
_(aten, ravel) \
|
1197 |
+
_(aten, real) \
|
1198 |
+
_(aten, reciprocal) \
|
1199 |
+
_(aten, reciprocal_) \
|
1200 |
+
_(aten, record_stream) \
|
1201 |
+
_(aten, refine_names) \
|
1202 |
+
_(aten, reflection_pad1d) \
|
1203 |
+
_(aten, reflection_pad1d_backward) \
|
1204 |
+
_(aten, reflection_pad2d) \
|
1205 |
+
_(aten, reflection_pad2d_backward) \
|
1206 |
+
_(aten, reflection_pad3d) \
|
1207 |
+
_(aten, reflection_pad3d_backward) \
|
1208 |
+
_(aten, relu) \
|
1209 |
+
_(aten, relu6) \
|
1210 |
+
_(aten, relu6_) \
|
1211 |
+
_(aten, relu_) \
|
1212 |
+
_(aten, remainder) \
|
1213 |
+
_(aten, remainder_) \
|
1214 |
+
_(aten, rename) \
|
1215 |
+
_(aten, rename_) \
|
1216 |
+
_(aten, renorm) \
|
1217 |
+
_(aten, renorm_) \
|
1218 |
+
_(aten, repeat) \
|
1219 |
+
_(aten, repeat_interleave) \
|
1220 |
+
_(aten, replication_pad1d) \
|
1221 |
+
_(aten, replication_pad1d_backward) \
|
1222 |
+
_(aten, replication_pad2d) \
|
1223 |
+
_(aten, replication_pad2d_backward) \
|
1224 |
+
_(aten, replication_pad3d) \
|
1225 |
+
_(aten, replication_pad3d_backward) \
|
1226 |
+
_(aten, requires_grad) \
|
1227 |
+
_(aten, requires_grad_) \
|
1228 |
+
_(aten, reshape) \
|
1229 |
+
_(aten, reshape_as) \
|
1230 |
+
_(aten, resize) \
|
1231 |
+
_(aten, resize_) \
|
1232 |
+
_(aten, resize_as) \
|
1233 |
+
_(aten, resize_as_) \
|
1234 |
+
_(aten, resize_as_sparse) \
|
1235 |
+
_(aten, resize_as_sparse_) \
|
1236 |
+
_(aten, resolve_conj) \
|
1237 |
+
_(aten, resolve_neg) \
|
1238 |
+
_(aten, result_type) \
|
1239 |
+
_(aten, retain_grad) \
|
1240 |
+
_(aten, retains_grad) \
|
1241 |
+
_(aten, rnn_relu) \
|
1242 |
+
_(aten, rnn_relu_cell) \
|
1243 |
+
_(aten, rnn_tanh) \
|
1244 |
+
_(aten, rnn_tanh_cell) \
|
1245 |
+
_(aten, roll) \
|
1246 |
+
_(aten, rot90) \
|
1247 |
+
_(aten, round) \
|
1248 |
+
_(aten, round_) \
|
1249 |
+
_(aten, row_indices) \
|
1250 |
+
_(aten, row_indices_copy) \
|
1251 |
+
_(aten, row_stack) \
|
1252 |
+
_(aten, rrelu) \
|
1253 |
+
_(aten, rrelu_) \
|
1254 |
+
_(aten, rrelu_with_noise) \
|
1255 |
+
_(aten, rrelu_with_noise_) \
|
1256 |
+
_(aten, rrelu_with_noise_backward) \
|
1257 |
+
_(aten, rshift) \
|
1258 |
+
_(aten, rsqrt) \
|
1259 |
+
_(aten, rsqrt_) \
|
1260 |
+
_(aten, rsub) \
|
1261 |
+
_(aten, scalar_tensor) \
|
1262 |
+
_(aten, scaled_dot_product_attention) \
|
1263 |
+
_(aten, scatter) \
|
1264 |
+
_(aten, scatter_) \
|
1265 |
+
_(aten, scatter_add) \
|
1266 |
+
_(aten, scatter_add_) \
|
1267 |
+
_(aten, scatter_reduce) \
|
1268 |
+
_(aten, scatter_reduce_) \
|
1269 |
+
_(aten, searchsorted) \
|
1270 |
+
_(aten, segment_reduce) \
|
1271 |
+
_(aten, select) \
|
1272 |
+
_(aten, select_backward) \
|
1273 |
+
_(aten, select_copy) \
|
1274 |
+
_(aten, select_scatter) \
|
1275 |
+
_(aten, selu) \
|
1276 |
+
_(aten, selu_) \
|
1277 |
+
_(aten, set) \
|
1278 |
+
_(aten, set_) \
|
1279 |
+
_(aten, set_data) \
|
1280 |
+
_(aten, sgn) \
|
1281 |
+
_(aten, sgn_) \
|
1282 |
+
_(aten, sigmoid) \
|
1283 |
+
_(aten, sigmoid_) \
|
1284 |
+
_(aten, sigmoid_backward) \
|
1285 |
+
_(aten, sign) \
|
1286 |
+
_(aten, sign_) \
|
1287 |
+
_(aten, signbit) \
|
1288 |
+
_(aten, silu) \
|
1289 |
+
_(aten, silu_) \
|
1290 |
+
_(aten, silu_backward) \
|
1291 |
+
_(aten, sin) \
|
1292 |
+
_(aten, sin_) \
|
1293 |
+
_(aten, sinc) \
|
1294 |
+
_(aten, sinc_) \
|
1295 |
+
_(aten, sinh) \
|
1296 |
+
_(aten, sinh_) \
|
1297 |
+
_(aten, size) \
|
1298 |
+
_(aten, slice) \
|
1299 |
+
_(aten, slice_backward) \
|
1300 |
+
_(aten, slice_copy) \
|
1301 |
+
_(aten, slice_scatter) \
|
1302 |
+
_(aten, slogdet) \
|
1303 |
+
_(aten, slow_conv3d) \
|
1304 |
+
_(aten, slow_conv3d_forward) \
|
1305 |
+
_(aten, slow_conv_dilated2d) \
|
1306 |
+
_(aten, slow_conv_dilated3d) \
|
1307 |
+
_(aten, slow_conv_transpose2d) \
|
1308 |
+
_(aten, slow_conv_transpose3d) \
|
1309 |
+
_(aten, smm) \
|
1310 |
+
_(aten, smooth_l1_loss) \
|
1311 |
+
_(aten, smooth_l1_loss_backward) \
|
1312 |
+
_(aten, soft_margin_loss) \
|
1313 |
+
_(aten, soft_margin_loss_backward) \
|
1314 |
+
_(aten, softmax) \
|
1315 |
+
_(aten, softplus) \
|
1316 |
+
_(aten, softplus_backward) \
|
1317 |
+
_(aten, softshrink) \
|
1318 |
+
_(aten, softshrink_backward) \
|
1319 |
+
_(aten, sort) \
|
1320 |
+
_(aten, sparse_bsc_tensor) \
|
1321 |
+
_(aten, sparse_bsr_tensor) \
|
1322 |
+
_(aten, sparse_compressed_tensor) \
|
1323 |
+
_(aten, sparse_coo_tensor) \
|
1324 |
+
_(aten, sparse_csc_tensor) \
|
1325 |
+
_(aten, sparse_csr_tensor) \
|
1326 |
+
_(aten, sparse_dim) \
|
1327 |
+
_(aten, sparse_mask) \
|
1328 |
+
_(aten, sparse_resize) \
|
1329 |
+
_(aten, sparse_resize_) \
|
1330 |
+
_(aten, sparse_resize_and_clear) \
|
1331 |
+
_(aten, sparse_resize_and_clear_) \
|
1332 |
+
_(aten, sparse_sampled_addmm) \
|
1333 |
+
_(aten, special_airy_ai) \
|
1334 |
+
_(aten, special_bessel_j0) \
|
1335 |
+
_(aten, special_bessel_j1) \
|
1336 |
+
_(aten, special_bessel_y0) \
|
1337 |
+
_(aten, special_bessel_y1) \
|
1338 |
+
_(aten, special_chebyshev_polynomial_t) \
|
1339 |
+
_(aten, special_chebyshev_polynomial_u) \
|
1340 |
+
_(aten, special_chebyshev_polynomial_v) \
|
1341 |
+
_(aten, special_chebyshev_polynomial_w) \
|
1342 |
+
_(aten, special_digamma) \
|
1343 |
+
_(aten, special_entr) \
|
1344 |
+
_(aten, special_erf) \
|
1345 |
+
_(aten, special_erfc) \
|
1346 |
+
_(aten, special_erfcx) \
|
1347 |
+
_(aten, special_erfinv) \
|
1348 |
+
_(aten, special_exp2) \
|
1349 |
+
_(aten, special_expit) \
|
1350 |
+
_(aten, special_expm1) \
|
1351 |
+
_(aten, special_gammainc) \
|
1352 |
+
_(aten, special_gammaincc) \
|
1353 |
+
_(aten, special_gammaln) \
|
1354 |
+
_(aten, special_hermite_polynomial_h) \
|
1355 |
+
_(aten, special_hermite_polynomial_he) \
|
1356 |
+
_(aten, special_i0) \
|
1357 |
+
_(aten, special_i0e) \
|
1358 |
+
_(aten, special_i1) \
|
1359 |
+
_(aten, special_i1e) \
|
1360 |
+
_(aten, special_laguerre_polynomial_l) \
|
1361 |
+
_(aten, special_legendre_polynomial_p) \
|
1362 |
+
_(aten, special_log1p) \
|
1363 |
+
_(aten, special_log_ndtr) \
|
1364 |
+
_(aten, special_log_softmax) \
|
1365 |
+
_(aten, special_logit) \
|
1366 |
+
_(aten, special_logsumexp) \
|
1367 |
+
_(aten, special_modified_bessel_i0) \
|
1368 |
+
_(aten, special_modified_bessel_i1) \
|
1369 |
+
_(aten, special_modified_bessel_k0) \
|
1370 |
+
_(aten, special_modified_bessel_k1) \
|
1371 |
+
_(aten, special_multigammaln) \
|
1372 |
+
_(aten, special_ndtr) \
|
1373 |
+
_(aten, special_ndtri) \
|
1374 |
+
_(aten, special_polygamma) \
|
1375 |
+
_(aten, special_psi) \
|
1376 |
+
_(aten, special_round) \
|
1377 |
+
_(aten, special_scaled_modified_bessel_k0) \
|
1378 |
+
_(aten, special_scaled_modified_bessel_k1) \
|
1379 |
+
_(aten, special_shifted_chebyshev_polynomial_t) \
|
1380 |
+
_(aten, special_shifted_chebyshev_polynomial_u) \
|
1381 |
+
_(aten, special_shifted_chebyshev_polynomial_v) \
|
1382 |
+
_(aten, special_shifted_chebyshev_polynomial_w) \
|
1383 |
+
_(aten, special_sinc) \
|
1384 |
+
_(aten, special_softmax) \
|
1385 |
+
_(aten, special_spherical_bessel_j0) \
|
1386 |
+
_(aten, special_xlog1py) \
|
1387 |
+
_(aten, special_xlogy) \
|
1388 |
+
_(aten, special_zeta) \
|
1389 |
+
_(aten, split) \
|
1390 |
+
_(aten, split_copy) \
|
1391 |
+
_(aten, split_with_sizes) \
|
1392 |
+
_(aten, split_with_sizes_copy) \
|
1393 |
+
_(aten, sqrt) \
|
1394 |
+
_(aten, sqrt_) \
|
1395 |
+
_(aten, square) \
|
1396 |
+
_(aten, square_) \
|
1397 |
+
_(aten, squeeze) \
|
1398 |
+
_(aten, squeeze_) \
|
1399 |
+
_(aten, squeeze_copy) \
|
1400 |
+
_(aten, sspaddmm) \
|
1401 |
+
_(aten, stack) \
|
1402 |
+
_(aten, std) \
|
1403 |
+
_(aten, std_mean) \
|
1404 |
+
_(aten, stft) \
|
1405 |
+
_(aten, stride) \
|
1406 |
+
_(aten, sub) \
|
1407 |
+
_(aten, sub_) \
|
1408 |
+
_(aten, subtract) \
|
1409 |
+
_(aten, subtract_) \
|
1410 |
+
_(aten, sum) \
|
1411 |
+
_(aten, sum_to_size) \
|
1412 |
+
_(aten, svd) \
|
1413 |
+
_(aten, swapaxes) \
|
1414 |
+
_(aten, swapaxes_) \
|
1415 |
+
_(aten, swapdims) \
|
1416 |
+
_(aten, swapdims_) \
|
1417 |
+
_(aten, sym_constrain_range) \
|
1418 |
+
_(aten, sym_constrain_range_for_size) \
|
1419 |
+
_(aten, sym_numel) \
|
1420 |
+
_(aten, sym_size) \
|
1421 |
+
_(aten, sym_storage_offset) \
|
1422 |
+
_(aten, sym_stride) \
|
1423 |
+
_(aten, t) \
|
1424 |
+
_(aten, t_) \
|
1425 |
+
_(aten, t_copy) \
|
1426 |
+
_(aten, take) \
|
1427 |
+
_(aten, take_along_dim) \
|
1428 |
+
_(aten, tan) \
|
1429 |
+
_(aten, tan_) \
|
1430 |
+
_(aten, tanh) \
|
1431 |
+
_(aten, tanh_) \
|
1432 |
+
_(aten, tanh_backward) \
|
1433 |
+
_(aten, tensor_split) \
|
1434 |
+
_(aten, tensordot) \
|
1435 |
+
_(aten, thnn_conv2d) \
|
1436 |
+
_(aten, threshold) \
|
1437 |
+
_(aten, threshold_) \
|
1438 |
+
_(aten, threshold_backward) \
|
1439 |
+
_(aten, tile) \
|
1440 |
+
_(aten, to) \
|
1441 |
+
_(aten, to_dense) \
|
1442 |
+
_(aten, to_dense_backward) \
|
1443 |
+
_(aten, to_mkldnn) \
|
1444 |
+
_(aten, to_mkldnn_backward) \
|
1445 |
+
_(aten, to_padded_tensor) \
|
1446 |
+
_(aten, to_sparse) \
|
1447 |
+
_(aten, to_sparse_bsc) \
|
1448 |
+
_(aten, to_sparse_bsr) \
|
1449 |
+
_(aten, to_sparse_csc) \
|
1450 |
+
_(aten, to_sparse_csr) \
|
1451 |
+
_(aten, topk) \
|
1452 |
+
_(aten, trace) \
|
1453 |
+
_(aten, trace_backward) \
|
1454 |
+
_(aten, transpose) \
|
1455 |
+
_(aten, transpose_) \
|
1456 |
+
_(aten, transpose_copy) \
|
1457 |
+
_(aten, trapezoid) \
|
1458 |
+
_(aten, trapz) \
|
1459 |
+
_(aten, triangular_solve) \
|
1460 |
+
_(aten, tril) \
|
1461 |
+
_(aten, tril_) \
|
1462 |
+
_(aten, tril_indices) \
|
1463 |
+
_(aten, triplet_margin_loss) \
|
1464 |
+
_(aten, triu) \
|
1465 |
+
_(aten, triu_) \
|
1466 |
+
_(aten, triu_indices) \
|
1467 |
+
_(aten, true_divide) \
|
1468 |
+
_(aten, true_divide_) \
|
1469 |
+
_(aten, trunc) \
|
1470 |
+
_(aten, trunc_) \
|
1471 |
+
_(aten, type_as) \
|
1472 |
+
_(aten, unbind) \
|
1473 |
+
_(aten, unbind_copy) \
|
1474 |
+
_(aten, unflatten) \
|
1475 |
+
_(aten, unflatten_dense_tensors) \
|
1476 |
+
_(aten, unfold) \
|
1477 |
+
_(aten, unfold_backward) \
|
1478 |
+
_(aten, unfold_copy) \
|
1479 |
+
_(aten, uniform) \
|
1480 |
+
_(aten, uniform_) \
|
1481 |
+
_(aten, unique_consecutive) \
|
1482 |
+
_(aten, unique_dim) \
|
1483 |
+
_(aten, unique_dim_consecutive) \
|
1484 |
+
_(aten, unsafe_chunk) \
|
1485 |
+
_(aten, unsafe_split) \
|
1486 |
+
_(aten, unsafe_split_with_sizes) \
|
1487 |
+
_(aten, unsqueeze) \
|
1488 |
+
_(aten, unsqueeze_) \
|
1489 |
+
_(aten, unsqueeze_copy) \
|
1490 |
+
_(aten, upsample_bicubic2d) \
|
1491 |
+
_(aten, upsample_bicubic2d_backward) \
|
1492 |
+
_(aten, upsample_bilinear2d) \
|
1493 |
+
_(aten, upsample_bilinear2d_backward) \
|
1494 |
+
_(aten, upsample_linear1d) \
|
1495 |
+
_(aten, upsample_linear1d_backward) \
|
1496 |
+
_(aten, upsample_nearest1d) \
|
1497 |
+
_(aten, upsample_nearest1d_backward) \
|
1498 |
+
_(aten, upsample_nearest2d) \
|
1499 |
+
_(aten, upsample_nearest2d_backward) \
|
1500 |
+
_(aten, upsample_nearest3d) \
|
1501 |
+
_(aten, upsample_nearest3d_backward) \
|
1502 |
+
_(aten, upsample_trilinear3d) \
|
1503 |
+
_(aten, upsample_trilinear3d_backward) \
|
1504 |
+
_(aten, value_selecting_reduction_backward) \
|
1505 |
+
_(aten, values) \
|
1506 |
+
_(aten, values_copy) \
|
1507 |
+
_(aten, vander) \
|
1508 |
+
_(aten, var) \
|
1509 |
+
_(aten, var_mean) \
|
1510 |
+
_(aten, vdot) \
|
1511 |
+
_(aten, view) \
|
1512 |
+
_(aten, view_as) \
|
1513 |
+
_(aten, view_as_complex) \
|
1514 |
+
_(aten, view_as_complex_copy) \
|
1515 |
+
_(aten, view_as_real) \
|
1516 |
+
_(aten, view_as_real_copy) \
|
1517 |
+
_(aten, view_copy) \
|
1518 |
+
_(aten, vsplit) \
|
1519 |
+
_(aten, vstack) \
|
1520 |
+
_(aten, where) \
|
1521 |
+
_(aten, xlogy) \
|
1522 |
+
_(aten, xlogy_) \
|
1523 |
+
_(aten, zero) \
|
1524 |
+
_(aten, zero_) \
|
1525 |
+
_(aten, zeros) \
|
1526 |
+
_(aten, zeros_like)
|
1527 |
+
|
1528 |
+
#define FORALL_ATTR_BASE_SYMBOLS(_) \
|
1529 |
+
_(attr, A) \
|
1530 |
+
_(attr, B) \
|
1531 |
+
_(attr, C) \
|
1532 |
+
_(attr, H) \
|
1533 |
+
_(attr, HxW) \
|
1534 |
+
_(attr, K) \
|
1535 |
+
_(attr, L) \
|
1536 |
+
_(attr, LD) \
|
1537 |
+
_(attr, LU) \
|
1538 |
+
_(attr, LU_data) \
|
1539 |
+
_(attr, LU_pivots) \
|
1540 |
+
_(attr, M) \
|
1541 |
+
_(attr, N) \
|
1542 |
+
_(attr, P) \
|
1543 |
+
_(attr, Q) \
|
1544 |
+
_(attr, R) \
|
1545 |
+
_(attr, S) \
|
1546 |
+
_(attr, U) \
|
1547 |
+
_(attr, UPLO) \
|
1548 |
+
_(attr, V) \
|
1549 |
+
_(attr, Vh) \
|
1550 |
+
_(attr, W) \
|
1551 |
+
_(attr, X) \
|
1552 |
+
_(attr, a) \
|
1553 |
+
_(attr, abs) \
|
1554 |
+
_(attr, accumulate) \
|
1555 |
+
_(attr, accumulate_matches) \
|
1556 |
+
_(attr, activation) \
|
1557 |
+
_(attr, addends) \
|
1558 |
+
_(attr, adjoint) \
|
1559 |
+
_(attr, align_corners) \
|
1560 |
+
_(attr, allow_tf32) \
|
1561 |
+
_(attr, alpha) \
|
1562 |
+
_(attr, amsgrad) \
|
1563 |
+
_(attr, anchor) \
|
1564 |
+
_(attr, angle) \
|
1565 |
+
_(attr, api_name) \
|
1566 |
+
_(attr, append) \
|
1567 |
+
_(attr, approximate) \
|
1568 |
+
_(attr, arg1) \
|
1569 |
+
_(attr, arg2) \
|
1570 |
+
_(attr, arg3) \
|
1571 |
+
_(attr, arg_out) \
|
1572 |
+
_(attr, assert_msg) \
|
1573 |
+
_(attr, assume_unique) \
|
1574 |
+
_(attr, atol) \
|
1575 |
+
_(attr, attn_bias) \
|
1576 |
+
_(attr, attn_mask) \
|
1577 |
+
_(attr, average_attn_weights) \
|
1578 |
+
_(attr, averaging_const) \
|
1579 |
+
_(attr, aweights) \
|
1580 |
+
_(attr, axis) \
|
1581 |
+
_(attr, axis0) \
|
1582 |
+
_(attr, axis1) \
|
1583 |
+
_(attr, b) \
|
1584 |
+
_(attr, b_hh) \
|
1585 |
+
_(attr, b_ih) \
|
1586 |
+
_(attr, bag_size) \
|
1587 |
+
_(attr, base) \
|
1588 |
+
_(attr, batch1) \
|
1589 |
+
_(attr, batch2) \
|
1590 |
+
_(attr, batch_dim) \
|
1591 |
+
_(attr, batch_first) \
|
1592 |
+
_(attr, batch_size) \
|
1593 |
+
_(attr, batch_sizes) \
|
1594 |
+
_(attr, benchmark) \
|
1595 |
+
_(attr, beta) \
|
1596 |
+
_(attr, beta1) \
|
1597 |
+
_(attr, beta2) \
|
1598 |
+
_(attr, bias) \
|
1599 |
+
_(attr, bias_defined) \
|
1600 |
+
_(attr, bias_g) \
|
1601 |
+
_(attr, bias_requires_grad) \
|
1602 |
+
_(attr, bias_sizes) \
|
1603 |
+
_(attr, bidirectional) \
|
1604 |
+
_(attr, bin_edges) \
|
1605 |
+
_(attr, bins) \
|
1606 |
+
_(attr, bit_width) \
|
1607 |
+
_(attr, blank) \
|
1608 |
+
_(attr, blocksize) \
|
1609 |
+
_(attr, boundaries) \
|
1610 |
+
_(attr, buffer) \
|
1611 |
+
_(attr, causal_diagonal) \
|
1612 |
+
_(attr, ccol_indices) \
|
1613 |
+
_(attr, cdim) \
|
1614 |
+
_(attr, cdist) \
|
1615 |
+
_(attr, ceil_mode) \
|
1616 |
+
_(attr, cell_state_fwd) \
|
1617 |
+
_(attr, center) \
|
1618 |
+
_(attr, ch_axis) \
|
1619 |
+
_(attr, check_errors) \
|
1620 |
+
_(attr, chunks) \
|
1621 |
+
_(attr, coalesced) \
|
1622 |
+
_(attr, coefficients) \
|
1623 |
+
_(attr, col) \
|
1624 |
+
_(attr, col_indices) \
|
1625 |
+
_(attr, col_offsets) \
|
1626 |
+
_(attr, col_offsets_hh) \
|
1627 |
+
_(attr, col_offsets_ih) \
|
1628 |
+
_(attr, compressed_A) \
|
1629 |
+
_(attr, compressed_idx) \
|
1630 |
+
_(attr, compressed_indices) \
|
1631 |
+
_(attr, compressed_indices_dtype) \
|
1632 |
+
_(attr, compute_log_sumexp) \
|
1633 |
+
_(attr, compute_mode) \
|
1634 |
+
_(attr, compute_uv) \
|
1635 |
+
_(attr, compute_v) \
|
1636 |
+
_(attr, condition) \
|
1637 |
+
_(attr, copy) \
|
1638 |
+
_(attr, correction) \
|
1639 |
+
_(attr, count) \
|
1640 |
+
_(attr, count_include_pad) \
|
1641 |
+
_(attr, counts) \
|
1642 |
+
_(attr, cpu_dtype) \
|
1643 |
+
_(attr, cpu_enabled) \
|
1644 |
+
_(attr, cpu_nested_shape_example) \
|
1645 |
+
_(attr, create_graph) \
|
1646 |
+
_(attr, crow_indices) \
|
1647 |
+
_(attr, cu_seqlens_k) \
|
1648 |
+
_(attr, cu_seqlens_q) \
|
1649 |
+
_(attr, cuda_dtype) \
|
1650 |
+
_(attr, cuda_enabled) \
|
1651 |
+
_(attr, cudnn_enable) \
|
1652 |
+
_(attr, cudnn_enabled) \
|
1653 |
+
_(attr, cum_seq_k) \
|
1654 |
+
_(attr, cum_seq_q) \
|
1655 |
+
_(attr, custom_mask_type) \
|
1656 |
+
_(attr, cx) \
|
1657 |
+
_(attr, cx_) \
|
1658 |
+
_(attr, cx_tmp) \
|
1659 |
+
_(attr, cy) \
|
1660 |
+
_(attr, cy_) \
|
1661 |
+
_(attr, d) \
|
1662 |
+
_(attr, data) \
|
1663 |
+
_(attr, decimals) \
|
1664 |
+
_(attr, delta) \
|
1665 |
+
_(attr, dense) \
|
1666 |
+
_(attr, dense_B) \
|
1667 |
+
_(attr, dense_dim) \
|
1668 |
+
_(attr, density) \
|
1669 |
+
_(attr, dep_token) \
|
1670 |
+
_(attr, descending) \
|
1671 |
+
_(attr, destination) \
|
1672 |
+
_(attr, deterministic) \
|
1673 |
+
_(attr, device) \
|
1674 |
+
_(attr, device_index) \
|
1675 |
+
_(attr, dgrad_glu) \
|
1676 |
+
_(attr, diagonal) \
|
1677 |
+
_(attr, diagonals) \
|
1678 |
+
_(attr, dilation) \
|
1679 |
+
_(attr, dim) \
|
1680 |
+
_(attr, dim0) \
|
1681 |
+
_(attr, dim1) \
|
1682 |
+
_(attr, dim2) \
|
1683 |
+
_(attr, dimension) \
|
1684 |
+
_(attr, dims) \
|
1685 |
+
_(attr, dims_other) \
|
1686 |
+
_(attr, dims_self) \
|
1687 |
+
_(attr, divisor_override) \
|
1688 |
+
_(attr, downscale_factor) \
|
1689 |
+
_(attr, driver) \
|
1690 |
+
_(attr, dropout) \
|
1691 |
+
_(attr, dropout_mask) \
|
1692 |
+
_(attr, dropout_p) \
|
1693 |
+
_(attr, dropout_seed) \
|
1694 |
+
_(attr, dropout_state) \
|
1695 |
+
_(attr, dst) \
|
1696 |
+
_(attr, dtype) \
|
1697 |
+
_(attr, dual) \
|
1698 |
+
_(attr, dummy) \
|
1699 |
+
_(attr, dx) \
|
1700 |
+
_(attr, edge_order) \
|
1701 |
+
_(attr, eigenvalues) \
|
1702 |
+
_(attr, eigenvectors) \
|
1703 |
+
_(attr, eigvals) \
|
1704 |
+
_(attr, eigvecs) \
|
1705 |
+
_(attr, element) \
|
1706 |
+
_(attr, elements) \
|
1707 |
+
_(attr, ellipsis_idx) \
|
1708 |
+
_(attr, embed_dim) \
|
1709 |
+
_(attr, end) \
|
1710 |
+
_(attr, end_dim) \
|
1711 |
+
_(attr, eps) \
|
1712 |
+
_(attr, epsilon) \
|
1713 |
+
_(attr, equal_nan) \
|
1714 |
+
_(attr, equation) \
|
1715 |
+
_(attr, exp_avg_sqs) \
|
1716 |
+
_(attr, exp_avgs) \
|
1717 |
+
_(attr, expand1) \
|
1718 |
+
_(attr, expand2) \
|
1719 |
+
_(attr, expand3) \
|
1720 |
+
_(attr, exponent) \
|
1721 |
+
_(attr, exponential_average_factor) \
|
1722 |
+
_(attr, fake_quant_enabled) \
|
1723 |
+
_(attr, fake_quant_on) \
|
1724 |
+
_(attr, ffn_bias_1) \
|
1725 |
+
_(attr, ffn_bias_2) \
|
1726 |
+
_(attr, ffn_weight_1) \
|
1727 |
+
_(attr, ffn_weight_2) \
|
1728 |
+
_(attr, filename) \
|
1729 |
+
_(attr, fill_value) \
|
1730 |
+
_(attr, flat) \
|
1731 |
+
_(attr, forward) \
|
1732 |
+
_(attr, found_inf) \
|
1733 |
+
_(attr, from) \
|
1734 |
+
_(attr, full) \
|
1735 |
+
_(attr, full_matrices) \
|
1736 |
+
_(attr, fuse_transform_0213) \
|
1737 |
+
_(attr, fweights) \
|
1738 |
+
_(attr, g) \
|
1739 |
+
_(attr, gO) \
|
1740 |
+
_(attr, generator) \
|
1741 |
+
_(attr, ggI) \
|
1742 |
+
_(attr, ggW) \
|
1743 |
+
_(attr, ggb) \
|
1744 |
+
_(attr, glu) \
|
1745 |
+
_(attr, grad) \
|
1746 |
+
_(attr, grad_bias) \
|
1747 |
+
_(attr, grad_cy) \
|
1748 |
+
_(attr, grad_factor) \
|
1749 |
+
_(attr, grad_glu) \
|
1750 |
+
_(attr, grad_hy) \
|
1751 |
+
_(attr, grad_in) \
|
1752 |
+
_(attr, grad_input) \
|
1753 |
+
_(attr, grad_input_mask) \
|
1754 |
+
_(attr, grad_out) \
|
1755 |
+
_(attr, grad_out_) \
|
1756 |
+
_(attr, grad_output) \
|
1757 |
+
_(attr, grad_scale) \
|
1758 |
+
_(attr, grad_w) \
|
1759 |
+
_(attr, grad_weight) \
|
1760 |
+
_(attr, grad_x) \
|
1761 |
+
_(attr, grad_y) \
|
1762 |
+
_(attr, gradient) \
|
1763 |
+
_(attr, grads) \
|
1764 |
+
_(attr, grid) \
|
1765 |
+
_(attr, group) \
|
1766 |
+
_(attr, groups) \
|
1767 |
+
_(attr, growth_interval) \
|
1768 |
+
_(attr, growth_tracker) \
|
1769 |
+
_(attr, half_to_float) \
|
1770 |
+
_(attr, has_bias) \
|
1771 |
+
_(attr, has_biases) \
|
1772 |
+
_(attr, hermitian) \
|
1773 |
+
_(attr, hidden_bias) \
|
1774 |
+
_(attr, hidden_gates) \
|
1775 |
+
_(attr, hidden_size) \
|
1776 |
+
_(attr, high) \
|
1777 |
+
_(attr, hist) \
|
1778 |
+
_(attr, hop_length) \
|
1779 |
+
_(attr, hx) \
|
1780 |
+
_(attr, hx_) \
|
1781 |
+
_(attr, hy_) \
|
1782 |
+
_(attr, i1) \
|
1783 |
+
_(attr, i2) \
|
1784 |
+
_(attr, i3) \
|
1785 |
+
_(attr, ignore_index) \
|
1786 |
+
_(attr, imag) \
|
1787 |
+
_(attr, impl_index) \
|
1788 |
+
_(attr, implicit) \
|
1789 |
+
_(attr, include_last_offset) \
|
1790 |
+
_(attr, include_self) \
|
1791 |
+
_(attr, increasing) \
|
1792 |
+
_(attr, ind) \
|
1793 |
+
_(attr, index) \
|
1794 |
+
_(attr, indexing) \
|
1795 |
+
_(attr, indices) \
|
1796 |
+
_(attr, info) \
|
1797 |
+
_(attr, initial) \
|
1798 |
+
_(attr, innerKTiles) \
|
1799 |
+
_(attr, input) \
|
1800 |
+
_(attr, input1) \
|
1801 |
+
_(attr, input2) \
|
1802 |
+
_(attr, input3) \
|
1803 |
+
_(attr, input_bias) \
|
1804 |
+
_(attr, input_dtype) \
|
1805 |
+
_(attr, input_g) \
|
1806 |
+
_(attr, input_gates) \
|
1807 |
+
_(attr, input_lengths) \
|
1808 |
+
_(attr, input_scale) \
|
1809 |
+
_(attr, input_size) \
|
1810 |
+
_(attr, input_sizes) \
|
1811 |
+
_(attr, inputs) \
|
1812 |
+
_(attr, interpolation) \
|
1813 |
+
_(attr, interpolation_mode) \
|
1814 |
+
_(attr, inv_scale) \
|
1815 |
+
_(attr, inverse) \
|
1816 |
+
_(attr, invert) \
|
1817 |
+
_(attr, invstd) \
|
1818 |
+
_(attr, is_causal) \
|
1819 |
+
_(attr, is_coalesced) \
|
1820 |
+
_(attr, is_crow) \
|
1821 |
+
_(attr, is_matrix) \
|
1822 |
+
_(attr, is_result) \
|
1823 |
+
_(attr, is_target) \
|
1824 |
+
_(attr, k) \
|
1825 |
+
_(attr, keepdim) \
|
1826 |
+
_(attr, kernel_size) \
|
1827 |
+
_(attr, key) \
|
1828 |
+
_(attr, label_smoothing) \
|
1829 |
+
_(attr, lambd) \
|
1830 |
+
_(attr, largest) \
|
1831 |
+
_(attr, last_dim_size) \
|
1832 |
+
_(attr, layersOutputs) \
|
1833 |
+
_(attr, layout) \
|
1834 |
+
_(attr, left) \
|
1835 |
+
_(attr, length) \
|
1836 |
+
_(attr, lengths) \
|
1837 |
+
_(attr, level) \
|
1838 |
+
_(attr, like) \
|
1839 |
+
_(attr, list) \
|
1840 |
+
_(attr, log_alpha) \
|
1841 |
+
_(attr, log_input) \
|
1842 |
+
_(attr, log_probs) \
|
1843 |
+
_(attr, log_target) \
|
1844 |
+
_(attr, logabsdet) \
|
1845 |
+
_(attr, logsumexp) \
|
1846 |
+
_(attr, low) \
|
1847 |
+
_(attr, lower) \
|
1848 |
+
_(attr, lr) \
|
1849 |
+
_(attr, ltm) \
|
1850 |
+
_(attr, m) \
|
1851 |
+
_(attr, mantissa) \
|
1852 |
+
_(attr, margin) \
|
1853 |
+
_(attr, mask) \
|
1854 |
+
_(attr, mask_check) \
|
1855 |
+
_(attr, mask_type) \
|
1856 |
+
_(attr, masked_grad) \
|
1857 |
+
_(attr, mat) \
|
1858 |
+
_(attr, mat1) \
|
1859 |
+
_(attr, mat2) \
|
1860 |
+
_(attr, matrices) \
|
1861 |
+
_(attr, max) \
|
1862 |
+
_(attr, max_exp_avg_sqs) \
|
1863 |
+
_(attr, max_k) \
|
1864 |
+
_(attr, max_norm) \
|
1865 |
+
_(attr, max_q) \
|
1866 |
+
_(attr, max_seqlen_k) \
|
1867 |
+
_(attr, max_seqlen_q) \
|
1868 |
+
_(attr, max_size) \
|
1869 |
+
_(attr, max_val) \
|
1870 |
+
_(attr, max_values) \
|
1871 |
+
_(attr, maximize) \
|
1872 |
+
_(attr, maximum_indices) \
|
1873 |
+
_(attr, maxnorm) \
|
1874 |
+
_(attr, mean) \
|
1875 |
+
_(attr, median) \
|
1876 |
+
_(attr, memory_format) \
|
1877 |
+
_(attr, meta) \
|
1878 |
+
_(attr, min) \
|
1879 |
+
_(attr, min_indices) \
|
1880 |
+
_(attr, min_val) \
|
1881 |
+
_(attr, minlength) \
|
1882 |
+
_(attr, mode) \
|
1883 |
+
_(attr, momentum) \
|
1884 |
+
_(attr, n) \
|
1885 |
+
_(attr, n_bins) \
|
1886 |
+
_(attr, n_fft) \
|
1887 |
+
_(attr, names) \
|
1888 |
+
_(attr, nan) \
|
1889 |
+
_(attr, need_weights) \
|
1890 |
+
_(attr, neg_log_likelihood) \
|
1891 |
+
_(attr, negative) \
|
1892 |
+
_(attr, negative_slope) \
|
1893 |
+
_(attr, neginf) \
|
1894 |
+
_(attr, nested_size) \
|
1895 |
+
_(attr, nested_strides) \
|
1896 |
+
_(attr, new_data) \
|
1897 |
+
_(attr, nnz) \
|
1898 |
+
_(attr, noise) \
|
1899 |
+
_(attr, non_blocking) \
|
1900 |
+
_(attr, norm) \
|
1901 |
+
_(attr, norm_bias_1) \
|
1902 |
+
_(attr, norm_bias_2) \
|
1903 |
+
_(attr, norm_first) \
|
1904 |
+
_(attr, norm_type) \
|
1905 |
+
_(attr, norm_weight_1) \
|
1906 |
+
_(attr, norm_weight_2) \
|
1907 |
+
_(attr, normalization) \
|
1908 |
+
_(attr, normalized) \
|
1909 |
+
_(attr, normalized_shape) \
|
1910 |
+
_(attr, nt_example) \
|
1911 |
+
_(attr, num_classes) \
|
1912 |
+
_(attr, num_generated) \
|
1913 |
+
_(attr, num_groups) \
|
1914 |
+
_(attr, num_head) \
|
1915 |
+
_(attr, num_heads) \
|
1916 |
+
_(attr, num_layers) \
|
1917 |
+
_(attr, num_samples) \
|
1918 |
+
_(attr, num_splits_key) \
|
1919 |
+
_(attr, num_weights) \
|
1920 |
+
_(attr, numel) \
|
1921 |
+
_(attr, observer_on) \
|
1922 |
+
_(attr, offset) \
|
1923 |
+
_(attr, offset2bag) \
|
1924 |
+
_(attr, offsets) \
|
1925 |
+
_(attr, onesided) \
|
1926 |
+
_(attr, ord) \
|
1927 |
+
_(attr, order) \
|
1928 |
+
_(attr, other) \
|
1929 |
+
_(attr, out) \
|
1930 |
+
_(attr, out0) \
|
1931 |
+
_(attr, out1) \
|
1932 |
+
_(attr, out2) \
|
1933 |
+
_(attr, out3) \
|
1934 |
+
_(attr, out4) \
|
1935 |
+
_(attr, out5) \
|
1936 |
+
_(attr, out6) \
|
1937 |
+
_(attr, out_amax) \
|
1938 |
+
_(attr, out_dim) \
|
1939 |
+
_(attr, out_dtype) \
|
1940 |
+
_(attr, out_int32) \
|
1941 |
+
_(attr, outdim) \
|
1942 |
+
_(attr, output) \
|
1943 |
+
_(attr, output_mask) \
|
1944 |
+
_(attr, output_padding) \
|
1945 |
+
_(attr, output_scale) \
|
1946 |
+
_(attr, output_size) \
|
1947 |
+
_(attr, output_zero_point) \
|
1948 |
+
_(attr, p) \
|
1949 |
+
_(attr, packed) \
|
1950 |
+
_(attr, packed_hh) \
|
1951 |
+
_(attr, packed_ih) \
|
1952 |
+
_(attr, packed_weight) \
|
1953 |
+
_(attr, pad) \
|
1954 |
+
_(attr, pad_mode) \
|
1955 |
+
_(attr, padded) \
|
1956 |
+
_(attr, padding) \
|
1957 |
+
_(attr, padding_idx) \
|
1958 |
+
_(attr, padding_mode) \
|
1959 |
+
_(attr, padding_value) \
|
1960 |
+
_(attr, params) \
|
1961 |
+
_(attr, path) \
|
1962 |
+
_(attr, pdist) \
|
1963 |
+
_(attr, per_row_fake_quant) \
|
1964 |
+
_(attr, per_sample_weights) \
|
1965 |
+
_(attr, periodic) \
|
1966 |
+
_(attr, philox_offset) \
|
1967 |
+
_(attr, philox_seed) \
|
1968 |
+
_(attr, physical_layout) \
|
1969 |
+
_(attr, pin_memory) \
|
1970 |
+
_(attr, pivot) \
|
1971 |
+
_(attr, pivots) \
|
1972 |
+
_(attr, plain_idx) \
|
1973 |
+
_(attr, plain_indices) \
|
1974 |
+
_(attr, pos_weight) \
|
1975 |
+
_(attr, posinf) \
|
1976 |
+
_(attr, positive) \
|
1977 |
+
_(attr, pow) \
|
1978 |
+
_(attr, prepend) \
|
1979 |
+
_(attr, primal) \
|
1980 |
+
_(attr, prob) \
|
1981 |
+
_(attr, proj_bias) \
|
1982 |
+
_(attr, proj_size) \
|
1983 |
+
_(attr, proj_weight) \
|
1984 |
+
_(attr, q) \
|
1985 |
+
_(attr, qGroupSize) \
|
1986 |
+
_(attr, qScaleAndZeros) \
|
1987 |
+
_(attr, qkv) \
|
1988 |
+
_(attr, qkv_bias) \
|
1989 |
+
_(attr, qkv_weight) \
|
1990 |
+
_(attr, qtensor) \
|
1991 |
+
_(attr, quant_max) \
|
1992 |
+
_(attr, quant_min) \
|
1993 |
+
_(attr, quasi) \
|
1994 |
+
_(attr, query) \
|
1995 |
+
_(attr, r) \
|
1996 |
+
_(attr, random_samples) \
|
1997 |
+
_(attr, range) \
|
1998 |
+
_(attr, rank) \
|
1999 |
+
_(attr, ratio) \
|
2000 |
+
_(attr, rcond) \
|
2001 |
+
_(attr, real) \
|
2002 |
+
_(attr, reduce) \
|
2003 |
+
_(attr, reduce_range) \
_(attr, reduction) \
_(attr, repeats) \
_(attr, replacement) \
_(attr, requires_grad) \
_(attr, reserve) \
_(attr, reserveSpace) \
_(attr, reservedSpace) \
_(attr, residuals) \
_(attr, result) \
_(attr, retain_graph) \
_(attr, return_complex) \
_(attr, return_counts) \
_(attr, return_debug_mask) \
_(attr, return_inverse) \
_(attr, reverse) \
_(attr, right) \
_(attr, rounding_mode) \
_(attr, row) \
_(attr, row_indices) \
_(attr, rstd) \
_(attr, rtol) \
_(attr, running_max) \
_(attr, running_mean) \
_(attr, running_min) \
_(attr, running_var) \
_(attr, s) \
_(attr, save_invstd) \
_(attr, save_mean) \
_(attr, save_var) \
_(attr, save_var_transform) \
_(attr, saved_g) \
_(attr, saved_norms) \
_(attr, saved_v) \
_(attr, scalar) \
_(attr, scalar1) \
_(attr, scalar2) \
_(attr, scalars) \
_(attr, scale) \
_(attr, scale_a) \
_(attr, scale_b) \
_(attr, scale_backoff_factor) \
_(attr, scale_factors) \
_(attr, scale_grad_by_freq) \
_(attr, scale_growth_factor) \
_(attr, scale_hh) \
_(attr, scale_ih) \
_(attr, scale_result) \
_(attr, scales) \
_(attr, scales_d) \
_(attr, scales_h) \
_(attr, scales_w) \
_(attr, sections) \
_(attr, seed) \
_(attr, self) \
_(attr, self_is_result) \
_(attr, self_num_batch_dims) \
_(attr, self_or_result) \
_(attr, self_sizes) \
_(attr, seqlen_k) \
_(attr, sequences) \
_(attr, shape) \
_(attr, shared) \
_(attr, shifts) \
_(attr, side) \
_(attr, sigma) \
_(attr, sign) \
_(attr, singular_values) \
_(attr, size) \
_(attr, sizes) \
_(attr, sobolstate) \
_(attr, solution) \
_(attr, some) \
_(attr, sorted) \
_(attr, sorted_sequence) \
_(attr, sorter) \
_(attr, source) \
_(attr, spacing) \
_(attr, sparse) \
_(attr, sparse_dim) \
_(attr, sparse_grad) \
_(attr, split_size) \
_(attr, split_sizes) \
_(attr, src) \
_(attr, stable) \
_(attr, start) \
_(attr, start_dim) \
_(attr, state_steps) \
_(attr, std) \
_(attr, step) \
_(attr, steps) \
_(attr, storage_offset) \
_(attr, stride) \
_(attr, sum_dy) \
_(attr, sum_dy_xmu) \
_(attr, sumdim) \
_(attr, swap) \
_(attr, symmetric_quant) \
_(attr, t) \
_(attr, tangent) \
_(attr, target) \
_(attr, target_lengths) \
_(attr, targets) \
_(attr, tau) \
_(attr, tensor) \
_(attr, tensor1) \
_(attr, tensor2) \
_(attr, tensor_indices_or_sections) \
_(attr, tensors) \
_(attr, tensors1) \
_(attr, test_element) \
_(attr, test_elements) \
_(attr, the_template) \
_(attr, theta) \
_(attr, threshold) \
_(attr, to) \
_(attr, tol) \
_(attr, total) \
_(attr, total_length) \
_(attr, total_weight) \
_(attr, train) \
_(attr, training) \
_(attr, transpose) \
_(attr, transpose_result) \
_(attr, transposed) \
_(attr, type1) \
_(attr, type2) \
_(attr, unbiased) \
_(attr, unitriangular) \
_(attr, unpack_data) \
_(attr, unpack_pivots) \
_(attr, unroll_dim) \
_(attr, unsafe) \
_(attr, upper) \
_(attr, upscale_factor) \
_(attr, use_fast_accum) \
_(attr, use_gelu) \
_(attr, use_input_stats) \
_(attr, v) \
_(attr, value) \
_(attr, values) \
_(attr, var) \
_(attr, vec) \
_(attr, vec1) \
_(attr, vec2) \
_(attr, w_hh) \
_(attr, w_ih) \
_(attr, weight) \
_(attr, weight0) \
_(attr, weight1) \
_(attr, weight2) \
_(attr, weight3) \
_(attr, weight4) \
_(attr, weight_arr) \
_(attr, weight_buf) \
_(attr, weight_decay) \
_(attr, weight_g) \
_(attr, weight_scale) \
_(attr, weight_stride0) \
_(attr, weight_zero_point) \
_(attr, weights) \
_(attr, win_length) \
_(attr, window) \
_(attr, window_length) \
_(attr, with_replacement) \
_(attr, workspace) \
_(attr, wrap) \
_(attr, x) \
_(attr, x1) \
_(attr, x2) \
_(attr, y) \
_(attr, z) \
_(attr, z_state) \
_(attr, zero_infinity) \
_(attr, zero_point) \
_(attr, zero_point_hh) \
_(attr, zero_point_ih) \
_(attr, zero_points)
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/blob.h
ADDED
@@ -0,0 +1,208 @@
#pragma once

#include <cstddef>
#include <sstream>
#include <type_traits>
#include <typeinfo>
#include <vector>

#include <c10/util/intrusive_ptr.h>
#include <c10/util/typeid.h>
#include <c10/macros/Macros.h>

namespace caffe2 {

class Tensor;

/**
 * @brief Blob is a general container that hosts a typed pointer.
 *
 * A Blob hosts a pointer as well as its type, and takes charge of deleting it
 * properly when the blob is deallocated or re-allocated with a new type. A blob
 * could contain anything, although the most common case is to contain a Tensor.
 */
class TORCH_API Blob final : public c10::intrusive_ptr_target {
 public:
  /**
   * Initializes an empty Blob.
   */
  Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {}
  ~Blob() override {
    Reset();
  }

  Blob(Blob&& other) noexcept : Blob() {
    swap(other);
  }

  Blob& operator=(Blob&& other) noexcept {
    Blob(std::move(other)).swap(*this);
    return *this;
  }

  /**
   * Checks if the content stored in the blob is of type T.
   */
  template <class T>
  bool IsType() const noexcept {
    return meta_.Match<T>();
  }

  /**
   * Returns the meta info of the blob.
   */
  const TypeMeta meta() const noexcept {
    return meta_;
  }

  /**
   * Returns a printable typename of the blob.
   */
  c10::string_view TypeName() const noexcept {
    return meta_.name();
  }

  /**
   * @brief Gets the const reference of the stored object. The code checks if
   * the stored object is of the desired type.
   */
  // TODO(jerryzh): add a Get(c10::DeviceType) function?
  template <class T>
  const T& Get() const {
    TORCH_INTERNAL_ASSERT(
        IsType<T>(),
        "wrong type for the Blob instance. Blob contains ",
        meta_.name(),
        " while caller expects ",
        TypeMeta::TypeName<T>());
    // TODO: after we add Get<Tensor>(c10::DeviceType)
    // and changed all the callsites, we can add
    // a static assert here to enforce T != Tensor
    return *static_cast<const T*>(pointer_);
  }

  const void* GetRaw() const noexcept {
    return pointer_;
  }
  void* GetRaw() noexcept {
    return pointer_;
  }

  /**
   * @brief Gets a mutable pointer to the stored object.
   *
   * If the current object is not of the right type, a new object is created
   * and the old object is freed. Note that type T should have a default
   * constructor. Otherwise, create the object yourself first, and use
   * Reset().
   */
  template <class T>
  T* GetMutable() {
    static_assert(
        std::is_default_constructible<T>::value,
        "GetMutable can't be called with non-default-constructible types. "
        "Try using specialized methods");
    if (IsType<T>()) {
      return static_cast<T*>(pointer_);
    } else {
      // TODO Re-enable logging
      // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>();
      return Reset<T>(new T());
    }
  }

  template <class T>
  T* GetMutableOrNull() {
    if (IsType<T>()) {
      return static_cast<T*>(pointer_);
    } else {
      return nullptr;
    }
  }

  /**
   * Sets the underlying object to the allocated one. The Blob then takes over
   * the ownership of the passed in pointer. If there is already an object in
   * the Blob, the old object is freed.
   *
   * This is used when the underlying class T does not have a default ctor, or
   * complex initializations needs to be done outside the blob.
   */
  template <class T>
  T* Reset(T* allocated) {
    free_();
    meta_ = TypeMeta::Make<T>();
    pointer_ = static_cast<void*>(allocated);
    has_ownership_ = true;
    return allocated;
  }

  /**
   * Sets the underlying object to the allocated one, but does not take over
   * the ownership of the passed in pointer. If there is already an object in
   * the Blob, the old object is freed.
   *
   * Unlike Reset, this does not take over the ownership of the pointer and the
   * caller is responsible for making sure that the lifetime of the allocated
   * blob outlasts the lifetime of any access to this blob, until another Reset
   * call is made or the blob is destructed.
   */
  template <class T>
  typename std::remove_const<T>::type* ShareExternal(
      typename std::remove_const<T>::type* allocated) {
    return static_cast<T*>(ShareExternal(
        static_cast<void*>(allocated),
        TypeMeta::Make<typename std::remove_const<T>::type>()));
  }

  void* ShareExternal(void* allocated, const TypeMeta meta) {
    free_();
    meta_ = meta;
    pointer_ = allocated;
    has_ownership_ = false;
    return allocated;
  }

  /**
   * Resets the Blob to an empty one.
   */
  void Reset() {
    free_();
    pointer_ = nullptr;
    meta_ = TypeMeta();
    has_ownership_ = false;
  }

  /**
   * @brief Swaps the underlying storage of two blobs.
   */
  void swap(Blob& rhs) {
    using std::swap;
    swap(meta_, rhs.meta_);
    swap(pointer_, rhs.pointer_);
    swap(has_ownership_, rhs.has_ownership_);
  }

 private:
  void free_() {
    if (has_ownership_ && pointer_ != nullptr) {
      (*meta_.deleteFn())(pointer_);
    }
  }

  TypeMeta meta_;
  void* pointer_;
  bool has_ownership_;

  C10_DISABLE_COPY_AND_ASSIGN(Blob);
};

inline void swap(Blob& lhs, Blob& rhs) {
  lhs.swap(rhs);
}

inline std::ostream& operator<<(std::ostream& out, const Blob& v) {
  return out << "Blob[" << v.TypeName() << "]";
}

} // namespace caffe2
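A minimal usage sketch for the Blob API above (illustrative only, not part of the uploaded headers; the std::vector payload is an arbitrary choice):

// Illustrative sketch, not part of the upload: exercising caffe2::Blob's ownership API.
#include <ATen/core/blob.h>
#include <vector>

void blob_demo() {
  caffe2::Blob blob;
  // GetMutable default-constructs a std::vector<int> inside the blob, which then owns it.
  auto* vec = blob.GetMutable<std::vector<int>>();
  vec->push_back(42);
  // Get<T> is type-checked against the stored TypeMeta.
  const auto& same = blob.Get<std::vector<int>>();
  (void)same;
  // Reset() frees the owned object and returns the blob to the empty state.
  blob.Reset();
}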
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h
ADDED
@@ -0,0 +1,88 @@
#pragma once

#include <ATen/core/function.h>
#include <ATen/core/ivalue.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <functional>
#include <utility>

namespace torch {
namespace jit {

struct BuiltinOpFunction : public Function {
  BuiltinOpFunction(
      c10::QualifiedName qualname,
      c10::FunctionSchema schema,
      std::function<void(Stack&)> callable,
      std::string doc_string = "")
      : name_(std::move(qualname)),
        callable_(std::move(callable)),
        schema_(std::move(schema)),
        doc_string_(std::move(doc_string)) {
    TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1);
  }

  c10::string_view doc_string() const override {
    return doc_string_;
  }

  void run(Stack& stack) override {
    callable_(stack);
  }

  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& stack,
      TaskLauncher /* not used */) override {
    run(stack);
    auto res = c10::make_intrusive<c10::ivalue::Future>(stack.front().type());
    res->markCompleted(std::move(stack.front()));
    return res;
  }

  const c10::QualifiedName& qualname() const override {
    return name_;
  }

  // if this isn't yet defined, run its method_creator function
  void ensure_defined() override {
    // nop
  }

  const c10::FunctionSchema& getSchema() const override {
    return schema_;
  }

  size_t num_inputs() const override {
    return schema_.arguments().size();
  }

  Function& setSchema(c10::FunctionSchema schema) override {
    schema_ = std::move(schema);
    return *this;
  }

  bool call(Stack& stack, c10::optional<size_t>, c10::function_ref<void(const Code&)>) override {
    run(stack);
    return false;
  }

  bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)>) override {
    run(stack);
    return false;
  }

  ~BuiltinOpFunction() override = default;

 private:
  c10::QualifiedName name_;

  std::function<void(Stack&)> callable_;

  c10::FunctionSchema schema_;

  std::string doc_string_;
};

} // namespace jit
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h
ADDED
@@ -0,0 +1,28 @@
#pragma once

#include <typeindex>
#include <memory>

#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>

namespace c10 {

struct ClassType;
using ClassTypePtr = std::shared_ptr<ClassType>;

TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex);

template <typename T>
const c10::ClassTypePtr& getCustomClassType() {
  // Classes are never unregistered from getCustomClassTypeMap and the
  // hash lookup can be a hot path, so just cache.
  // For the same reason, it's fine If this ends up getting duplicated across
  // DSO boundaries for whatever reason.
  static c10::ClassTypePtr cache = getCustomClassTypeImpl(
      std::type_index(typeid(T)));
  return cache;
}

}
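A hedged sketch of how getCustomClassType is typically called (MyStackClass is a hypothetical type, assumed to have been registered as a custom class elsewhere, e.g. via torch::class_; without that registration the lookup throws at runtime):

// Illustrative sketch, not part of the upload.
#include <ATen/core/custom_class.h>

struct MyStackClass {};  // hypothetical; assumed registered as a custom class elsewhere

c10::ClassTypePtr lookupMyClassType() {
  // The first call resolves typeid(MyStackClass) through getCustomClassTypeImpl;
  // subsequent calls return the function-local cached ClassTypePtr.
  return c10::getCustomClassType<MyStackClass>();
}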
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h
ADDED
@@ -0,0 +1,19 @@
#pragma once

// @generated by torchgen/gen.py from enum_tag.h

namespace at {
    // Enum of valid tags obtained from the entries in tags.yaml
    enum class Tag {
        core,
        data_dependent_output,
        dynamic_output_shape,
        generated,
        inplace_view,
        nondeterministic_bitwise,
        nondeterministic_seeded,
        pointwise,
        pt2_compliant_tag,
        view_copy
    };
}
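For reference, a tag check against this generated enum looks like the following (illustrative sketch, not part of the uploaded headers):

// Illustrative sketch: operators tagged nondeterministic_seeded depend on RNG state.
#include <ATen/core/enum_tag.h>

bool depends_on_rng(at::Tag tag) {
  return tag == at::Tag::nondeterministic_seeded;
}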
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h
ADDED
@@ -0,0 +1,101 @@
#pragma once

#include <ATen/core/ivalue.h>

#include <utility>

namespace c10 {

struct EnumType;
using EnumTypePtr = std::shared_ptr<EnumType>;
using EnumNameValue = std::pair<std::string, IValue>;
struct TORCH_API EnumType : public NamedType {
  friend struct Type;
  static const TypeKind Kind = TypeKind::EnumType;

  static EnumTypePtr create(
      const c10::QualifiedName& qualified_class_name,
      TypePtr value,
      std::vector<EnumNameValue> enum_names_values,
      std::weak_ptr<::torch::jit::CompilationUnit> cu) {
    switch (value->kind()) {
      case TypeKind::IntType:
      case TypeKind::FloatType:
      case TypeKind::StringType:
        return EnumTypePtr(new EnumType(
            qualified_class_name,
            std::move(value),
            std::move(enum_names_values),
            std::move(cu)));
      default:
        AT_ERROR(
            "Cannot create Enum with value type '",
            value->str(),
            "', only int, float and string are supported");
    }
  }

  std::string str() const override {
    return "Enum<" + annotation_str() + ">";
  }

  std::string repr_str() const override {
    return str();
  }

  const TypePtr& getValueType() const {
    return value_type_;
  }

  bool equals(const Type& rhs) const override {
    if (auto* enum_rhs = rhs.castRaw<EnumType>()) {
      return name().value() == enum_rhs->name().value() &&
          *getValueType() == *(enum_rhs->getValueType()) &&
          this->compilation_unit() == enum_rhs->compilation_unit();
    }
    return false;
  }

  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;

  std::shared_ptr<const ::torch::jit::CompilationUnit> compilation_unit()
      const {
    auto cu = cu_.lock();
    return cu;
  }

  const QualifiedName& qualifiedClassName() const {
    return name().value();
  }

  at::ArrayRef<TypePtr> containedTypes() const override {
    return value_type_;
  }

  const at::ArrayRef<EnumNameValue> enumNamesValues() const {
    return enum_names_values_;
  }

 private:
  EnumType(
      c10::QualifiedName qualified_class_name,
      TypePtr value_type,
      std::vector<EnumNameValue> enum_names_values,
      std::weak_ptr<torch::jit::CompilationUnit> cu)
      : NamedType(TypeKind::EnumType, std::move(qualified_class_name)),
        value_type_(std::move(value_type)),
        enum_names_values_(std::move(enum_names_values)),
        cu_(std::move(cu)) {}

  std::string annotation_str_impl(
      C10_UNUSED TypePrinter printer = nullptr) const override {
    const auto& n = name().value();
    return n.qualifiedName();
  }

  TypePtr value_type_;
  std::vector<EnumNameValue> enum_names_values_;
  std::weak_ptr<::torch::jit::CompilationUnit> cu_;
};

} // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function.h
ADDED
@@ -0,0 +1,111 @@
#pragma once

#include <ATen/core/function_schema.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/qualified_name.h>
#include <c10/util/Exception.h>
#include <c10/util/FunctionRef.h>

namespace c10 {
struct FunctionSchema;
};

namespace at {
TORCH_API void launch(std::function<void()> func);
}

namespace torch {
namespace jit {

struct Graph;
struct Code;

namespace mobile {
struct Code;
}

using Stack = std::vector<at::IValue>;
using Kwargs = std::unordered_map<std::string, at::IValue>;
struct RecursiveMethodCallError : public std::exception {};
using TaskLauncher = std::function<void(std::function<void()>)>;

TORCH_API void preoptimizeGraph(std::shared_ptr<Graph>& graph, bool disable_autocast=false);

// A Function is a pure Graph with no implicit `self` object bound.
// It contains schema information and the executor that manages the
// execution of the function. Method is a wrapper around an
// underlying Function that also provides a `self` object.
struct TORCH_API Function {
  Function() = default;
  Function(const Function&) = default;
  Function& operator=(const Function&) = default;
  Function(Function&&) noexcept = default;
  Function& operator=(Function&&) noexcept = default;
  virtual c10::string_view doc_string() const {
    static constexpr c10::string_view no_doc_string = "";
    return no_doc_string;
  }

  virtual bool isGraphFunction() const {
    return false;
  }

  virtual void run(Stack& stack) = 0;

  virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& /*stack*/,
      C10_UNUSED TaskLauncher taskLauncher = at::launch) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
    return {};
  }

  at::IValue operator()(
      Stack stack,
      const Kwargs& kwargs = Kwargs()) {
    getSchema().checkAndNormalizeInputs(stack, kwargs);
    run(stack);
    return stack.front();
  }

  virtual const c10::QualifiedName& qualname() const = 0;

  const std::string& name() const {
    return qualname().name();
  }

  // if this isn't yet defined, run its method_creator function
  virtual void ensure_defined() = 0;

  virtual const c10::FunctionSchema& getSchema() const = 0;

  virtual size_t num_inputs() const = 0;

  virtual Function& setSchema(c10::FunctionSchema schema) = 0;

  // call() defines how different interpreter implementations interacts with
  // Function objects. Basically interpreters need to provide a callback to
  // communicate to Functions what to do if provided a Code object.
  // Alternatively we could design the signature to return an optional Code
  // object, but that requires special handling the null case in interpreter
  // and the fallback behavior is not well defined by interpreter but rather
  // Function themselves, so a callback approach is more reasonable than
  // returning values.
  // If call() returns true, then callback completes successfully, otherwise
  // call() returns false.

  // Overload for server interpreter, a bailout size is needed for graph executor.
  virtual bool call(Stack&, c10::optional<size_t>, c10::function_ref<void(const Code&)>) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
    return false;
  }

  // Overload for mobile interpreter.
  virtual bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
    return false;
  }

  virtual ~Function() = default;
};
} // namespace jit
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h
ADDED
@@ -0,0 +1,483 @@
#pragma once
#include <ostream>
#include <sstream>

// note: windows build doesn't find symbols in operator files unless
// this is a header file

namespace c10 {

inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) {
  // eventually this should look almost identical to python arg parser, but
  // it is simpler for now to work directly on this schema

  out << schema.name();
  if (!schema.overload_name().empty()) {
    out << "." << schema.overload_name();
  }
  out << "(";

  bool seen_kwarg_only = false;
  for (const auto i : c10::irange(schema.arguments().size())) {
    if (i > 0) out << ", ";
    if (schema.arguments()[i].kwarg_only() && !seen_kwarg_only) {
      out << "*, ";
      seen_kwarg_only = true;
    }
    out << schema.arguments()[i];
  }

  if(schema.is_vararg()) {
    if(!schema.arguments().empty())
      out << ", ";
    out << "...";
  }

  out << ") -> ";

  const auto& returns = schema.returns();

  /*
   * We should skip parenthesis if we return a single item and it's not varret,
   * or we return nothing but varret.
   *
   * Need special handling for schema
   * aten::items.str(Dict(str, t) self) -> (str,t)[]
   * Even though this schema returns a single item, we need add parenthesis.
   * The is necessary so the printed schema can be parsed by the C++ SchemaParser
   * Without the extra parenthesis, the parser sees the first parenthesis in '(str,t)' and mistakenly
   * treat the return type as a tuple. An alternative is to enhance the Lexer
   * to lookahead multiple tokens to accurately decide if the return type is
   * a tuple.
   */
  bool need_paren = !(
      (returns.size() == 1 && !schema.is_varret()) ||
      (returns.empty() && schema.is_varret()));

  if (returns.size() == 1 && !schema.is_varret()) {
    std::stringstream return_ss;
    return_ss << returns.at(0);
    auto return_str = return_ss.str();

    // enclosing the single return item with parenthesis if the return type
    // starts with a left parenthesis.
    //
    // There are 2 cases
    // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'.
    // without the extra parenthesis, the c++ schem parser can not parse it.
    // 2. something like '-> ((str, str))'. Need extra parenthesis so the return
    // type is a single tuple rather than two strings.
    // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about
    // this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15)
    // also covers this case.
    if (!return_str.empty() && return_str.front() == '(') {
      need_paren = true;
    }
  }

  if (need_paren) {
    out << "(";
  }
  for (const auto i : c10::irange(returns.size())) {
    if (i > 0) {
      out << ", ";
    }
    out << returns.at(i);
  }
  if (schema.is_varret()) {
    if (!returns.empty()) {
      out << ", ";
    }
    out << "...";
  }
  if (need_paren) {
    out << ")";
  }
  return out;
}

inline size_t findFirstOutArg(const std::vector<Argument>& args) {
  // find the start of out args in the schema
  for (const auto out_start_idx : c10::irange(args.size())) {
    if (args.at(out_start_idx).is_out()) {
      return out_start_idx;
    }
  }
  return args.size();
}

inline bool Argument::isBackwardCompatibleWith(
    const Argument& old,
    std::ostream* why_not) const {
  const Argument* lhs = this;
  const Argument* rhs = &old;
  if (!(lhs->name() == rhs->name()
        && lhs->N() == rhs->N()
        && (lhs->alias_info() == rhs->alias_info()
            || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
                && *lhs->alias_info() == *rhs->alias_info())))) {
    return false;
  }
  if (lhs->kwarg_only() && !rhs->kwarg_only()) {
    return false;
  }
  if (!rhs->type()->isSubtypeOfExt(*lhs->type(), why_not)) {
    return false;
  }
  if (rhs->default_value().has_value() &&
      lhs->default_value() != rhs->default_value()) {
    return false;
  }
  return true;
}

inline bool Argument::isForwardCompatibleWith(
    const Argument& old,
    std::ostream* why_not) const {
  const Argument* lhs = this;
  const Argument* rhs = &old;
  if (!(lhs->name() == rhs->name()
        && lhs->N() == rhs->N()
        && (lhs->alias_info() == rhs->alias_info()
            || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
                && *lhs->alias_info() == *rhs->alias_info())))) {
    return false;
  }
  if (lhs->kwarg_only() && !rhs->kwarg_only()) {
    return false;
  }
  if (!lhs->type()->isSubtypeOfExt(rhs->type(), why_not)) {
    return false;
  }
  if (rhs->default_value().has_value() &&
      lhs->default_value() != rhs->default_value()) {
    return false;
  }
  if (lhs->default_value().has_value() && !rhs->default_value().has_value()) {
    return false;
  }
  return true;
}

inline std::string FunctionSchema::formatTypeMismatchMsg(
    const Argument& expected,
    const std::string& actual_type,
    c10::optional<size_t> position,
    c10::optional<std::string> value) const {
  std::string position_str;
  if (position) {
    position_str = c10::str("Position: ", *position, "\n");
  }
  std::string value_str;
  if (value) {
    value_str = c10::str("Value: ", *value, "\n");
  }
  return c10::str(
      name(),
      "() ",
      expected.formatTypeMismatchMsg(actual_type),
      position_str,
      value_str,
      "Declaration: ",
      *this);
}

inline bool FunctionSchema::isBackwardCompatibleWith(
    const FunctionSchema& old,
    std::ostream* why_not) const {
  if (!(name() == old.name()
        && overload_name() == old.overload_name()
        // we are conservative on is_vararg and is_varret,
        // since they are only used by internal operators
        && is_vararg() == old.is_vararg()
        && is_varret() == old.is_varret()
        && returns().size() == old.returns().size()
        && arguments().size() >= old.arguments().size())) {
    return false;
  }
  for (const auto i : c10::irange(returns().size())) {
    // Backwards compatibility requires covariance on argument types
    // (i.e. more generic), and contravariance on return types (i.e.
    // more specific).
    if (!old.returns().at(i).isBackwardCompatibleWith(
          returns().at(i),
          why_not)) {
      return false;
    }
  }

  // we want to test both out and default args separately
  size_t old_out_start_idx = findFirstOutArg(old.arguments());
  size_t new_out_start_idx = findFirstOutArg(arguments());

  // make sure among the default args, they are backward compatible
  for (const auto i : c10::irange(old_out_start_idx)) {
    if (!arguments().at(i).isBackwardCompatibleWith(
          old.arguments().at(i), why_not)) {
      return false;
    }
  }

  // Validate that all new arguments provided has a default value
  for (const auto i : c10::irange(old_out_start_idx, new_out_start_idx)) {
    if (!arguments().at(i).default_value()) {
      if (why_not) {
        *why_not
            << "Function schema not backward compatible since the new argument '"
            << arguments().at(i).name() << "' of type "
            << arguments().at(i).type()->str()
            << " did not provide a default value.";
      }
      return false;
    }
  }

  // now compare the out args
  for (const auto i : c10::irange(old_out_start_idx, old.arguments().size())) {
    if (!arguments()
             .at(i - old_out_start_idx + new_out_start_idx)
             .isBackwardCompatibleWith(old.arguments().at(i), why_not)) {
      return false;
    }
  }

  return true;
}

inline bool FunctionSchema::isForwardCompatibleWith(
    const FunctionSchema& old,
    std::ostringstream& why_not) const {
  if (!(name() == old.name() &&
        overload_name() == old.overload_name()
        // we are conservative on is_vararg and is_varret,
        // since they are only used by internal operators
        && is_vararg() == old.is_vararg() && is_varret() == old.is_varret() &&
        returns().size() == old.returns().size())) {
    return false;
  }

  // we want to test both out and default args separately
  size_t old_out_start_idx = findFirstOutArg(old.arguments());
  size_t new_out_start_idx = findFirstOutArg(arguments());

  if (old.arguments().size() - old_out_start_idx !=
      arguments().size() - new_out_start_idx) {
    if (why_not) {
      why_not << "Function schema should have the "
              << "same number of out arguments";
    }
    return false;
  }

  // make sure among the default args, they are forward compatible
  for (size_t i = 0; i < std::min(old_out_start_idx, new_out_start_idx); i++) {
    if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) {
      if (why_not) {
        why_not
            << "'" << arguments().at(i).name() << "'"
            << " is not forward compatible with the older version of the schema";
      }
      return false;
    }
  }

  // Validate that all new arguments provided has a default value
  for (size_t i = old_out_start_idx; i < new_out_start_idx; ++i) {
    if (!arguments().at(i).default_value()) {
      if (why_not) {
        why_not
            << "Function schema is not forward compatible since the new argument '"
            << arguments().at(i).name() << "' of type "
            << arguments().at(i).type()->str()
            << " did not provide a default value.";
      }
      return false;
    }

    auto default_val = arguments().at(i).default_value().value();
    if (default_val.isList() || default_val.isGenericDict()) {
      if (why_not) {
        why_not
            << "Function schema is not forward compatible since the new argument '"
            << arguments().at(i).name() << "' of type "
            << arguments().at(i).type()->str() << " has a container type "
            << "as its default value.";
      }
      return false;
    }
  }

  // now compare the out args
  for (size_t i = old_out_start_idx; i < old.arguments().size(); i++) {
    if (!arguments()
             .at(i - old_out_start_idx + new_out_start_idx)
             .isForwardCompatibleWith(old.arguments().at(i))) {
      if (why_not) {
        why_not << "Out argument '"
                << "'" << arguments().at(i).name()
                << " is not FC with the older version of the schema";
      }
      return false;
    }
  }

  return true;
}

template<typename T>
inline void FunctionSchema::checkArg(
    const IValue& value,
    const Argument& argument,
    optional<size_t> pos) const {
  if (value.isTensor() && argument.type() == TensorType::get()) {
    // Fast-path for the common case
    return;
  }
  if (!value.type<T>()->isSubtypeOf(*argument.type())) {
    TORCH_CHECK(
        false,
        formatTypeMismatchMsg(
            argument, value.type<T>()->repr_str(), pos));
  }
}

inline std::string FunctionSchema::findErrorInKwargs(const std::vector<std::string>& kwargs) const {
  // First check if any of the kwargs are unknown, i.e. don't match the name of
  // any argument in the schema.
  for (const auto& kwarg : kwargs) {
    if (!std::count_if(
            arguments().begin(),
            arguments().end(),
            [&kwarg](const Argument& argument) {
              return argument.name() == kwarg;
            })) {
      return c10::str(
          "Unknown keyword argument '",
          kwarg,
          "' for operator '",
          name(),
          "'. Schema: ",
          *this);
    }
  }
  // If there are unconsumed kwargs but none of them were unknown, the first
  // positional argument present in the kwargs is duplicated.
  for (const auto& argument : arguments()) {
    if (std::find(kwargs.begin(), kwargs.end(), argument.name()) != kwargs.end()) {
      AT_ASSERT(!argument.default_value());
      return c10::str(
          "Argument '",
          argument.name(),
          "' specified both as positional and ",
          "keyword argument. Schema: ",
          *this);
    }
  }
  return "";
}

template <typename T>
inline void FunctionSchema::checkAndNormalizeInputs(
    std::vector<IValue>& inputs,
    const std::unordered_map<std::string, IValue>& kwargs) const {
  // Do we have more inputs than the schema accepts?
  TORCH_CHECK(
      inputs.size() <= arguments().size(),
      "Expected at most ",
      arguments().size(),
      " argument(s) for operator '",
      name(),
      "', but received ",
      inputs.size(),
      " argument(s). Declaration: ",
      *this);

  size_t consumed_kwargs = 0;
  for (const auto pos : c10::irange(arguments().size())) {
    const auto& argument = arguments()[pos];
    if (pos < inputs.size()) {
      checkArg<T>(inputs[pos], argument, pos);
      continue;
    }
    auto it = kwargs.find(argument.name());
    if (it != kwargs.end()) {
      checkArg<T>(it->second, argument, nullopt);
      inputs.push_back(it->second);
      consumed_kwargs++;
      continue;
    }
    if (argument.default_value()) {
      inputs.push_back(*argument.default_value());
      continue;
    }
    AT_ERROR(
        name(),
        "() is missing value for argument '",
        argument.name(),
        "'. Declaration: ",
        *this);
  }
  if (consumed_kwargs != kwargs.size()) {
    std::vector<std::string> names;
    names.reserve(kwargs.size());
    for(const auto& k : kwargs) {
      names.emplace_back(k.first);
    }
    throw std::runtime_error(findErrorInKwargs(names));
  }
}

inline FunctionSchema FunctionSchema::cloneWithRemappedTypes(
    const std::function<TypePtr(TypePtr)> type_map) const {
  auto update_args = [&](const std::vector<Argument>& args) {
    std::vector<Argument> new_args;
    new_args.reserve(args.size());
    for(const Argument& arg : args) {
      new_args.emplace_back(arg.cloneWithType(type_map(arg.type())));
    }
    return new_args;
  };
  return FunctionSchema(
      name(),
      overload_name(),
      update_args(arguments()),
      update_args(returns()),
      is_vararg(),
      is_varret());
}

// covariant subtyping of list of Arguments
inline bool isSubtypeOfList(
    ArrayRef<Argument> child,
    ArrayRef<Argument> parent,
    std::ostream* why_not) {
  if (child.size() != parent.size()) {
    return false;
  }
  for (const auto i : c10::irange(child.size())) {
    const Argument& c = child[i];
    const Argument& p = parent[i];
    if (c.name() != p.name()) {
      return false;
    }
    if (!c.type()->isSubtypeOfExt(*p.type(), why_not)) {
      return false;
    }
  }
  return true;
}

inline bool FunctionSchema::isSubtypeOf(
    const FunctionSchema& rhs,
    bool as_method,
    std::ostream* why_not) const {
  size_t start = as_method ? 1 : 0;
  // functions are contravariant in arguments but covariant in returns
  return isSubtypeOfList(
      ArrayRef<Argument>(rhs.arguments()).slice(start),
      ArrayRef<Argument>(arguments()).slice(start),
      why_not) &&
      isSubtypeOfList(returns(), rhs.returns(), why_not);
}

} // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/functional.h
ADDED
@@ -0,0 +1,54 @@
#pragma once

#include <vector>
#include <c10/util/ArrayRef.h>

namespace c10 {

// The passed in function must take T by value (T), or by
// const reference (const T&); taking T by non-const reference
// will result in an error like:
//
// error: no type named 'type' in 'class std::result_of<foobar::__lambda(T)>'
//
// No explicit template parameters are required.

// Overload for explicit function and ArrayRef
template<class F, class T>
inline auto fmap(const T& inputs, const F& fn) -> std::vector<decltype(fn(*inputs.begin()))> {
  std::vector<decltype(fn(*inputs.begin()))> r;
  r.reserve(inputs.size());
  for(const auto & input : inputs)
    r.push_back(fn(input));
  return r;
}

// C++ forbids taking an address of a constructor, so here's a workaround...
// Overload for constructor (R) application
template<typename R, typename T>
inline std::vector<R> fmap(const T& inputs) {
  std::vector<R> r;
  r.reserve(inputs.size());
  for(auto & input : inputs)
    r.push_back(R(input));
  return r;
}

template<typename F, typename T>
inline std::vector<T> filter(at::ArrayRef<T> inputs, const F& fn) {
  std::vector<T> r;
  r.reserve(inputs.size());
  for(auto & input : inputs) {
    if (fn(input)) {
      r.push_back(input);
    }
  }
  return r;
}

template<typename F, typename T>
inline std::vector<T> filter(const std::vector<T>& inputs, const F& fn) {
  return filter<F, T>(static_cast<at::ArrayRef<T>>(inputs), fn);
}

} // namespace c10
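A small usage sketch for the fmap/filter helpers above (illustrative only, not part of the uploaded headers):

// Illustrative sketch, not part of the upload: mapping and filtering with the c10 helpers.
#include <ATen/core/functional.h>
#include <vector>

std::vector<int> even_squares(const std::vector<int>& xs) {
  // fmap applies the lambda to each element and collects the results.
  auto squares = c10::fmap(xs, [](int x) { return x * x; });
  // filter keeps the elements for which the predicate returns true.
  return c10::filter(squares, [](const int& x) { return x % 2 == 0; });
}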