applied-ai-018 committed
Commit 9c39b2e · verified · 1 Parent(s): 281b268

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/12.attention.dense.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/3.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/3.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/3.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h +3 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h +13 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h +46 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h +165 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/core/Array.h +39 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h +2 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h +25 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h +139 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h +13 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h +337 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h +190 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h +39 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h +201 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/core/List.h +490 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h +360 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h +194 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h +139 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h +242 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h +28 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h +23 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h +83 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h +16 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h +1 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h +1 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h +276 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h +0 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h +173 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h +1 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h +95 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h +96 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h +151 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h +2213 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/core/blob.h +208 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h +176 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h +99 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h +260 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h +229 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h +27 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +32 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h +39 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h +387 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h +600 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h +124 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h +441 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h +65 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h +242 -0
ckpts/universal/global_step120/zero/12.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e91c163d30684fbd094fddaacd8af69c144a92baf418c045a9d307c2793dc571
+ size 16778411
ckpts/universal/global_step120/zero/3.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19ef7b4c40d47c5b06eb84f9164e0dfd3366c9b933fb0a55c0adbade60eb201c
+ size 33555612
ckpts/universal/global_step120/zero/3.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25b37308b3acc3c87b7d6a81004acb9329d63548452460b25fee898177b647df
+ size 33555627
ckpts/universal/global_step120/zero/3.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f2935cf6d73b1137d55b32db0f6a3ce1b4903bed8d455e20ff4636dff0b56da
+ size 33555533
venv/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h ADDED
@@ -0,0 +1,3 @@
+ #pragma once
+
+ #include <c10/macros/Macros.h>
venv/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+ struct OperatorName;
+ }
+
+ namespace at {
+
+ // check if an op is a custom op (i.e. did not come from native_functions.yaml)
+ TORCH_API bool is_custom_op(const c10::OperatorName& opName);
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+ #include <c10/core/QScheme.h>
+
+ // Forward declarations of core ATen types used in dispatch functions
+ namespace c10 {
+
+ template<typename T>
+ class List;
+ template<typename T>
+ class IListRef;
+ class Stream;
+ class Scalar;
+ class SymInt;
+ class SymIntList;
+ struct Storage;
+ struct TensorOptions;
+ template <typename T>
+ class ArrayRef;
+ template <typename T>
+ class OptionalArrayRef;
+
+ } // namespace c10
+
+ namespace at {
+
+ class Tensor;
+ class OptionalTensorRef;
+ struct Dimname;
+ struct Generator;
+ using TensorList = c10::ArrayRef<Tensor>;
+ using ITensorListRef = c10::IListRef<Tensor>;
+ using IOptTensorListRef = c10::IListRef<OptionalTensorRef>;
+ using DimnameList = c10::ArrayRef<Dimname>;
+ using IntArrayRef = c10::ArrayRef<int64_t>;
+ using OptionalIntArrayRef = c10::OptionalArrayRef<int64_t>;
+ using OptionalSymIntArrayRef = c10::OptionalArrayRef<c10::SymInt>;
+
+ using c10::Stream;
+ using c10::Storage;
+ using c10::QScheme;
+ using c10::Scalar;
+ using c10::SymInt;
+ using c10::SymIntList;
+ using c10::TensorOptions;
+
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h ADDED
@@ -0,0 +1,165 @@
1
+ // This global header must not depend on native_functions.yaml or
2
+ // incremental builds will be next to useless
3
+ #pragma push_macro("TORCH_ASSERT_NO_OPERATORS")
4
+ #define TORCH_ASSERT_NO_OPERATORS
5
+
6
+ // This macro doesn't work if defined after the first time inttypes.h
7
+ // is included, so won't work anywhere if not defined here.
8
+ #ifndef __STDC_FORMAT_MACROS
9
+ #define __STDC_FORMAT_MACROS
10
+ #endif
11
+ #include <cinttypes>
12
+
13
+ // This list of headers was generated using a script that finds
14
+ // high-impact headers and then manually tweaked to remove OS specific
15
+ // or duplicate headers (e.g. <cassert> and <assert.h>) and to remove
16
+ // "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10).
17
+
18
+ // To generate the initial list:
19
+ // 1. Build pytorch from scratch with all build caching disabled
20
+ // 2. Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing)
21
+ // $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json
22
+ // 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/
23
+ // $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json
24
+ // Where the threshold can be tweaked until c10 and some of ATen
25
+ // core are included but TORCH_ASSERT_NO_OPERATORS still passes.
26
+
27
+ #include <cerrno>
28
+ #include <cmath>
29
+ #include <cstddef>
30
+ #include <cstdint>
31
+ #include <cstdlib>
32
+ #include <cstring>
33
+
34
+ #include <algorithm>
35
+ #include <array>
36
+ #include <atomic>
37
+ #include <chrono>
38
+ #include <complex>
39
+ #include <deque>
40
+ #include <exception>
41
+ #include <functional>
42
+ #include <initializer_list>
43
+ #include <iomanip>
44
+ #include <iosfwd>
45
+ #include <iterator>
46
+ #include <limits>
47
+ #include <list>
48
+ #include <map>
49
+ #include <memory>
50
+ #include <mutex>
51
+ #include <new>
52
+ #include <numeric>
53
+ #include <ostream>
54
+ #include <sstream>
55
+ #include <stdexcept>
56
+ #include <string>
57
+ #include <tuple>
58
+ #include <type_traits>
59
+ #include <typeindex>
60
+ #include <typeinfo>
61
+ #include <unordered_map>
62
+ #include <unordered_set>
63
+ #include <utility>
64
+ #include <vector>
65
+
66
+ #include <c10/core/Allocator.h>
67
+ #include <c10/core/AutogradState.h>
68
+ #include <c10/core/Backend.h>
69
+ #include <c10/core/DefaultDtype.h>
70
+ #include <c10/core/Device.h>
71
+ #include <c10/core/DeviceType.h>
72
+ #include <c10/core/DispatchKey.h>
73
+ #include <c10/core/DispatchKeySet.h>
74
+ #include <c10/core/GeneratorImpl.h>
75
+ #include <c10/core/InferenceMode.h>
76
+ #include <c10/core/Layout.h>
77
+ #include <c10/core/MemoryFormat.h>
78
+ #include <c10/core/OptionalRef.h>
79
+ #include <c10/core/QScheme.h>
80
+ #include <c10/core/Scalar.h>
81
+ #include <c10/core/ScalarType.h>
82
+ #include <c10/core/ScalarTypeToTypeMeta.h>
83
+ #include <c10/core/Storage.h>
84
+ #include <c10/core/StorageImpl.h>
85
+ #include <c10/core/SymBool.h>
86
+ #include <c10/core/SymFloat.h>
87
+ #include <c10/core/SymInt.h>
88
+ #include <c10/core/SymIntArrayRef.h>
89
+ #include <c10/core/SymNodeImpl.h>
90
+ #include <c10/core/TensorImpl.h>
91
+ #include <c10/core/TensorOptions.h>
92
+ #include <c10/core/UndefinedTensorImpl.h>
93
+ #include <c10/core/WrapDimMinimal.h>
94
+ #include <c10/core/impl/LocalDispatchKeySet.h>
95
+ #include <c10/core/impl/PyInterpreter.h>
96
+ #include <c10/core/impl/SizesAndStrides.h>
97
+
98
+ #include <c10/macros/Export.h>
99
+ #include <c10/macros/Macros.h>
100
+
101
+ #include <c10/util/AlignOf.h>
102
+ #include <c10/util/ArrayRef.h>
103
+ #include <c10/util/BFloat16.h>
104
+ #include <c10/util/C++17.h>
105
+ #include <c10/util/ConstexprCrc.h>
106
+ #include <c10/util/Deprecated.h>
107
+ #include <c10/util/DimVector.h>
108
+ #include <c10/util/Exception.h>
109
+ #include <c10/util/ExclusivelyOwned.h>
110
+ #include <c10/util/Flags.h>
111
+ #include <c10/util/Float8_e4m3fn.h>
112
+ #include <c10/util/Float8_e5m2.h>
113
+ #include <c10/util/Float8_e4m3fnuz.h>
114
+ #include <c10/util/Float8_e5m2fnuz.h>
115
+ #include <c10/util/FunctionRef.h>
116
+ #include <c10/util/Half.h>
117
+ #include <c10/util/IdWrapper.h>
118
+ #include <c10/util/Logging.h>
119
+ #include <c10/util/MaybeOwned.h>
120
+ #include <c10/util/Metaprogramming.h>
121
+ #include <c10/util/Optional.h>
122
+ #include <c10/util/Registry.h>
123
+ #include <c10/util/SmallVector.h>
124
+ #include <c10/util/StringUtil.h>
125
+ #include <c10/util/ThreadLocalDebugInfo.h>
126
+ #include <c10/util/Type.h>
127
+ #include <c10/util/TypeCast.h>
128
+ #include <c10/util/TypeIndex.h>
129
+ #include <c10/util/TypeList.h>
130
+ #include <c10/util/TypeSafeSignMath.h>
131
+ #include <c10/util/TypeTraits.h>
132
+ #include <c10/util/UniqueVoidPtr.h>
133
+ #include <c10/util/accumulate.h>
134
+ #include <c10/util/bit_cast.h>
135
+ #include <c10/util/bits.h>
136
+ #include <c10/util/complex.h>
137
+ #include <c10/util/floating_point_utils.h>
138
+ #include <c10/util/intrusive_ptr.h>
139
+ #include <c10/util/irange.h>
140
+ #include <c10/util/llvmMathExtras.h>
141
+ #include <c10/util/python_stub.h>
142
+ #include <c10/util/qint32.h>
143
+ #include <c10/util/qint8.h>
144
+ #include <c10/util/quint2x4.h>
145
+ #include <c10/util/quint4x2.h>
146
+ #include <c10/util/quint8.h>
147
+ #include <c10/util/safe_numerics.h>
148
+ #include <c10/util/string_utils.h>
149
+ #include <c10/util/string_view.h>
150
+ #include <c10/util/typeid.h>
151
+
152
+ #include <ATen/StorageUtils.h>
153
+ #include <ATen/core/ATen_fwd.h>
154
+ #include <ATen/core/DeprecatedTypeProperties.h>
155
+ #include <ATen/core/DeprecatedTypePropertiesRegistry.h>
156
+ #include <ATen/core/DimVector.h>
157
+ #include <ATen/core/Dimname.h>
158
+ #include <ATen/core/Generator.h>
159
+ #include <ATen/core/NamedTensor.h>
160
+ #include <ATen/core/QuantizerBase.h>
161
+ #include <ATen/core/TensorAccessor.h>
162
+ #include <ATen/core/TensorBase.h>
163
+ #include <ATen/core/symbol.h>
164
+
165
+ #pragma pop_macro("TORCH_ASSERT_NO_OPERATORS")
venv/lib/python3.10/site-packages/torch/include/ATen/core/Array.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // A fixed-size array type usable from both host and
+ // device code.
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/irange.h>
+
+ namespace at { namespace detail {
+
+ template <typename T, int size_>
+ struct Array {
+ T data[size_];
+
+ C10_HOST_DEVICE T operator[](int i) const {
+ return data[i];
+ }
+ C10_HOST_DEVICE T& operator[](int i) {
+ return data[i];
+ }
+ #if defined(USE_ROCM)
+ C10_HOST_DEVICE Array() = default;
+ C10_HOST_DEVICE Array(const Array&) = default;
+ C10_HOST_DEVICE Array& operator=(const Array&) = default;
+ #else
+ Array() = default;
+ Array(const Array&) = default;
+ Array& operator=(const Array&) = default;
+ #endif
+ static constexpr int size(){return size_;}
+ // Fill the array with x.
+ C10_HOST_DEVICE Array(T x) {
+ for (int i = 0; i < size_; i++) {
+ data[i] = x;
+ }
+ }
+ };
+
+ }}
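A minimal usage sketch for the fixed-size Array type added above; it assumes only that <ATen/core/Array.h> is on the include path, and the element values are illustrative.

#include <ATen/core/Array.h>

int main() {
  // Fill constructor: every element is initialized to 1.0f.
  at::detail::Array<float, 4> a(1.0f);
  a[2] = 3.0f;  // element access via operator[]
  static_assert(decltype(a)::size() == 4, "size() is a compile-time constant");
  return a[2] == 3.0f ? 0 : 1;
}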
venv/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h ADDED
@@ -0,0 +1,2 @@
+ #include <c10/util/Backtrace.h>
+ #include <c10/util/Type.h>
venv/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h ADDED
@@ -0,0 +1,25 @@
+ #include <c10/core/TensorOptions.h>
+
+ namespace c10 { namespace impl {
+
+ inline c10::optional<MemoryFormat>
+ check_tensor_options_and_extract_memory_format(
+ const TensorOptions& options,
+ c10::optional<MemoryFormat> memory_format) {
+ TORCH_CHECK(
+ options.requires_grad_opt() == c10::nullopt ||
+ options.requires_grad_opt().value() == false,
+ "Operators taking TensorOptions cannot take a TensorOptions with "
+ "options.requires_grad set as true. This isn't implemented yet.");
+ TORCH_CHECK(
+ !(options.has_memory_format() && memory_format.has_value()),
+ "Cannot set memory_format both in TensorOptions and explicit argument; please delete "
+ "the redundant setter.");
+ if (memory_format.has_value()) {
+ return memory_format;
+ } else {
+ return options.memory_format_opt();
+ }
+ }
+
+ }} // namespace impl namespace c10
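A small sketch of how the helper above is typically called from an operator that accepts both a TensorOptions and an explicit memory_format argument; the wrapper name resolve_format is hypothetical and not part of the header.

#include <ATen/core/CheckMemoryFormat.h>

// Hypothetical wrapper: prefers the explicit argument, falls back to the
// memory format stored in the options, and TORCH_CHECKs on conflicts or
// on requires_grad being set.
c10::optional<c10::MemoryFormat> resolve_format(
    const c10::TensorOptions& options,
    c10::optional<c10::MemoryFormat> memory_format) {
  return c10::impl::check_tensor_options_and_extract_memory_format(
      options, memory_format);
}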
venv/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h ADDED
@@ -0,0 +1,139 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/core/ScalarType.h>
5
+ #include <c10/core/Layout.h>
6
+ #include <c10/core/TensorOptions.h>
7
+ #include <c10/core/Storage.h>
8
+ #include <ATen/core/DeprecatedTypePropertiesRegistry.h>
9
+ #include <ATen/core/Generator.h>
10
+
11
+
12
+ namespace at {
13
+
14
+ class Tensor;
15
+
16
+ // This class specifies a Backend and a ScalarType. Currently, it primarily
17
+ // serves as a replacement return value for Tensor::type(). Previously,
18
+ // Tensor::type() returned Type&, but we are changing Type to not be
19
+ // dtype-specific.
20
+ class TORCH_API DeprecatedTypeProperties {
21
+ public:
22
+ DeprecatedTypeProperties(Backend backend, ScalarType scalar_type)
23
+ : backend_(backend), scalar_type_(scalar_type) {}
24
+
25
+ Backend backend() const {
26
+ return backend_;
27
+ }
28
+
29
+ Layout layout() const {
30
+ return layout_from_backend(backend_);
31
+ }
32
+
33
+ bool is_sparse() const {
34
+ return layout_from_backend(backend()) == kSparse;
35
+ }
36
+
37
+ bool is_sparse_csr() const {
38
+ return layout_from_backend(backend()) == kSparseCsr;
39
+ }
40
+
41
+ c10::DeviceType device_type() const {
42
+ return backendToDeviceType(backend_);
43
+ }
44
+
45
+ bool is_cuda() const {
46
+ return backendToDeviceType(backend_) == kCUDA;
47
+ }
48
+
49
+ ScalarType scalarType() const {
50
+ return scalar_type_;
51
+ }
52
+
53
+ caffe2::TypeMeta typeMeta() const {
54
+ return scalarTypeToTypeMeta(scalar_type_);
55
+ }
56
+
57
+ bool operator==(const DeprecatedTypeProperties& other) const {
58
+ return backend_ == other.backend() && scalar_type_ == other.scalarType();
59
+ }
60
+
61
+ bool operator!=(const DeprecatedTypeProperties& other) const {
62
+ return !(*this == other);
63
+ }
64
+
65
+ std::string toString() const {
66
+ std::string base_str;
67
+ if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) {
68
+ base_str = "UndefinedType";
69
+ } else {
70
+ base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type";
71
+ }
72
+ return base_str;
73
+ }
74
+
75
+ DeprecatedTypeProperties & toBackend(Backend b) const {
76
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
77
+ b, scalar_type_);
78
+ }
79
+
80
+ DeprecatedTypeProperties & toScalarType(ScalarType s) const {
81
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
82
+ backend_, s);
83
+ }
84
+
85
+ DeprecatedTypeProperties & cpu() const {
86
+ return toBackend(Backend::CPU);
87
+ }
88
+
89
+ DeprecatedTypeProperties & cuda() const {
90
+ return toBackend(Backend::CUDA);
91
+ }
92
+
93
+ DeprecatedTypeProperties & hip() const {
94
+ return toBackend(Backend::HIP);
95
+ }
96
+
97
+ DeprecatedTypeProperties & privateUser1() const {
98
+ return toBackend(Backend::PrivateUse1);
99
+ }
100
+
101
+ /// Constructs the `TensorOptions` from a type and a `device_index`.
102
+ TensorOptions options(int16_t device_index = -1) const {
103
+ return TensorOptions().dtype(typeMeta())
104
+ .device(device_type(), static_cast<c10::DeviceIndex>(device_index))
105
+ .layout(layout());
106
+ }
107
+
108
+ /// Constructs the `TensorOptions` from a type and a Device. Asserts that
109
+ /// the device type matches the device type of the type.
110
+ TensorOptions options(c10::optional<Device> device_opt) const {
111
+ if (!device_opt.has_value()) {
112
+ return options(-1);
113
+ } else {
114
+ Device device = device_opt.value();
115
+ AT_ASSERT(device.type() == device_type());
116
+ return options(device.index());
117
+ }
118
+ }
119
+
120
+ operator TensorOptions() const {
121
+ return options();
122
+ }
123
+
124
+ int64_t id() const {
125
+ return static_cast<int64_t>(backend()) *
126
+ static_cast<int64_t>(ScalarType::NumOptions) +
127
+ static_cast<int64_t>(scalarType());
128
+ }
129
+
130
+ Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const;
131
+ Storage unsafeStorageFromTH(void * th_pointer, bool retain) const;
132
+ Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional<Device> to_device={}) const;
133
+
134
+ private:
135
+ Backend backend_;
136
+ ScalarType scalar_type_;
137
+ };
138
+
139
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+ #include <c10/util/DimVector.h>
+
+ namespace at {
+
+ // Re-declaring 'DimVector' type and size inside 'at' namespace.
+ // This is done to avoid modifying every use into their 'c10'
+ // equivalent.
+
+ using c10::kDimVectorStaticSize;
+ using c10::DimVector;
+
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h ADDED
@@ -0,0 +1,337 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Array.h>
4
+ #include <ATen/core/TransformationHelper.h>
5
+ #include <c10/util/Half.h>
6
+ #include <c10/util/BFloat16.h>
7
+ #include <c10/util/MathConstants.h>
8
+ #include <c10/util/Optional.h>
9
+ #include <c10/macros/Macros.h>
10
+
11
+ #include <type_traits>
12
+ #include <limits>
13
+ #include <cmath>
14
+
15
+ /**
16
+ * Distributions kernel adapted from THRandom.cpp
17
+ * The kernels try to follow std::random distributions signature
18
+ * For instance: in ATen
19
+ * auto gen = at::detail::createCPUGenerator();
20
+ * at::uniform_real_distribution<double> uniform(0, 1);
21
+ * auto sample = uniform(gen.get());
22
+ *
23
+ * vs std::random
24
+ *
25
+ * std::mt19937 gen;
26
+ * std::uniform_real_distribution uniform(0, 1);
27
+ * auto sample = uniform(gen);
28
+ */
29
+
30
+
31
+ namespace at {
32
+ namespace {
33
+
34
+ /**
35
+ * Samples a discrete uniform distribution in the range [base, base+range) of type T
36
+ */
37
+ template <typename T>
38
+ struct uniform_int_from_to_distribution {
39
+
40
+ C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {}
41
+
42
+ template <typename RNG>
43
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
44
+ if ((
45
+ std::is_same<T, int64_t>::value ||
46
+ std::is_same<T, double>::value ||
47
+ std::is_same<T, float>::value ||
48
+ std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32)
49
+ {
50
+ return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_);
51
+ } else {
52
+ return transformation::uniform_int_from_to<T>(generator->random(), range_, base_);
53
+ }
54
+ }
55
+
56
+ private:
57
+ uint64_t range_;
58
+ int64_t base_;
59
+ };
60
+
61
+ /**
62
+ * Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)]
63
+ */
64
+ template <typename T>
65
+ struct uniform_int_full_range_distribution {
66
+
67
+ template <typename RNG>
68
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
69
+ return transformation::uniform_int_full_range<T>(generator->random64());
70
+ }
71
+
72
+ };
73
+
74
+ /**
75
+ * Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types
76
+ * and [0, 2^mantissa] for floating-point types.
77
+ */
78
+ template <typename T>
79
+ struct uniform_int_distribution {
80
+
81
+ template <typename RNG>
82
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
83
+ if constexpr (std::is_same_v<T, double> || std::is_same_v<T, int64_t>) {
84
+ return transformation::uniform_int<T>(generator->random64());
85
+ } else {
86
+ return transformation::uniform_int<T>(generator->random());
87
+ }
88
+ }
89
+
90
+ };
91
+
92
+ /**
93
+ * Samples a uniform distribution in the range [from, to) of type T
94
+ */
95
+ template <typename T>
96
+ struct uniform_real_distribution {
97
+
98
+ C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
99
+ TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
100
+ TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
101
+ from_ = from;
102
+ to_ = to;
103
+ }
104
+
105
+ template <typename RNG>
106
+ C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
107
+ if constexpr (std::is_same_v<T, double>) {
108
+ return transformation::uniform_real<T>(generator->random64(), from_, to_);
109
+ } else {
110
+ return transformation::uniform_real<T>(generator->random(), from_, to_);
111
+ }
112
+ }
113
+
114
+ private:
115
+ T from_;
116
+ T to_;
117
+ };
118
+
119
+ // The SFINAE checks introduced in #39816 looks overcomplicated and must revisited
120
+ // https://github.com/pytorch/pytorch/issues/40052
121
+ #define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \
122
+ template <typename T> \
123
+ struct has_member_##member \
124
+ { \
125
+ typedef char yes; \
126
+ typedef long no; \
127
+ template <typename U> static yes test(decltype(&U::member)); \
128
+ template <typename U> static no test(...); \
129
+ static constexpr bool value = sizeof(test<T>(0)) == sizeof(yes); \
130
+ }
131
+
132
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample);
133
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample);
134
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample);
135
+ DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample);
136
+
137
+ #define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \
138
+ \
139
+ template <typename RNG, typename ret_type, \
140
+ typename std::enable_if_t<( \
141
+ has_member_next_##TYPE##_normal_sample<RNG>::value && \
142
+ has_member_set_next_##TYPE##_normal_sample<RNG>::value \
143
+ ), int> = 0> \
144
+ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \
145
+ if (generator->next_##TYPE##_normal_sample()) { \
146
+ *ret = *(generator->next_##TYPE##_normal_sample()); \
147
+ generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>()); \
148
+ return true; \
149
+ } \
150
+ return false; \
151
+ } \
152
+ \
153
+ template <typename RNG, typename ret_type, \
154
+ typename std::enable_if_t<( \
155
+ !has_member_next_##TYPE##_normal_sample<RNG>::value || \
156
+ !has_member_set_next_##TYPE##_normal_sample<RNG>::value \
157
+ ), int> = 0> \
158
+ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \
159
+ return false; \
160
+ } \
161
+ \
162
+ template <typename RNG, typename ret_type, \
163
+ typename std::enable_if_t<( \
164
+ has_member_set_next_##TYPE##_normal_sample<RNG>::value \
165
+ ), int> = 0> \
166
+ C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \
167
+ generator->set_next_##TYPE##_normal_sample(cache); \
168
+ } \
169
+ \
170
+ template <typename RNG, typename ret_type, \
171
+ typename std::enable_if_t<( \
172
+ !has_member_set_next_##TYPE##_normal_sample<RNG>::value \
173
+ ), int> = 0> \
174
+ C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \
175
+ }
176
+
177
+ DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double);
178
+ DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
179
+
180
+ /**
181
+ * Samples a normal distribution using the Box-Muller method
182
+ * Takes mean and standard deviation as inputs
183
+ * Note that Box-muller method returns two samples at a time.
184
+ * Hence, we cache the "next" sample in the CPUGeneratorImpl class.
185
+ */
186
+ template <typename T>
187
+ struct normal_distribution {
188
+
189
+ C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
190
+ TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
191
+ mean = mean_in;
192
+ stdv = stdv_in;
193
+ }
194
+
195
+ template <typename RNG>
196
+ C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){
197
+ dist_acctype<T> ret;
198
+ // return cached values if available
199
+ if constexpr (std::is_same_v<T, double>) {
200
+ if (maybe_get_next_double_normal_sample(generator, &ret)) {
201
+ return transformation::normal(ret, mean, stdv);
202
+ }
203
+ } else {
204
+ if (maybe_get_next_float_normal_sample(generator, &ret)) {
205
+ return transformation::normal(ret, mean, stdv);
206
+ }
207
+ }
208
+ // otherwise generate new normal values
209
+ uniform_real_distribution<T> uniform(0.0, 1.0);
210
+ const dist_acctype<T> u1 = uniform(generator);
211
+ const dist_acctype<T> u2 = uniform(generator);
212
+ const dist_acctype<T> r = ::sqrt(static_cast<T>(-2.0) * ::log1p(-u2));
213
+ const dist_acctype<T> theta = static_cast<T>(2.0) * c10::pi<T> * u1;
214
+ if constexpr (std::is_same_v<T, double>) {
215
+ maybe_set_next_double_normal_sample(generator, r * ::sin(theta));
216
+ } else {
217
+ maybe_set_next_float_normal_sample(generator, r * ::sin(theta));
218
+ }
219
+ ret = r * ::cos(theta);
220
+ return transformation::normal(ret, mean, stdv);
221
+ }
222
+
223
+ private:
224
+ T mean;
225
+ T stdv;
226
+ };
227
+
228
+ template <typename T>
229
+ struct DiscreteDistributionType { using type = float; };
230
+
231
+ template <> struct DiscreteDistributionType<double> { using type = double; };
232
+
233
+ /**
234
+ * Samples a bernoulli distribution given a probability input
235
+ */
236
+ template <typename T>
237
+ struct bernoulli_distribution {
238
+
239
+ C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
240
+ TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
241
+ p = p_in;
242
+ }
243
+
244
+ template <typename RNG>
245
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
246
+ uniform_real_distribution<T> uniform(0.0, 1.0);
247
+ return transformation::bernoulli<T>(uniform(generator), p);
248
+ }
249
+
250
+ private:
251
+ T p;
252
+ };
253
+
254
+ /**
255
+ * Samples a geometric distribution given a probability input
256
+ */
257
+ template <typename T>
258
+ struct geometric_distribution {
259
+
260
+ C10_HOST_DEVICE inline geometric_distribution(T p_in) {
261
+ TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
262
+ p = p_in;
263
+ }
264
+
265
+ template <typename RNG>
266
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
267
+ uniform_real_distribution<T> uniform(0.0, 1.0);
268
+ return transformation::geometric<T>(uniform(generator), p);
269
+ }
270
+
271
+ private:
272
+ T p;
273
+ };
274
+
275
+ /**
276
+ * Samples an exponential distribution given a lambda input
277
+ */
278
+ template <typename T>
279
+ struct exponential_distribution {
280
+
281
+ C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {}
282
+
283
+ template <typename RNG>
284
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
285
+ uniform_real_distribution<T> uniform(0.0, 1.0);
286
+ return transformation::exponential<T>(uniform(generator), lambda);
287
+ }
288
+
289
+ private:
290
+ T lambda;
291
+ };
292
+
293
+ /**
294
+ * Samples a cauchy distribution given median and sigma as inputs
295
+ */
296
+ template <typename T>
297
+ struct cauchy_distribution {
298
+
299
+ C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {}
300
+
301
+ template <typename RNG>
302
+ C10_HOST_DEVICE inline T operator()(RNG generator) {
303
+ uniform_real_distribution<T> uniform(0.0, 1.0);
304
+ return transformation::cauchy<T>(uniform(generator), median, sigma);
305
+ }
306
+
307
+ private:
308
+ T median;
309
+ T sigma;
310
+ };
311
+
312
+ /**
313
+ * Samples a lognormal distribution
314
+ * Takes mean and standard deviation as inputs
315
+ * Outputs two samples at a time
316
+ */
317
+ template <typename T>
318
+ struct lognormal_distribution {
319
+
320
+ C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
321
+ TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
322
+ mean = mean_in;
323
+ stdv = stdv_in;
324
+ }
325
+
326
+ template<typename RNG>
327
+ C10_HOST_DEVICE inline T operator()(RNG generator){
328
+ normal_distribution<T> normal(mean, stdv);
329
+ return transformation::log_normal<T>(normal(generator));
330
+ }
331
+
332
+ private:
333
+ T mean;
334
+ T stdv;
335
+ };
336
+ }
337
+ } // namespace at
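Following the doc comment at the top of this header, a minimal sketch of drawing one uniform sample on the CPU. It assumes at::CPUGeneratorImpl and at::detail::createCPUGenerator from ATen/CPUGeneratorImpl.h, and the lock follows Note [Acquire lock when using random generators] in Generator.h; the seed is illustrative.

#include <ATen/CPUGeneratorImpl.h>          // assumed location of CPUGeneratorImpl / createCPUGenerator
#include <ATen/core/DistributionsHelper.h>
#include <mutex>

int main() {
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  // Generators are not thread-safe; hold the public mutex while sampling.
  std::lock_guard<std::mutex> lock(gen.mutex());

  at::uniform_real_distribution<double> uniform(0.0, 1.0);
  double sample = uniform(gen.get<at::CPUGeneratorImpl>());  // operator() takes an RNG pointer
  return (sample >= 0.0 && sample < 1.0) ? 0 : 1;
}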
venv/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h ADDED
@@ -0,0 +1,190 @@
1
+ #pragma once
2
+
3
+ #include <mutex>
4
+ #include <deque>
5
+ #include <atomic>
6
+ #include <typeinfo>
7
+ #include <utility>
8
+ #include <cstddef>
9
+ #include <cstdint>
10
+
11
+ #include <c10/util/Exception.h>
12
+ #include <c10/util/intrusive_ptr.h>
13
+ #include <c10/core/Device.h>
14
+ #include <c10/core/DispatchKeySet.h>
15
+
16
+ // For the record I don't think this is a correct pimpl idiom.
17
+ // Including Impl header in interface header defeats the purpose
18
+ // because you can't change Impl private members without forcing
19
+ // everything that included the interface to rebuild.
20
+ // Impl should be forward-declared in the interface header instead.
21
+ #include <c10/core/GeneratorImpl.h>
22
+
23
+ /**
24
+ * Note [Generator]
25
+ * ~~~~~~~~~~~~~~~~
26
+ * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm to
27
+ * generate a seemingly random sequence of numbers, that may be later be used in creating
28
+ * a random distribution. Such an engine almost always maintains a state and requires a
29
+ * seed to start off the creation of random numbers. Often times, users have
30
+ * found it beneficial to be able to explicitly create, retain, and destroy
31
+ * PRNG states and also be able to have control over the seed value.
32
+ *
33
+ * A Generator in ATen gives users the ability to read, write and modify a PRNG engine.
34
+ * For instance, it does so by letting users seed a PRNG engine, fork the state of the
35
+ * engine, etc.
36
+ *
37
+ * By default, there is one generator per device, and a device's generator is
38
+ * lazily created. A user can use the torch.Generator() api to create their own generator.
39
+ */
40
+
41
+ /**
42
+ * Note [Acquire lock when using random generators]
43
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
44
+ * Generator and its derived classes are NOT thread-safe. Please note that most of the
45
+ * places where we have inserted locking for generators are historically based, and we
46
+ * haven't actually checked that everything is truly thread safe (and it probably isn't).
47
+ * Please use the public mutex_ when using any methods from these classes, except for the
48
+ * read-only methods. You can learn about the usage by looking into the unittests
49
+ * (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard.
50
+ *
51
+ * TODO: Look into changing the threading semantics of Generators in ATen (e.g., making
52
+ * them non-thread safe and instead making the generator state splittable, to accommodate
53
+ * forks into other threads).
54
+ */
55
+
56
+ namespace at {
57
+
58
+ class Tensor;
59
+
60
+ struct TORCH_API Generator {
61
+ Generator() = default;
62
+
63
+ explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl)
64
+ : impl_(std::move(gen_impl)) {
65
+ if (impl_.get() == nullptr) {
66
+ throw std::runtime_error("GeneratorImpl with nullptr is not supported");
67
+ }
68
+ }
69
+
70
+ bool operator==(const Generator& rhs) const {
71
+ return this->impl_ == rhs.impl_;
72
+ }
73
+
74
+ bool operator!=(const Generator& rhs) const {
75
+ return !((*this) == rhs);
76
+ }
77
+
78
+ bool defined() const {
79
+ return static_cast<bool>(impl_);
80
+ }
81
+
82
+ c10::GeneratorImpl* unsafeGetGeneratorImpl() const {
83
+ return impl_.get();
84
+ }
85
+
86
+ c10::GeneratorImpl* unsafeReleaseGeneratorImpl() {
87
+ return impl_.release();
88
+ }
89
+
90
+ const c10::intrusive_ptr<c10::GeneratorImpl>& getIntrusivePtr() const {
91
+ return impl_;
92
+ }
93
+
94
+ void set_current_seed(uint64_t seed) { impl_->set_current_seed(seed); }
95
+ // Sets the offset of Generator state to the desired offset. This is currently
96
+ // supported for only Philox based Generators, i.e., CUDA and MPS.
97
+ void set_offset(uint64_t offset) { impl_->set_offset(offset); }
98
+
99
+ // Returns the offset of Generator state. This is currently supported for only
100
+ // Philox based Generators, i.e., CUDA and MPS.
101
+ uint64_t get_offset() const { return impl_->get_offset(); }
102
+
103
+ uint64_t current_seed() const { return impl_->current_seed(); }
104
+
105
+ uint64_t seed() { return impl_->seed(); }
106
+
107
+ // Implementation not inlined to prevent cycle reference between
108
+ // `ATen/core/Generator.h` and `ATen/core/Tensor.h`
109
+ void set_state(const at::Tensor& new_state);
110
+
111
+ at::Tensor get_state() const;
112
+
113
+ std::mutex& mutex() {
114
+ return impl_->mutex_;
115
+ }
116
+
117
+ DispatchKeySet key_set() const {
118
+ return impl_->key_set();
119
+ }
120
+
121
+ Device device() const { return impl_->device(); }
122
+
123
+ inline void set_pyobj(PyObject* pyobj) const noexcept {
124
+ impl_->set_pyobj(pyobj);
125
+ }
126
+
127
+ inline PyObject* pyobj() const noexcept {
128
+ return impl_->pyobj();
129
+ }
130
+
131
+ template<typename T>
132
+ T* get() const { return static_cast<T*>(impl_.get()); }
133
+
134
+ Generator clone() const {
135
+ return Generator(impl_->clone());
136
+ }
137
+
138
+ private:
139
+ c10::intrusive_ptr<c10::GeneratorImpl> impl_;
140
+ };
141
+
142
+ template<class Impl, class... Args>
143
+ Generator make_generator(Args&&... args) {
144
+ return Generator(c10::make_intrusive<Impl>(std::forward<Args>(args)...));
145
+ }
146
+
147
+ /**
148
+ * Utility function to static cast input Generator* to
149
+ * the backend generator type (CPU/CUDAGeneratorImpl etc.)
150
+ */
151
+ template <typename T>
152
+ static inline T * check_generator(c10::optional<Generator> gen) {
153
+ TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
154
+ TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
155
+ TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
156
+ return gen->get<T>();
157
+ }
158
+
159
+ /**
160
+ * Utility function used in tensor implementations, which
161
+ * supplies the default generator to tensors, if an input generator
162
+ * is not supplied. The input Generator* is also static casted to
163
+ * the backend generator type (CPU/CUDAGeneratorImpl etc.)
164
+ */
165
+ template <typename T>
166
+ static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) {
167
+ return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
168
+ }
169
+
170
+ namespace detail {
171
+
172
+ /**
173
+ * Helper function for checking the validity of new random generator
174
+ * state. Right now following conditions are checked:
175
+ *
176
+ * - The new state tensor must be a torch.ByteTensor
177
+ * - Data of the new state tensor must be contiguous
178
+ */
179
+ static inline void check_rng_state(const c10::TensorImpl& new_state) {
180
+ TORCH_CHECK_TYPE(
181
+ new_state.layout() == kStrided && new_state.device().type() == kCPU && new_state.dtype() == kByte,
182
+ "RNG state must be a torch.ByteTensor"
183
+ );
184
+
185
+ TORCH_CHECK(new_state.is_contiguous(), "RNG state must be contiguous");
186
+ }
187
+
188
+ } // namespace detail
189
+
190
+ } // namespace at
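A short sketch of the make_generator / check_generator helpers declared above, assuming the CPU backend implementation (at::CPUGeneratorImpl from ATen/CPUGeneratorImpl.h); the backend choice and seed values are illustrative only.

#include <ATen/CPUGeneratorImpl.h>   // assumed backend impl used for illustration
#include <ATen/core/Generator.h>
#include <mutex>

int main() {
  // make_generator wraps a backend GeneratorImpl in the type-erased at::Generator.
  at::Generator gen = at::make_generator<at::CPUGeneratorImpl>(/*seed_in=*/1234);

  {
    // Per Note [Acquire lock when using random generators], guard mutating calls.
    std::lock_guard<std::mutex> lock(gen.mutex());
    gen.set_current_seed(5678);
  }

  // check_generator casts back to the backend type, validating the device type first.
  auto* cpu_impl = at::check_generator<at::CPUGeneratorImpl>(gen);
  return (cpu_impl != nullptr && gen.current_seed() == 5678) ? 0 : 1;
}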
venv/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace at {
+
+ using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>;
+
+ c10::optional<GeneratorFuncType>& GetGeneratorPrivate();
+
+ class TORCH_API _GeneratorRegister {
+ public:
+ explicit _GeneratorRegister(const GeneratorFuncType& func);
+ };
+
+ TORCH_API at::Generator GetGeneratorForPrivateuse1(
+ c10::DeviceIndex device_index);
+
+ /**
+ * This is used to register Generator to PyTorch for `privateuse1` key.
+ *
+ * Usage: REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1)
+ *
+ * class CustomGeneratorImpl : public c10::GeneratorImpl {
+ * CustomGeneratorImpl(DeviceIndex device_index = -1);
+ * explicit ~CustomGeneratorImpl() override = default;
+ * ...
+ * };
+ *
+ * at::Generator MakeGeneratorForPrivateuse1(c10::DeviceIndex id) {
+ * return at::make_generator<CustomGeneratorImpl>(id);
+ * }
+ */
+
+ #define REGISTER_GENERATOR_PRIVATEUSE1(GeneratorPrivate) \
+ static auto temp##GeneratorPrivate = at::_GeneratorRegister(GeneratorPrivate);
+
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h ADDED
@@ -0,0 +1,201 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/List.h>
4
+ #include <ATen/core/Tensor.h>
5
+
6
+ namespace at {
7
+ class Tensor;
8
+ class OptionalTensorRef;
9
+ }
10
+
11
+ namespace c10 {
12
+ namespace detail {
13
+
14
+ /*
15
+ * Specializations of `IListRefTagImplBase` that implement the default
16
+ * implementation for `IListRefTag::Unboxed`.
17
+ */
18
+ template <typename T, typename ListElemT>
19
+ class IListRefTagImplBase<IListRefTag::Unboxed, T, ListElemT> {
20
+ public:
21
+ using elem_type = ListElemT;
22
+ using list_type = ArrayRef<elem_type>;
23
+
24
+ /*
25
+ * These `unwrap` static methods unwraps the inner containers out
26
+ * of `IListRef<T>` (and `IListRefIterator<T>`). They are required when
27
+ * the macro `TORCH_ILISTREF_UNWRAP` is called.
28
+ */
29
+ static const list_type& unwrap(const IListRef<T>& ilist) {
30
+ return ilist.payload_.unboxed;
31
+ }
32
+
33
+ static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
34
+ return it.payload_.unboxed_iterator;
35
+ }
36
+
37
+ static const typename list_type::const_iterator& unwrap(
38
+ const IListRefIterator<T>& it) {
39
+ return it.payload_.unboxed_iterator;
40
+ }
41
+
42
+ /*
43
+ * We have these function (besides the `unwrap`s above) because the
44
+ * implementation for both `IListRef::operator[]` and `IListRefIterator::operator*`
45
+ * weren't syntatically equal for the existing tags at the time
46
+ * (`Unboxed` and `Boxed`).
47
+ */
48
+ static IListRefConstRef<T> front(const list_type& lst) {
49
+ return lst.front();
50
+ }
51
+
52
+ static IListRefConstRef<T> iterator_get(
53
+ const typename list_type::const_iterator& it) {
54
+ return *it;
55
+ }
56
+ };
57
+
58
+ /*
59
+ * Specializations of `IListRefTagImplBase` that implement the default
60
+ * implementation for `IListRefTag::Boxed`.
61
+ */
62
+ template <typename T, typename ListElemT>
63
+ class IListRefTagImplBase<IListRefTag::Boxed, T, ListElemT> {
64
+ public:
65
+ using elem_type = ListElemT;
66
+ using list_type = List<elem_type>;
67
+
68
+ static const list_type& unwrap(const IListRef<T>& ilist) {
69
+ return *ilist.payload_.boxed;
70
+ }
71
+
72
+ static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
73
+ return it.payload_.boxed_iterator;
74
+ }
75
+
76
+ static const typename list_type::const_iterator& unwrap(
77
+ const IListRefIterator<T>& it) {
78
+ return it.payload_.boxed_iterator;
79
+ }
80
+
81
+ static IListRefConstRef<T> front(const list_type& lst) {
82
+ return lst[0];
83
+ }
84
+
85
+ static IListRefConstRef<T> iterator_get(
86
+ const typename list_type::const_iterator& it) {
87
+ return (*it).get().toTensor();
88
+ }
89
+ };
90
+
91
+ /*
92
+ * Specializations of `IListRefTagImplBase` that implement the default
93
+ * implementation for `IListRefTag::Materialized`.
94
+ */
95
+ template <typename T>
96
+ class IListRefTagImplBase<IListRefTag::Materialized, T, MaterializedIListRefElem<T>> {
97
+ public:
98
+ using elem_type = MaterializedIListRefElem<T>;
99
+ using list_type = MaterializedIListRef<T>;
100
+
101
+ static const list_type& unwrap(const IListRef<T>& ilist) {
102
+ return *ilist.payload_.materialized;
103
+ }
104
+
105
+ static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) {
106
+ return it.payload_.materialized_iterator;
107
+ }
108
+
109
+ static const typename list_type::const_iterator& unwrap(
110
+ const IListRefIterator<T>& it) {
111
+ return it.payload_.materialized_iterator;
112
+ }
113
+
114
+ static IListRefConstRef<T> front(const list_type& lst) {
115
+ return lst[0];
116
+ }
117
+
118
+ static IListRefConstRef<T> iterator_get(
119
+ const typename list_type::const_iterator& it) {
120
+ return *it;
121
+ }
122
+ };
123
+
124
+ /*
125
+ * [Note: ITensorListRef]
126
+ * Specializations necessary for `IListRef<at::Tensor>` type.
127
+ *
128
+ * Since the default implementations are usually done with supporting
129
+ * `Tensor` in mind, we only have to inherit from the base implementations.
130
+ */
131
+ template <>
132
+ class IListRefTagImpl<IListRefTag::Unboxed, at::Tensor>
133
+ : public IListRefTagImplBase<IListRefTag::Unboxed, at::Tensor> {};
134
+
135
+ template <>
136
+ class IListRefTagImpl<IListRefTag::Boxed, at::Tensor>
137
+ : public IListRefTagImplBase<IListRefTag::Boxed, at::Tensor> {};
138
+
139
+ template <>
140
+ class IListRefTagImpl<IListRefTag::Materialized, at::Tensor>
141
+ : public IListRefTagImplBase<
142
+ IListRefTag::Materialized,
143
+ at::Tensor,
144
+ MaterializedIListRefElem<at::Tensor>> {};
145
+
146
+ /*
147
+ * [Note: IOptTensorListRef]
148
+ * Specializations necessary for `IListRef<at::OptionalTensorRef>` type.
149
+ *
150
+ * We can't get an `at::OptionalTensorRef` directly from an instance of
151
+ * `List<optional<Tensor>>` (the type that corresponds to the boxed world).
152
+ *
153
+ * So, the default implementation won't help us. Thus, we have to implement
154
+ * this method ourselves.
155
+ */
156
+ template <>
157
+ class IListRefTagImpl<IListRefTag::Unboxed, at::OptionalTensorRef>
158
+ : public IListRefTagImplBase<IListRefTag::Unboxed, at::OptionalTensorRef> {};
159
+
160
+ template <>
161
+ class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef>
162
+ : public IListRefTagImplBase<IListRefTag::Boxed, at::OptionalTensorRef, optional<at::Tensor>> {
163
+
164
+ public:
165
+ /*
166
+ * Given an instance of the types corresponding to the `Boxed` tag, we override
167
+ * the default implementation, so that we can return a `at::OptionalTensorRef`.
168
+ */
169
+ static IListRefConstRef<at::OptionalTensorRef> iterator_get(
170
+ const typename list_type::const_iterator& it) {
171
+ const auto& ivalue = (*it).get();
172
+ if (!ivalue.isNone()) {
173
+ const auto& tensor = ivalue.toTensor();
174
+ return (tensor.defined()) ? tensor : at::OptionalTensorRef{};
175
+ }
176
+ return {};
177
+ }
178
+ };
179
+
180
+ template <>
181
+ class IListRefTagImpl<IListRefTag::Materialized, at::OptionalTensorRef>
182
+ : public IListRefTagImplBase<
183
+ IListRefTag::Materialized,
184
+ at::OptionalTensorRef,
185
+ MaterializedIListRefElem<at::OptionalTensorRef>> {};
186
+
187
+ } // namespace detail
188
+ } // namespace c10
189
+
190
+ namespace at {
191
+
192
+ // [Note: ITensorListRef]
193
+ using ITensorListRef = c10::IListRef<at::Tensor>;
194
+ using ITensorListRefIterator = c10::IListRefIterator<at::Tensor>;
195
+ using MaterializedITensorListRef = c10::detail::MaterializedIListRef<at::Tensor>;
196
+ // [Note: IOptTensorListRef]
197
+ using IOptTensorListRef = c10::IListRef<at::OptionalTensorRef>;
198
+ using IOptTensorListRefIterator = c10::IListRefIterator<at::OptionalTensorRef>;
199
+ using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef<at::OptionalTensorRef>;
200
+
201
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/List.h ADDED
@@ -0,0 +1,490 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue_to.h>
4
+ #include <ATen/core/jit_type_base.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/util/TypeTraits.h>
8
+ #include <c10/util/TypeList.h>
9
+ #include <c10/util/intrusive_ptr.h>
10
+ #include <c10/util/ArrayRef.h>
11
+ #include <c10/util/Optional.h>
12
+ #include <vector>
13
+
14
+ namespace at {
15
+ class Tensor;
16
+ }
17
+ namespace c10 {
18
+ struct IValue;
19
+ template<class T> class List;
20
+ struct Type;
21
+
22
+ namespace detail {
23
+
24
+ struct ListImpl final : public c10::intrusive_ptr_target {
25
+ using list_type = std::vector<IValue>;
26
+
27
+ explicit TORCH_API ListImpl(list_type list_, TypePtr elementType_);
28
+
29
+ list_type list;
30
+
31
+ TypePtr elementType;
32
+
33
+ intrusive_ptr<ListImpl> copy() const {
34
+ return make_intrusive<ListImpl>(list, elementType);
35
+ }
36
+ friend TORCH_API bool operator==(const ListImpl& lhs, const ListImpl& rhs);
37
+ };
38
+ }
39
+
40
+ namespace impl {
41
+
42
+ template<class T, class Iterator> class ListIterator;
43
+
44
+ template<class T, class Iterator> class ListElementReference;
45
+
46
+ template<class T, class Iterator>
47
+ void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs);
48
+
49
+ template<class T, class Iterator>
50
+ bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs);
51
+
52
+ template<class T, class Iterator>
53
+ bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs);
54
+
55
+ template<class T>
56
+ struct ListElementConstReferenceTraits {
57
+ // In the general case, we use IValue::to().
58
+ using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return<T>::type;
59
+ };
60
+
61
+ // There is no to() overload for c10::optional<std::string>.
62
+ template<>
63
+ struct ListElementConstReferenceTraits<c10::optional<std::string>> {
64
+ using const_reference = c10::optional<std::reference_wrapper<const std::string>>;
65
+ };
66
+
67
+ template<class T, class Iterator>
68
+ class ListElementReference final {
69
+ public:
70
+ operator std::conditional_t<
71
+ std::is_reference<typename c10::detail::
72
+ ivalue_to_const_ref_overload_return<T>::type>::value,
73
+ const T&,
74
+ T>() const;
75
+
76
+ ListElementReference& operator=(T&& new_value) &&;
77
+
78
+ ListElementReference& operator=(const T& new_value) &&;
79
+
80
+ // assigning another ref to this assigns the underlying value
81
+ ListElementReference& operator=(ListElementReference&& rhs) && noexcept;
82
+
83
+ const IValue& get() const& {
84
+ return *iterator_;
85
+ }
86
+
87
+ friend void swap<T, Iterator>(ListElementReference&& lhs, ListElementReference&& rhs);
88
+
89
+ ListElementReference(const ListElementReference&) = delete;
90
+ ListElementReference& operator=(const ListElementReference&) = delete;
91
+
92
+ private:
93
+ ListElementReference(Iterator iter)
94
+ : iterator_(iter) {}
95
+
96
+ // allow moving, but only our friends (i.e. the List class) can move us
97
+ ListElementReference(ListElementReference&&) noexcept = default;
98
+ ListElementReference& operator=(ListElementReference&& rhs) & noexcept {
99
+ iterator_ = std::move(rhs.iterator_);
100
+ return *this;
101
+ }
102
+
103
+ friend class List<T>;
104
+ friend class ListIterator<T, Iterator>;
105
+
106
+ Iterator iterator_;
107
+ };
108
+
109
+ // this wraps vector::iterator to make sure user code can't rely
110
+ // on it being the type of the underlying vector.
111
+ template <class T, class Iterator>
112
+ class ListIterator final {
113
+ public:
114
+ // C++17 friendly std::iterator implementation
115
+ using iterator_category = std::random_access_iterator_tag;
116
+ using value_type = T;
117
+ using difference_type = std::ptrdiff_t;
118
+ using pointer = T*;
119
+ using reference = ListElementReference<T, Iterator>;
120
+
121
+ explicit ListIterator() = default;
122
+ ~ListIterator() = default;
123
+
124
+ ListIterator(const ListIterator&) = default;
125
+ ListIterator(ListIterator&&) noexcept = default;
126
+ ListIterator& operator=(const ListIterator&) = default;
127
+ ListIterator& operator=(ListIterator&&) noexcept = default;
128
+
129
+ ListIterator& operator++() {
130
+ ++iterator_;
131
+ return *this;
132
+ }
133
+
134
+ ListIterator operator++(int) {
135
+ ListIterator copy(*this);
136
+ ++*this;
137
+ return copy;
138
+ }
139
+
140
+ ListIterator& operator--() {
141
+ --iterator_;
142
+ return *this;
143
+ }
144
+
145
+ ListIterator operator--(int) {
146
+ ListIterator copy(*this);
147
+ --*this;
148
+ return copy;
149
+ }
150
+
151
+ ListIterator& operator+=(typename List<T>::size_type offset) {
152
+ iterator_ += offset;
153
+ return *this;
154
+ }
155
+
156
+ ListIterator& operator-=(typename List<T>::size_type offset) {
157
+ iterator_ -= offset;
158
+ return *this;
159
+ }
160
+
161
+ ListIterator operator+(typename List<T>::size_type offset) const {
162
+ return ListIterator{iterator_ + offset};
163
+ }
164
+
165
+ ListIterator operator-(typename List<T>::size_type offset) const {
166
+ return ListIterator{iterator_ - offset};
167
+ }
168
+
169
+ friend difference_type operator-(const ListIterator& lhs, const ListIterator& rhs) {
170
+ return lhs.iterator_ - rhs.iterator_;
171
+ }
172
+
173
+ ListElementReference<T, Iterator> operator*() const {
174
+ return {iterator_};
175
+ }
176
+
177
+ ListElementReference<T, Iterator> operator[](typename List<T>::size_type offset) const {
178
+ return {iterator_ + offset};
179
+ }
180
+
181
+ private:
182
+ explicit ListIterator(Iterator iterator): iterator_(std::move(iterator)) {}
183
+
184
+ Iterator iterator_;
185
+
186
+ friend bool operator==(const ListIterator& lhs, const ListIterator& rhs) {
187
+ return lhs.iterator_ == rhs.iterator_;
188
+ }
189
+
190
+ friend bool operator!=(const ListIterator& lhs, const ListIterator& rhs) {
191
+ return !(lhs == rhs);
192
+ }
193
+
194
+ friend bool operator<(const ListIterator& lhs, const ListIterator& rhs) {
195
+ return lhs.iterator_ < rhs.iterator_;
196
+ }
197
+
198
+ friend bool operator<=(const ListIterator& lhs, const ListIterator& rhs) {
199
+ return lhs.iterator_ <= rhs.iterator_;
200
+ }
201
+
202
+ friend bool operator>(const ListIterator& lhs, const ListIterator& rhs) {
203
+ return lhs.iterator_ > rhs.iterator_;
204
+ }
205
+
206
+ friend bool operator>=(const ListIterator& lhs, const ListIterator& rhs) {
207
+ return lhs.iterator_ >= rhs.iterator_;
208
+ }
209
+
210
+ friend class ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
211
+ friend class List<T>;
212
+ };
213
+
214
+ template<class T> List<T> toTypedList(List<IValue> list);
215
+ template<class T> List<IValue> toList(List<T>&& list);
216
+ template<class T> List<IValue> toList(const List<T>& list);
217
+ const IValue* ptr_to_first_element(const List<IValue>& list);
218
+ }
219
+
220
+ /**
221
+ * An object of this class stores a list of values of type T.
222
+ *
223
+ * This is a pointer type. After a copy, both Lists
224
+ * will share the same storage:
225
+ *
226
+ * > List<std::string> a;
227
+ * > List<std::string> b = a;
228
+ * > b.push_back("three");
229
+ * > ASSERT("three" == a.get(0));
230
+ *
231
+ * We use this class in the PyTorch kernel API instead of
232
+ * std::vector<T>, because that allows us to do optimizations
233
+ * and switch out the underlying list implementation without
234
+ * breaking backwards compatibility for the kernel API.
235
+ */
236
+ template<class T>
237
+ class List final {
238
+ private:
239
+ // This is an intrusive_ptr because List is a pointer type.
240
+ // Invariant: This will never be a nullptr, there will always be a valid
241
+ // ListImpl.
242
+ c10::intrusive_ptr<c10::detail::ListImpl> impl_;
243
+
244
+ using internal_reference_type = impl::ListElementReference<T, typename c10::detail::ListImpl::list_type::iterator>;
245
+ using internal_const_reference_type = typename impl::ListElementConstReferenceTraits<T>::const_reference;
246
+
247
+ public:
248
+ using value_type = T;
249
+ using size_type = typename c10::detail::ListImpl::list_type::size_type;
250
+ using iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
251
+ using const_iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::iterator>;
252
+ using reverse_iterator = impl::ListIterator<T, typename c10::detail::ListImpl::list_type::reverse_iterator>;
253
+
254
+ /**
255
+ * Constructs an empty list.
256
+ */
257
+ explicit List();
258
+
259
+ /**
260
+ * Constructs a list with some initial values.
261
+ * Example:
262
+ * List<int> a({2, 3, 4});
263
+ */
264
+ List(std::initializer_list<T> initial_values);
265
+ explicit List(ArrayRef<T> initial_values);
266
+
267
+ /**
268
+ * Create a generic list with runtime type information.
269
+ * This only works for c10::impl::GenericList and is not part of the public API
270
+ * but is only supposed to be used internally by PyTorch.
271
+ */
272
+ explicit List(TypePtr elementType);
273
+
274
+ List(const List&) = default;
275
+ List& operator=(const List&) = default;
276
+
277
+ /**
278
+ * Create a new List pointing to a deep copy of the same data.
279
+ * The List returned is a new list with separate storage.
280
+ * Changes in it are not reflected in the original list or vice versa.
281
+ */
282
+ List copy() const;
283
+
284
+ /**
285
+ * Returns the element at specified location pos, with bounds checking.
286
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
287
+ */
288
+ internal_const_reference_type get(size_type pos) const;
289
+
290
+ /**
291
+ * Moves out the element at the specified location pos and returns it, with bounds checking.
292
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
293
+ * The list contains an invalid element at position pos afterwards. Any operations
294
+ * on it before re-setting it are invalid.
295
+ */
296
+ value_type extract(size_type pos) const;
297
+
298
+ /**
299
+ * Returns a reference to the element at specified location pos, with bounds checking.
300
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
301
+ *
302
+ * You cannot store the reference, but you can read it and assign new values to it:
303
+ *
304
+ * List<int64_t> list = ...;
305
+ * list[2] = 5;
306
+ * int64_t v = list[1];
307
+ */
308
+ internal_const_reference_type operator[](size_type pos) const;
309
+
310
+ internal_reference_type operator[](size_type pos);
311
+
312
+ /**
313
+ * Assigns a new value to the element at location pos.
314
+ */
315
+ void set(size_type pos, const value_type& value) const;
316
+
317
+ /**
318
+ * Assigns a new value to the element at location pos.
319
+ */
320
+ void set(size_type pos, value_type&& value) const;
321
+
322
+ /**
323
+ * Returns an iterator to the first element of the container.
324
+ * If the container is empty, the returned iterator will be equal to end().
325
+ */
326
+ iterator begin() const;
327
+
328
+ /**
329
+ * Returns an iterator to the element following the last element of the container.
330
+ * This element acts as a placeholder; attempting to access it results in undefined behavior.
331
+ */
332
+ iterator end() const;
333
+
334
+ /**
335
+ * Checks if the container has no elements.
336
+ */
337
+ bool empty() const;
338
+
339
+ /**
340
+ * Returns the number of elements in the container.
341
+ */
342
+ size_type size() const;
343
+
344
+ /**
345
+ * Increase the capacity of the vector to a value that's greater than or equal to new_cap.
346
+ */
347
+ void reserve(size_type new_cap) const;
348
+
349
+ /**
350
+ * Erases all elements from the container. After this call, size() returns zero.
351
+ * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated.
352
+ */
353
+ void clear() const;
354
+
355
+ /**
356
+ * Inserts value before pos.
357
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
358
+ */
359
+ iterator insert(iterator pos, const T& value) const;
360
+
361
+ /**
362
+ * Inserts value before pos.
363
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
364
+ */
365
+ iterator insert(iterator pos, T&& value) const;
366
+
367
+ /**
368
+ * Inserts a new element into the container directly before pos.
369
+ * The new element is constructed with the given arguments.
370
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
371
+ */
372
+ template<class... Args>
373
+ iterator emplace(iterator pos, Args&&... value) const;
374
+
375
+ /**
376
+ * Appends the given element value to the end of the container.
377
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
378
+ */
379
+ void push_back(const T& value) const;
380
+
381
+ /**
382
+ * Appends the given element value to the end of the container.
383
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
384
+ */
385
+ void push_back(T&& value) const;
386
+
387
+ /**
388
+ * Appends the given list to the end of the container. Uses at most one memory allocation.
389
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
390
+ */
391
+ void append(List<T> lst) const;
392
+
393
+ /**
394
+ * Appends the given element value to the end of the container.
395
+ * The new element is constructed with the given arguments.
396
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
397
+ */
398
+ template<class... Args>
399
+ void emplace_back(Args&&... args) const;
400
+
401
+ /**
402
+ * Removes the element at pos.
403
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
404
+ */
405
+ iterator erase(iterator pos) const;
406
+
407
+ /**
408
+ * Removes the elements in the range [first, last).
409
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
410
+ */
411
+ iterator erase(iterator first, iterator last) const;
412
+
413
+ /**
414
+ * Removes the last element of the container.
415
+ * Calling pop_back on an empty container is undefined.
416
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
417
+ */
418
+ void pop_back() const;
419
+
420
+ /**
421
+ * Resizes the container to contain count elements.
422
+ * If the current size is less than count, additional default-inserted elements are appended.
423
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
424
+ */
425
+ void resize(size_type count) const;
426
+
427
+ /**
428
+ * Resizes the container to contain count elements.
429
+ * If the current size is less than count, additional copies of value are appended.
430
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
431
+ */
432
+ void resize(size_type count, const T& value) const;
433
+
434
+ /**
435
+ * Value equality comparison. This function implements Python-like semantics for
436
+ * equality: two lists with the same identity (e.g. same pointer) trivially
437
+ * compare equal, otherwise each element is compared for equality.
438
+ */
439
+ template <class T_>
440
+ friend bool operator==(const List<T_>& lhs, const List<T_>& rhs);
441
+
442
+ template <class T_>
443
+ friend bool operator!=(const List<T_>& lhs, const List<T_>& rhs);
444
+
445
+ /**
446
+ * Identity comparison. Returns true if and only if `rhs` represents the same
447
+ * List object as `this`.
448
+ */
449
+ bool is(const List<T>& rhs) const;
450
+
451
+ std::vector<T> vec() const;
452
+
453
+ /**
454
+ * Returns the number of Lists currently pointing to this same list.
455
+ * If this is the only instance pointing to this list, returns 1.
456
+ */
457
+ // TODO Test use_count
458
+ size_t use_count() const;
459
+
460
+ TypePtr elementType() const;
461
+
462
+ // See [unsafe set type] for why this exists.
463
+ void unsafeSetElementType(TypePtr t);
464
+
465
+ private:
466
+ explicit List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements);
467
+ explicit List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements);
468
+ friend struct IValue;
469
+ template<class T_> friend List<T_> impl::toTypedList(List<IValue>);
470
+ template<class T_> friend List<IValue> impl::toList(List<T_>&&);
471
+ template<class T_> friend List<IValue> impl::toList(const List<T_>&);
472
+ friend const IValue* impl::ptr_to_first_element(const List<IValue>& list);
473
+ };
474
+
475
+ namespace impl {
476
+ // GenericList is how IValue stores lists. It is, however, not part of the
477
+ // public API. Kernels should use Lists with concrete types instead
478
+ // (maybe except for some internal prim ops).
479
+ using GenericList = List<IValue>;
480
+
481
+ const IValue* ptr_to_first_element(const GenericList& list);
482
+
483
+ }
484
+ }
485
+
486
+ namespace torch {
487
+ template<class T> using List = c10::List<T>;
488
+ }
489
+
490
+ #include <ATen/core/List_inl.h> // IWYU pragma: keep
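To make the pointer semantics documented above concrete, here is a minimal usage sketch (illustrative only, not part of the header; it assumes the usual ATen headers and a C++17 toolchain):

    #include <ATen/core/List.h>
    #include <cassert>
    #include <cstdint>

    void list_semantics_example() {
      c10::List<int64_t> a({2, 3, 4});
      c10::List<int64_t> b = a;          // copy shares storage with a
      b.push_back(5);
      assert(a.size() == 4);             // the new element is visible through a
      a.set(0, 10);
      assert(b.get(0) == 10);            // writes through a are visible through b

      c10::List<int64_t> c = a.copy();   // deep copy: separate storage
      c.push_back(6);
      assert(a.size() == 4);             // a is unaffected
    }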
venv/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h ADDED
@@ -0,0 +1,360 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type_base.h>
4
+ #include <ATen/core/ivalue.h>
5
+
6
+ namespace c10 {
7
+
8
+ template<class T> decltype(auto) getTypePtr();
9
+ std::string toString(const Type& type);
10
+
11
+ template<class T>
12
+ List<T>::List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements)
13
+ : impl_(std::move(elements)) {}
14
+
15
+ template<class T>
16
+ List<T>::List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements)
17
+ : impl_(elements) {}
18
+
19
+ template<class T>
20
+ List<T>::List()
21
+ : List(make_intrusive<c10::detail::ListImpl>(
22
+ typename c10::detail::ListImpl::list_type(),
23
+ getTypePtr<T>())) {
24
+ static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType) instead.");
25
+ }
26
+
27
+ template<class T>
28
+ List<T>::List(ArrayRef<T> values)
29
+ : List(make_intrusive<c10::detail::ListImpl>(
30
+ typename c10::detail::ListImpl::list_type(),
31
+ getTypePtr<T>())) {
32
+ static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
33
+ impl_->list.reserve(values.size());
34
+ for (const T& element : values) {
35
+ impl_->list.push_back(element);
36
+ }
37
+ }
38
+
39
+ template<class T>
40
+ List<T>::List(std::initializer_list<T> initial_values)
41
+ : List(ArrayRef<T>(initial_values)) {
42
+ static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
43
+ }
44
+
45
+ template<class T>
46
+ List<T>::List(TypePtr elementType)
47
+ : List(make_intrusive<c10::detail::ListImpl>(
48
+ typename c10::detail::ListImpl::list_type(),
49
+ std::move(elementType))) {
50
+ static_assert(std::is_same<T, IValue>::value || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
51
+ "This constructor is only valid for c10::impl::GenericList or List<Future>.");
52
+ }
53
+
54
+ namespace impl {
55
+ template<class T>
56
+ List<T> toTypedList(impl::GenericList list) {
57
+ // If there's other instances of the list (i.e. list.use_count() > 1), then we have to be invariant
58
+ // because upcasting would allow people to add types into the new list that would break the old list.
59
+ // However, if there aren't any other instances of this list (i.e. list.use_count() == 1), then we can
60
+ // allow upcasting. This can be a perf improvement since we can cast List<T> to List<optional<T>>
61
+ // without having to copy it. This is also used to provide backwards compatibility with some old models
62
+ // that serialized the index arguments to aten::index, aten::index_put, aten::index_put_ and aten::index_put_impl_
63
+ // as List<Tensor> before we changed that argument to be List<optional<Tensor>>. When deserializing, we
64
+ // have list.use_count() == 1 and can deserialize the List<Tensor> directly as List<optional<Tensor>>.
65
+ TORCH_CHECK(*list.impl_->elementType == *getTypePtr<T>()
66
+ || (list.use_count() == 1 && list.impl_->elementType->isSubtypeOf(*getTypePtr<T>()))
67
+ , "Tried to cast a List<", toString(*list.impl_->elementType), "> to a List<", toString(*getTypePtr<T>()), ">. Types mismatch.");
68
+ return List<T>(std::move(list.impl_));
69
+ }
70
+
71
+ template<class T>
72
+ impl::GenericList toList(List<T>&& list) {
73
+ return GenericList(std::move(list.impl_));
74
+ }
75
+ template<class T>
76
+ impl::GenericList toList(const List<T>& list) {
77
+ return GenericList(list.impl_);
78
+ }
79
+ }
80
+
81
+ template<class T>
82
+ List<T> List<T>::copy() const {
83
+ return List<T>(impl_->copy());
84
+ }
85
+
86
+ namespace detail {
87
+ template<class T>
88
+ T list_element_to(T element) {
89
+ return element;
90
+ }
91
+ template<class T>
92
+ T list_element_to(const IValue& element) {
93
+ return element.template to<T>();
94
+ }
95
+ template<class T>
96
+ T list_element_to(IValue&& element) {
97
+ return std::move(element).template to<T>();
98
+ }
99
+ template<class T>
100
+ struct ListElementFrom {
101
+ static IValue from(const T& element) {
102
+ return element;
103
+ }
104
+ static IValue from(T&& element) {
105
+ return std::move(element);
106
+ }
107
+ };
108
+ template<>
109
+ struct ListElementFrom<IValue> {
110
+ static const IValue& from(const IValue& element) {
111
+ return element;
112
+ }
113
+ static IValue&& from(IValue&& element) {
114
+ return std::move(element);
115
+ }
116
+ };
117
+ }
118
+
119
+ namespace impl {
120
+
121
+ template <class T, class Iterator>
122
+ ListElementReference<T, Iterator>::operator std::conditional_t<
123
+ std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<
124
+ T>::type>::value,
125
+ const T&,
126
+ T>() const {
127
+ return iterator_->template to<T>();
128
+ }
129
+
130
+ template<class T, class Iterator>
131
+ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(T&& new_value) && {
132
+ *iterator_ = c10::detail::ListElementFrom<T>::from(std::move(new_value));
133
+ return *this;
134
+ }
135
+
136
+ template<class T, class Iterator>
137
+ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(const T& new_value) && {
138
+ *iterator_ = c10::detail::ListElementFrom<T>::from(new_value);
139
+ return *this;
140
+ }
141
+
142
+ template<class T, class Iterator>
143
+ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference<T, Iterator>&& rhs) && noexcept {
144
+ *iterator_ = *rhs.iterator_;
145
+ return *this;
146
+ }
147
+
148
+ template<class T, class Iterator>
149
+ void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs) {
150
+ std::swap(*lhs.iterator_, *rhs.iterator_);
151
+ }
152
+
153
+ template<class T, class Iterator>
154
+ bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs) {
155
+ const T& lhs_tmp = lhs;
156
+ return lhs_tmp == rhs;
157
+ }
158
+
159
+ template<class T, class Iterator>
160
+ inline bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs) {
161
+ return rhs == lhs;
162
+ }
163
+
164
+ template<class T>
165
+ inline typename ListElementConstReferenceTraits<T>::const_reference
166
+ list_element_to_const_ref(const IValue& element) {
167
+ return element.template to<T>();
168
+ }
169
+
170
+ template<>
171
+ inline typename ListElementConstReferenceTraits<c10::optional<std::string>>::const_reference
172
+ list_element_to_const_ref<c10::optional<std::string>>(const IValue& element) {
173
+ return element.toOptionalStringRef();
174
+ }
175
+
176
+ } // namespace impl
177
+
178
+ template<class T>
179
+ void List<T>::set(size_type pos, const value_type& value) const {
180
+ impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(value);
181
+ }
182
+
183
+ template<class T>
184
+ void List<T>::set(size_type pos, value_type&& value) const {
185
+ impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(std::move(value));
186
+ }
187
+
188
+ template<class T>
189
+ typename List<T>::internal_const_reference_type List<T>::get(size_type pos) const {
190
+ return operator[](pos);
191
+ }
192
+
193
+ template<class T>
194
+ typename List<T>::internal_const_reference_type List<T>::operator[](size_type pos) const {
195
+ return c10::impl::list_element_to_const_ref<T>(impl_->list.at(pos));
196
+ }
197
+
198
+ template<class T>
199
+ typename List<T>::internal_reference_type List<T>::operator[](size_type pos) {
200
+ static_cast<void>(impl_->list.at(pos)); // Throw the exception if it is out of range.
201
+ return {impl_->list.begin() + static_cast<typename decltype(impl_->list)::difference_type>(pos)};
202
+ }
203
+
204
+ template<class T>
205
+ typename List<T>::value_type List<T>::extract(size_type pos) const {
206
+ auto& elem = impl_->list.at(pos);
207
+ auto result = c10::detail::list_element_to<T>(std::move(elem));
208
+ // Reset the list element to a T() instead of None to keep it correctly typed
209
+ elem = c10::detail::ListElementFrom<T>::from(T{});
210
+ return result;
211
+ }
212
+
213
+ template<class T>
214
+ typename List<T>::iterator List<T>::begin() const {
215
+ return iterator(impl_->list.begin());
216
+ }
217
+
218
+ template<class T>
219
+ typename List<T>::iterator List<T>::end() const {
220
+ return iterator(impl_->list.end());
221
+ }
222
+
223
+ template<class T>
224
+ bool List<T>::empty() const {
225
+ return impl_->list.empty();
226
+ }
227
+
228
+ template<class T>
229
+ typename List<T>::size_type List<T>::size() const {
230
+ return impl_->list.size();
231
+ }
232
+
233
+ template<class T>
234
+ void List<T>::reserve(size_type new_cap) const {
235
+ impl_->list.reserve(new_cap);
236
+ }
237
+
238
+ template<class T>
239
+ void List<T>::clear() const {
240
+ impl_->list.clear();
241
+ }
242
+
243
+ template<class T>
244
+ typename List<T>::iterator List<T>::insert(iterator pos, const T& value) const {
245
+ return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(value)) };
246
+ }
247
+
248
+ template<class T>
249
+ typename List<T>::iterator List<T>::insert(iterator pos, T&& value) const {
250
+ return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(std::move(value))) };
251
+ }
252
+
253
+ template<class T>
254
+ template<class... Args>
255
+ typename List<T>::iterator List<T>::emplace(iterator pos, Args&&... value) const {
256
+ // TODO Use list_element_from?
257
+ return iterator { impl_->list.emplace(pos.iterator_, std::forward<Args>(value)...) };
258
+ }
259
+
260
+ template<class T>
261
+ void List<T>::push_back(const T& value) const {
262
+ impl_->list.push_back(c10::detail::ListElementFrom<T>::from(value));
263
+ }
264
+
265
+ template<class T>
266
+ void List<T>::push_back(T&& value) const {
267
+ impl_->list.push_back(c10::detail::ListElementFrom<T>::from(std::move(value)));
268
+ }
269
+
270
+ template<class T>
271
+ void List<T>::append(List<T> b) const {
272
+ if (b.use_count() == 1) {
273
+ impl_->list.insert(impl_->list.end(), make_move_iterator(b.impl_->list.begin()), make_move_iterator(b.impl_->list.end()));
274
+ } else {
275
+ impl_->list.insert(impl_->list.end(), b.impl_->list.begin(), b.impl_->list.end());
276
+ }
277
+ }
278
+
279
+ template<class T>
280
+ template<class... Args>
281
+ void List<T>::emplace_back(Args&&... args) const {
282
+ // TODO Use list_element_from?
283
+ impl_->list.push_back(T(std::forward<Args>(args)...));
284
+ }
285
+
286
+ template<class T>
287
+ typename List<T>::iterator List<T>::erase(iterator pos) const {
288
+ return iterator { impl_->list.erase(pos.iterator_) };
289
+ }
290
+
291
+ template<class T>
292
+ typename List<T>::iterator List<T>::erase(iterator first, iterator last) const {
293
+ return iterator { impl_->list.erase(first.iterator_, last.iterator_) };
294
+ }
295
+
296
+ template<class T>
297
+ void List<T>::pop_back() const {
298
+ impl_->list.pop_back();
299
+ }
300
+
301
+ template<class T>
302
+ void List<T>::resize(size_type count) const {
303
+ impl_->list.resize(count, T{});
304
+ }
305
+
306
+ template<class T>
307
+ void List<T>::resize(size_type count, const T& value) const {
308
+ impl_->list.resize(count, value);
309
+ }
310
+
311
+ template<class T>
312
+ bool operator==(const List<T>& lhs, const List<T>& rhs) {
313
+ // Lists with the same identity trivially compare equal.
314
+ if (lhs.impl_ == rhs.impl_) {
315
+ return true;
316
+ }
317
+
318
+ // Otherwise, just compare values directly.
319
+ return *lhs.impl_ == *rhs.impl_;
320
+ }
321
+
322
+ template<class T>
323
+ bool operator!=(const List<T>& lhs, const List<T>& rhs) {
324
+ return !(lhs == rhs);
325
+ }
326
+
327
+ template<class T>
328
+ bool List<T>::is(const List<T>& rhs) const {
329
+ return this->impl_ == rhs.impl_;
330
+ }
331
+
332
+ template<class T>
333
+ std::vector<T> List<T>::vec() const {
334
+ std::vector<T> result(begin(), end());
335
+ return result;
336
+ }
337
+
338
+ template<class T>
339
+ size_t List<T>::use_count() const {
340
+ return impl_.use_count();
341
+ }
342
+
343
+ template <class T>
344
+ TypePtr List<T>::elementType() const {
345
+ return impl_->elementType;
346
+ }
347
+
348
+ template <class T>
349
+ void List<T>::unsafeSetElementType(TypePtr t) {
350
+ impl_->elementType = std::move(t);
351
+ }
352
+
353
+ namespace impl {
354
+
355
+ inline const IValue* ptr_to_first_element(const GenericList& list) {
356
+ return &list.impl_->list[0];
357
+ }
358
+
359
+ }
360
+ }
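A small sketch of how the element-access paths implemented above behave in practice (illustrative only; not code from this header):

    #include <ATen/core/List.h>
    #include <cstdint>

    void element_access_example() {
      c10::List<int64_t> list({1, 2, 3});
      list[1] = 42;                     // operator[] returns a ListElementReference proxy;
                                        // assignment stores through ListElementFrom
      int64_t v = list[1];              // the proxy converts back to T on read
      int64_t moved = list.extract(2);  // moves the value out; the slot is reset to T{}
      (void)v;
      (void)moved;
    }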
venv/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h ADDED
@@ -0,0 +1,194 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+
5
+ // define constants like M_PI and C keywords for MSVC
6
+ #ifdef _MSC_VER
7
+ #ifndef _USE_MATH_DEFINES
8
+ #define _USE_MATH_DEFINES
9
+ #endif
10
+ #include <math.h>
11
+ #endif
12
+
13
+ #include <array>
14
+ #include <cmath>
15
+ #include <cstdint>
16
+
17
+ namespace at {
18
+
19
+ constexpr int MERSENNE_STATE_N = 624;
20
+ constexpr int MERSENNE_STATE_M = 397;
21
+ constexpr uint32_t MATRIX_A = 0x9908b0df;
22
+ constexpr uint32_t UMASK = 0x80000000;
23
+ constexpr uint32_t LMASK = 0x7fffffff;
24
+
25
+ /**
26
+ * Note [Mt19937 Engine implementation]
27
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28
+ * Originally implemented in:
29
+ * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c
30
+ * and modified with C++ constructs. Moreover the state array of the engine
31
+ * has been modified to hold 32 bit uints instead of 64 bits.
32
+ *
33
+ * Note that we reimplemented mt19937 instead of using std::mt19937 because
34
+ * at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2
35
+ * by default and the following are the benchmark numbers (benchmark code can be found at
36
+ * https://github.com/syed-ahmed/benchmark-rngs):
37
+ *
38
+ * with -O2
39
+ * Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s
40
+ * Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s
41
+ * Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s
42
+ * Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s
43
+ *
44
+ * std::mt19937 is faster when used in conjunction with std::uniform_real_distribution,
45
+ * however we can't use std::uniform_real_distribution because of this bug:
46
+ * http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used
47
+ * std::uniform_real_distribution and filtered out the 1's, it is a different algorithm
48
+ * than what's in pytorch currently and that messes up the tests in tests_distributions.py.
49
+ * The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower
50
+ * than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter.
51
+ *
52
+ * Copyright notice:
53
+ * A C-program for MT19937, with initialization improved 2002/2/10.
54
+ * Coded by Takuji Nishimura and Makoto Matsumoto.
55
+ * This is a faster version by taking Shawn Cokus's optimization,
56
+ * Matthe Bellew's simplification, Isaku Wada's real version.
57
+ *
58
+ * Before using, initialize the state by using init_genrand(seed)
59
+ * or init_by_array(init_key, key_length).
60
+ *
61
+ * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
62
+ * All rights reserved.
63
+ *
64
+ * Redistribution and use in source and binary forms, with or without
65
+ * modification, are permitted provided that the following conditions
66
+ * are met:
67
+ *
68
+ * 1. Redistributions of source code must retain the above copyright
69
+ * notice, this list of conditions and the following disclaimer.
70
+ *
71
+ * 2. Redistributions in binary form must reproduce the above copyright
72
+ * notice, this list of conditions and the following disclaimer in the
73
+ * documentation and/or other materials provided with the distribution.
74
+ *
75
+ * 3. The names of its contributors may not be used to endorse or promote
76
+ * products derived from this software without specific prior written
77
+ * permission.
78
+ *
79
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
80
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
81
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
82
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
83
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
84
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
85
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
86
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
87
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
88
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
89
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90
+ *
91
+ *
92
+ * Any feedback is very welcome.
93
+ * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
94
+ * email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
95
+ */
96
+
97
+ /**
98
+ * mt19937_data_pod is used to get POD data in and out
99
+ * of mt19937_engine. Used in torch.get_rng_state and
100
+ * torch.set_rng_state functions.
101
+ */
102
+ struct mt19937_data_pod {
103
+ uint64_t seed_;
104
+ int left_;
105
+ bool seeded_;
106
+ uint32_t next_;
107
+ std::array<uint32_t, MERSENNE_STATE_N> state_;
108
+ };
109
+
110
+ class mt19937_engine {
111
+ public:
112
+
113
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
114
+ inline explicit mt19937_engine(uint64_t seed = 5489) {
115
+ init_with_uint32(seed);
116
+ }
117
+
118
+ inline mt19937_data_pod data() const {
119
+ return data_;
120
+ }
121
+
122
+ inline void set_data(const mt19937_data_pod& data) {
123
+ data_ = data;
124
+ }
125
+
126
+ inline uint64_t seed() const {
127
+ return data_.seed_;
128
+ }
129
+
130
+ inline bool is_valid() {
131
+ if ((data_.seeded_ == true)
132
+ && (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N)
133
+ && (data_.next_ <= MERSENNE_STATE_N)) {
134
+ return true;
135
+ }
136
+ return false;
137
+ }
138
+
139
+ inline uint32_t operator()() {
140
+ if (--(data_.left_) == 0) {
141
+ next_state();
142
+ }
143
+ uint32_t y = *(data_.state_.data() + data_.next_++);
144
+ y ^= (y >> 11);
145
+ y ^= (y << 7) & 0x9d2c5680;
146
+ y ^= (y << 15) & 0xefc60000;
147
+ y ^= (y >> 18);
148
+
149
+ return y;
150
+ }
151
+
152
+ private:
153
+ mt19937_data_pod data_;
154
+
155
+ inline void init_with_uint32(uint64_t seed) {
156
+ data_.seed_ = seed;
157
+ data_.seeded_ = true;
158
+ data_.state_[0] = seed & 0xffffffff;
159
+ for (const auto j : c10::irange(1, MERSENNE_STATE_N)) {
160
+ data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j);
161
+ }
162
+ data_.left_ = 1;
163
+ data_.next_ = 0;
164
+ }
165
+
166
+ inline uint32_t mix_bits(uint32_t u, uint32_t v) {
167
+ return (u & UMASK) | (v & LMASK);
168
+ }
169
+
170
+ inline uint32_t twist(uint32_t u, uint32_t v) {
171
+ return (mix_bits(u,v) >> 1) ^ (v & 1 ? MATRIX_A : 0);
172
+ }
173
+
174
+ inline void next_state() {
175
+ uint32_t* p = data_.state_.data();
176
+ data_.left_ = MERSENNE_STATE_N;
177
+ data_.next_ = 0;
178
+
179
+ for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) {
180
+ *p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]);
181
+ }
182
+
183
+ for(int j = MERSENNE_STATE_M; --j; p++) {
184
+ *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]);
185
+ }
186
+
187
+ *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]);
188
+ }
189
+
190
+ };
191
+
192
+ typedef mt19937_engine mt19937;
193
+
194
+ } // namespace at
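A brief sketch of how this engine is typically exercised, including the POD state round-trip used by torch.get_rng_state / torch.set_rng_state (illustrative only):

    #include <ATen/core/MT19937RNGEngine.h>
    #include <cstdint>

    void mt19937_example() {
      at::mt19937 gen(42);                         // seed the engine
      uint32_t first = gen();                      // one 32-bit draw
      at::mt19937_data_pod snapshot = gen.data();  // capture the full engine state
      uint32_t second = gen();
      gen.set_data(snapshot);                      // restore the captured state
      uint32_t replay = gen();                     // equals `second`
      (void)first; (void)second; (void)replay;
    }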
venv/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h ADDED
@@ -0,0 +1,139 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Dimname.h>
4
+ #include <c10/core/TensorImpl.h>
5
+
6
+ namespace at {
7
+
8
+ class TensorBase;
9
+
10
+ // XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen.
11
+ // Due to the c10/ATen library split, TensorImpl cannot depend on Dimname,
12
+ // so we have a couple of workarounds.
13
+ //
14
+ // In the long term, we'll move Dimname to c10 and everything in this file
15
+ // can be refactored out. The main blocker for that is that "c10::Symbol"
16
+ // actually exists outside of c10 and needs to be moved in.
17
+
18
+ // TensorImpl has a unique_ptr<NamedTensorMetaInterface> field.
19
+ // XXX: Ideally we would just put optional<vector<Dimname>> into TensorImpl.
20
+ //
21
+ // This class has an important invariant: there must be at least ONE
22
+ // non-wildcard dimname.
23
+ struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
24
+ // This enum is to remind people that the invariant on constructors is that
25
+ // the list of dimnames must have at least one non-wildcard
26
+ enum HAS_NON_WILDCARD {
27
+ HasNonWildcard
28
+ };
29
+
30
+ explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
31
+ : names_(names.vec()) {
32
+ check_invariants();
33
+ }
34
+ explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names)
35
+ : names_(std::move(names)) {
36
+ check_invariants();
37
+ }
38
+
39
+ std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override {
40
+ return std::make_unique<NamedTensorMeta>(HasNonWildcard, names_);
41
+ }
42
+
43
+ DimnameList names() const { return names_; }
44
+
45
+ // Used for an assertion in TensorImpl.h
46
+ int64_t slow_dim() const override {
47
+ return names_.size();
48
+ }
49
+
50
+ void check_invariants() const {
51
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
52
+ std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
53
+ }
54
+
55
+ void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
56
+ TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
57
+ std::copy(new_names.begin(), new_names.end(), names_.begin());
58
+ check_invariants();
59
+ }
60
+
61
+ void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) {
62
+ TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
63
+ names_ = std::move(new_names);
64
+ check_invariants();
65
+ }
66
+
67
+ // INVARIANT: at least one Dimname is non-WILDCARD
68
+ std::vector<Dimname> names_;
69
+ };
70
+
71
+ // When NamesMode is disabled, all operations ignore tensors' names fields.
72
+ // Concretely speaking, all tensors are treated as having nullopt names.
73
+ struct TORCH_API NamesMode {
74
+ static bool is_enabled();
75
+ static void set_enabled(bool enabled);
76
+ };
77
+
78
+
79
+ // A RAII, thread local (!) guard that enables or disables names upon
80
+ // construction, and sets it back to the original value upon destruction.
81
+ struct TORCH_API NoNamesGuard {
82
+ NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) {
83
+ NamesMode::set_enabled(false);
84
+ }
85
+ ~NoNamesGuard() {
86
+ if (initialized) {
87
+ reset();
88
+ }
89
+ }
90
+ void reset() {
91
+ TORCH_INTERNAL_ASSERT(initialized);
92
+ NamesMode::set_enabled(prev_mode);
93
+ }
94
+ private:
95
+ bool prev_mode;
96
+ bool initialized;
97
+ };
98
+
99
+ void check_names_valid_for(const TensorBase& tensor, DimnameList names);
100
+ void check_names_valid_for(size_t tensor_dim, DimnameList names);
101
+
102
+ // Sets the names of `tensor` to be `names`.
103
+ TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names);
104
+ TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);
105
+
106
+ constexpr size_t kMaxNamedTensorDim = 64;
107
+
108
+ DimnameList default_names(size_t len);
109
+
110
+ namespace impl {
111
+
112
+ // Some helper functions on TensorImpl. Useful for working with names in TH.
113
+ // XXX: Ideally these would exist as methods on TensorImpl
114
+ TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
115
+ TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
116
+
117
+ void check_names_valid_for(TensorImpl* impl, DimnameList names);
118
+
119
+ // Returns true if the tensor's names exist and are not all 'None'.
120
+ // Returns false if the tensor's names don't exist (were not allocated),
121
+ // or if all names are 'None'.
122
+ // We treat not-allocated-names the same as allocated names that are all 'None'.
123
+ TORCH_API bool has_names(const TensorImpl* impl);
124
+
125
+ // Returns the names of the tensor's dimensions.
126
+ // Unnamed tensors are treated as having 'None' in all dimensions; this method
127
+ // would return a DimnameList of all 'None's for an unnamed tensor.
128
+ TORCH_API DimnameList get_names(const TensorImpl* impl);
129
+
130
+ // This is more of an implementation detail; one should use impl::get_names /
131
+ // Tensor::names() whenever possible because it provides a cleaner API.
132
+ // Returns the names of the tensor if they have been allocated; returns nullopt
133
+ // instead if they haven't been. The names of a tensor are not allocated if a
134
+ // tensor is constructed with names=None.
135
+ TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);
136
+
137
+ } // namespace impl
138
+
139
+ } // namespace at
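A minimal sketch of the thread-local guard described above (illustrative only; not code from this header):

    #include <ATen/core/NamedTensor.h>

    void no_names_example() {
      const bool was_enabled = at::NamesMode::is_enabled();
      {
        at::NoNamesGuard guard;   // names handling is disabled for this scope
        // ... run code that should treat all tensors as unnamed ...
      }                           // the destructor restores the previous mode
      // NamesMode::is_enabled() is back to `was_enabled` here
      (void)was_enabled;
    }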
venv/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h ADDED
@@ -0,0 +1,242 @@
1
+ #pragma once
2
+
3
+ // define constants like M_PI and C keywords for MSVC
4
+ #ifdef _MSC_VER
5
+ #define _USE_MATH_DEFINES
6
+ #include <math.h>
7
+ #endif
8
+
9
+
10
+ #ifdef __CUDACC__
11
+ #include <cuda.h>
12
+ #endif
13
+
14
+ #include <ATen/core/Array.h>
15
+ #include <c10/macros/Macros.h>
16
+ #include <c10/util/Exception.h>
17
+ #include <c10/util/Half.h>
18
+ #include <cmath>
19
+ #include <cstdint>
20
+
21
+ namespace at {
22
+
23
+ // typedefs for holding vector data
24
+ namespace detail {
25
+
26
+ typedef at::detail::Array<uint32_t, 4> UINT4;
27
+ typedef at::detail::Array<uint32_t, 2> UINT2;
28
+ typedef at::detail::Array<double, 2> DOUBLE2;
29
+ typedef at::detail::Array<float, 2> FLOAT2;
30
+
31
+ } // namespace detail
32
+
33
+ /**
34
+ * Note [Philox Engine implementation]
35
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
36
+ * Originally implemented in PyTorch's fusion compiler
37
+ * Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
38
+ * for details regarding the engine.
39
+ *
40
+ * Note that currently this implementation of the philox engine is not used
41
+ * anywhere except for tests in cpu_generator_test.cpp. However, this engine
42
+ * will replace curandStatePhilox4_32_10_t in the future.
43
+ *
44
+ * The philox engine takes a seed value, a subsequence
45
+ * for starting the generation and an offset for the subsequence.
46
+ * Think of this engine as an algorithm producing a huge array. We are
47
+ * parallelizing this array by partitioning the huge array and assigning
48
+ * a thread index to each partition. In other words, each seed value
49
+ * (there are 2^64 possible seed values) gives a sub array of size
50
+ * 2^128 (each element in that array is a 128 bit number). Reasoning
51
+ * behind the array being of size 2^128 is, there are 2^64 possible
52
+ * thread index value and there is an array of size 2^64 for each of
53
+ * those thread index. Hence 2^64 * 2^64 = 2^128 for each seed value.
54
+ *
55
+ * In short, this generator can produce 2^64 (seed values) * 2^128 (number
56
+ * of elements in an array given by a seed value) = 2^192 values.
57
+ *
58
+ * Arguments:
59
+ * seed: Seed values could be any number from 0 to 2^64-1.
60
+ * subsequence: Subsequence is just the cuda thread indexing with:
61
+ * - blockIdx.x * blockDim.x + threadIdx.x
62
+ * offset: The offset variable in PhiloxEngine decides how many 128-bit
63
+ * random numbers to skip (i.e. how many groups of 4, 32-bit numbers to skip)
64
+ * and hence really decides the total number of randoms that can be achieved
65
+ * for the given subsequence.
66
+ */
67
+
68
+ class philox_engine {
69
+ public:
70
+
71
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
72
+ C10_HOST_DEVICE inline explicit philox_engine(uint64_t seed = 67280421310721,
73
+ uint64_t subsequence = 0,
74
+ uint64_t offset = 0) {
75
+
76
+ reset_state(seed, subsequence);
77
+ incr_n(offset);
78
+ }
79
+
80
+ C10_HOST_DEVICE inline void reset_state(uint64_t seed = 67280421310721,
81
+ uint64_t subsequence = 0) {
82
+ key_[0] = static_cast<uint32_t>(seed);
83
+ key_[1] = static_cast<uint32_t>(seed >> 32);
84
+ counter_ = detail::UINT4(0);
85
+ counter_[2] = static_cast<uint32_t>(subsequence);
86
+ counter_[3] = static_cast<uint32_t>(subsequence >> 32);
87
+ STATE = 0;
88
+ }
89
+
90
+ /**
91
+ * Set the offset field of Philox Generator to the desired offset.
92
+ */
93
+ C10_HOST_DEVICE inline void set_offset(uint64_t offset) {
94
+ counter_[0] = static_cast<uint32_t>(offset);
95
+ counter_[1] = static_cast<uint32_t>(offset >> 32);
96
+ }
97
+
98
+ /**
99
+ * Gets the current offset of the Philox Generator.
100
+ */
101
+ C10_HOST_DEVICE uint64_t get_offset() const {
102
+ uint64_t lo = static_cast<uint64_t>(counter_[0]);
103
+ uint64_t hi = static_cast<uint64_t>(counter_[1]) << 32;
104
+ return lo | hi;
105
+ }
106
+
107
+ /**
108
+ * Produces a unique 32-bit pseudo-random number on every invocation. Bookkeeps state to avoid waste.
109
+ */
110
+ C10_HOST_DEVICE inline uint32_t operator()(int32_t n_rounds = 10) { // 10 here to preserve back-compat behavior
111
+ if(STATE == 0) {
112
+ detail::UINT4 counter = counter_;
113
+ detail::UINT2 key = key_;
114
+ output_ = rand(counter, key, n_rounds);
115
+ incr();
116
+ }
117
+ uint32_t ret = output_[static_cast<int>(STATE)];
118
+ STATE = (STATE + 1) & 3;
119
+ return ret;
120
+ }
121
+
122
+ inline float randn(uint32_t n_rounds) {
123
+ #ifdef __CUDA_ARCH__
124
+ AT_ASSERT(false, "Unsupported invocation of randn on CUDA");
125
+ #endif
126
+ if(STATE == 0) {
127
+ detail::UINT4 counter = counter_;
128
+ detail::UINT2 key = key_;
129
+ output_ = rand(counter, key, n_rounds);
130
+ incr();
131
+ }
132
+ // TODO(min-jean-cho) change to Polar method, a more efficient version of Box-Muller method
133
+ // TODO(voz) We use std:: below, and thus need a separate impl for CUDA.
134
+ float u1 = 1 - uint32_to_uniform_float(output_[0]); // uint32_to_uniform_float returns [0,1), we need (0,1] to avoid passing 0 to log.
135
+ float u2 = 1 - uint32_to_uniform_float(output_[1]);
136
+ return static_cast<float>(std::sqrt(-2.0 * std::log(u1)) * std::cos(2.0 * M_PI * u2));
137
+ }
138
+
139
+ /**
140
+ * Function that Skips N 128 bit numbers in a subsequence
141
+ */
142
+ C10_HOST_DEVICE inline void incr_n(uint64_t n) {
143
+ uint32_t nlo = static_cast<uint32_t>(n);
144
+ uint32_t nhi = static_cast<uint32_t>(n >> 32);
145
+ counter_[0] += nlo;
146
+ // if overflow in x has occurred, carry over to nhi
147
+ if (counter_[0] < nlo) {
148
+ nhi++;
149
+ // if overflow in nhi has occurred during carry over,
150
+ // propagate that overflow to y and exit to increment z
151
+ // otherwise return
152
+ counter_[1] += nhi;
153
+ if(nhi != 0) {
154
+ if (nhi <= counter_[1]) {
155
+ return;
156
+ }
157
+ }
158
+ } else {
159
+ // if overflow in y has occurred during addition,
160
+ // exit to increment z
161
+ // otherwise return
162
+ counter_[1] += nhi;
163
+ if (nhi <= counter_[1]) {
164
+ return;
165
+ }
166
+ }
167
+ if (++counter_[2])
168
+ return;
169
+ ++counter_[3];
170
+ }
171
+
172
+ /**
173
+ * Function that Skips one 128 bit number in a subsequence
174
+ */
175
+ C10_HOST_DEVICE inline void incr() {
176
+ if (++counter_[0])
177
+ return;
178
+ if (++counter_[1])
179
+ return;
180
+ if (++counter_[2]) {
181
+ return;
182
+ }
183
+ ++counter_[3];
184
+ }
185
+
186
+ private:
187
+ detail::UINT4 counter_;
188
+ detail::UINT4 output_;
189
+ detail::UINT2 key_;
190
+ uint32_t STATE;
191
+
192
+ C10_HOST_DEVICE inline uint32_t mulhilo32(uint32_t a, uint32_t b,
193
+ uint32_t *result_high) {
194
+ #ifdef __CUDA_ARCH__
195
+ *result_high = __umulhi(a, b);
196
+ return a*b;
197
+ #else
198
+ const uint64_t product = static_cast<uint64_t>(a) * b;
199
+ *result_high = static_cast<uint32_t>(product >> 32);
200
+ return static_cast<uint32_t>(product);
201
+ #endif
202
+ }
203
+
204
+ C10_HOST_DEVICE inline detail::UINT4 single_round(detail::UINT4 ctr, detail::UINT2 in_key) {
205
+ uint32_t hi0 = 0;
206
+ uint32_t hi1 = 0;
207
+ uint32_t lo0 = mulhilo32(kPhiloxSA, ctr[0], &hi0);
208
+ uint32_t lo1 = mulhilo32(kPhiloxSB, ctr[2], &hi1);
209
+ detail::UINT4 ret;
210
+ ret[0] = hi1 ^ ctr[1] ^ in_key[0];
211
+ ret[1] = lo1;
212
+ ret[2] = hi0 ^ ctr[3] ^ in_key[1];
213
+ ret[3] = lo0;
214
+ return ret;
215
+ }
216
+
217
+ C10_HOST_DEVICE constexpr float uint32_to_uniform_float(uint32_t value) {
218
+ // maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
219
+ constexpr float scale = 4.6566127342e-10;
220
+ return static_cast<float>(value & 0x7FFFFFFF) * scale;
221
+ }
222
+
223
+
224
+
225
+ C10_HOST_DEVICE inline detail::UINT4 rand(detail::UINT4& counter, detail::UINT2& key, uint32_t n_rounds) {
226
+ for (uint32_t round = 0; round < (n_rounds - 1); round++) {
227
+ counter = single_round(counter, key);
228
+ key[0] += (kPhilox10A); key[1] += (kPhilox10B);
229
+ }
230
+ return single_round(counter, key);
231
+ }
232
+
233
+
234
+ static const uint32_t kPhilox10A = 0x9E3779B9;
235
+ static const uint32_t kPhilox10B = 0xBB67AE85;
236
+ static const uint32_t kPhiloxSA = 0xD2511F53;
237
+ static const uint32_t kPhiloxSB = 0xCD9E8D57;
238
+ };
239
+
240
+ typedef philox_engine Philox4_32;
241
+
242
+ } // namespace at
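Because the engine is counter-based, two instances built from the same (seed, subsequence, offset) triple reproduce the same stream; a small sketch (illustrative only):

    #include <ATen/core/PhiloxRNGEngine.h>
    #include <cstdint>

    void philox_example() {
      at::Philox4_32 a(/*seed=*/1234, /*subsequence=*/7, /*offset=*/0);
      at::Philox4_32 b(/*seed=*/1234, /*subsequence=*/7, /*offset=*/0);
      uint32_t x = a();
      uint32_t y = b();   // same value as x: identical (seed, subsequence, offset)
      // set_offset()/incr_n() skip whole 128-bit blocks, i.e. groups of four 32-bit draws
      (void)x; (void)y;
    }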
venv/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ #include <ATen/core/TorchDispatchUtils.h>
3
+
4
+ namespace at {
5
+ namespace impl {
6
+
7
+ struct TORCH_API RestorePythonTLSSnapshot {
8
+ RestorePythonTLSSnapshot();
9
+ ~RestorePythonTLSSnapshot();
10
+
11
+ private:
12
+ c10::impl::LocalDispatchKeySet saved_;
13
+ c10::impl::ForceDispatchKeyGuard guard_;
14
+ };
15
+
16
+
17
+ // RAII guard to make working with the above TLS safer.
18
+ struct TORCH_API MaybeSetTLSOnEntryGuard {
19
+ public:
20
+ MaybeSetTLSOnEntryGuard();
21
+ ~MaybeSetTLSOnEntryGuard();
22
+
23
+ private:
24
+ bool value_set_;
25
+ };
26
+
27
+ } // namespace impl
28
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/dispatch/Dispatcher.h>
4
+
5
+ // TODO: this can probably live in c10
6
+
7
+ namespace at {
8
+ namespace impl {
9
+
10
+ class TORCH_API PythonOpRegistrationTrampoline final {
11
+ static std::atomic<c10::impl::PyInterpreter*> interpreter_;
12
+
13
+ public:
14
+ // Returns true if you successfully registered yourself (that means
15
+ // you are in the hot seat for doing the operator registrations!)
16
+ static bool registerInterpreter(c10::impl::PyInterpreter*);
17
+
18
+ // Returns nullptr if no interpreter has been registered yet.
19
+ static c10::impl::PyInterpreter* getInterpreter();
20
+ };
21
+
22
+ } // namespace impl
23
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h ADDED
@@ -0,0 +1,83 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/core/QScheme.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+
7
+ namespace at {
8
+
9
+ class Tensor;
10
+ struct QTensorImpl;
11
+ struct Quantizer;
12
+ using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
13
+ using QuantizerPtr = c10::intrusive_ptr<Quantizer>;
14
+
15
+ /**
16
+ * Quantizer is the class for storing all the information
17
+ * that's necessary to perform quantize and dequantize
18
+ * operations.
19
+ *
20
+ * We might have different types of quantization schemes and this is
21
+ * the base class for all quantizers.
22
+ *
23
+ * QTensorImpl will hold a pointer to Quantizer so that we can support
24
+ * different quantization schemes on Tensor.
25
+ *
26
+ * For example, the most common quantization scheme, Affine Quantization,
27
+ * requires scale and zero_point as parameters; we'll store scale and zero_point
28
+ * inside the instance and we can use it to quantize a float Tensor or
29
+ * dequantize a quantized Tensor.
30
+ *
31
+ * When you add a new type of leaf Quantizer class, please also
32
+ * make sure to add a corresponding QScheme enum since
33
+ * they should have a one-to-one mapping.
34
+ *
35
+ * Note about intrusive_ptr:
36
+ * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can
37
+ * share the same Quantizer. Quantizer should be immutable.
38
+ */
39
+ struct TORCH_API Quantizer : public c10::intrusive_ptr_target {
40
+ const ScalarType scalar_type_;
41
+ explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {}
42
+ ~Quantizer() override;
43
+
44
+ // Copied from torch/csrc/jit/ir/scope.h
45
+ QuantizerPtr intrusive_from_this() {
46
+ c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
47
+ // from a raw `this` pointer
48
+ // so we need to bump the refcount
49
+ // to account for this ownership
50
+ return c10::intrusive_ptr<Quantizer>::reclaim(this);
51
+ }
52
+
53
+ /**
54
+ * Each concrete Quantizer type should have a unique QScheme type.
55
+ */
56
+ virtual QScheme qscheme() const = 0;
57
+
58
+ ScalarType scalar_type() const {
59
+ return scalar_type_;
60
+ }
61
+
62
+ /**
63
+ * quantize a float Tensor into a quantized Tensor.
64
+ */
65
+ virtual Tensor quantize(const Tensor& t) = 0;
66
+
67
+ /**
68
+ * dequantize a quantized Tensor into a float Tensor.
69
+ */
70
+ virtual Tensor dequantize(const Tensor& t) = 0;
71
+
72
+ /**
73
+ * dequantize a quantized Tensor into a float Tensor, out= variant
74
+ */
75
+ virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0;
76
+
77
+ /**
78
+ * Compare against `other` for equality.
79
+ */
80
+ virtual bool equalTo(QuantizerPtr other) const = 0;
81
+ };
82
+
83
+ } // namespace at
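The affine scheme mentioned in the comment maps a real value to an integer as q = clamp(round(x / scale) + zero_point). A standalone sketch of that arithmetic (illustrative only; the helper names below are hypothetical and not part of this header):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical helpers, shown only to illustrate the affine mapping that a
    // scale/zero_point Quantizer subclass implements conceptually.
    inline int8_t affine_quantize_one(float x, double scale, int64_t zero_point) {
      const int64_t q = static_cast<int64_t>(std::nearbyint(x / scale)) + zero_point;
      return static_cast<int8_t>(std::clamp<int64_t>(q, -128, 127));  // qint8 range
    }

    inline float affine_dequantize_one(int8_t q, double scale, int64_t zero_point) {
      return static_cast<float>((static_cast<int64_t>(q) - zero_point) * scale);
    }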
venv/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ namespace Reduction {
5
+
6
+ // NB: Keep this in sync with Reduction class in torch/nn/_reduction.py
7
+ // These constants control the reduction behavior of loss functions.
8
+ // Ideally, this would be a scoped enum, but jit doesn't support that
9
+ enum Reduction {
10
+ None, // Do not reduce
11
+ Mean, // (Possibly weighted) mean of losses
12
+ Sum, // Sum losses
13
+ END
14
+ };
15
+ } // namespace Reduction
16
+ } // namespace at
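For illustration, this is how a loss kernel typically interprets the enum for a vector of per-element losses (a sketch, not code from this header):

    #include <ATen/core/Reduction.h>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    inline double reduce_losses(const std::vector<double>& losses, int64_t reduction) {
      const double sum = std::accumulate(losses.begin(), losses.end(), 0.0);
      if (reduction == at::Reduction::Sum) {
        return sum;
      }
      if (reduction == at::Reduction::Mean) {
        return losses.empty() ? 0.0 : sum / static_cast<double>(losses.size());
      }
      // Reduction::None would normally return the unreduced per-element losses instead.
      return sum;
    }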
venv/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h ADDED
@@ -0,0 +1 @@
1
+ #include <c10/core/Scalar.h>
venv/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h ADDED
@@ -0,0 +1 @@
1
+ #include <c10/core/ScalarType.h>
venv/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h ADDED
@@ -0,0 +1,276 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Deprecated.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/irange.h>
8
+ #include <cstddef>
9
+ #include <cstdint>
10
+
11
+ namespace at {
12
+
13
+ // The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor
14
+ // is used to enable the __restrict__ keyword/modifier for the data
15
+ // passed to cuda.
16
+ template <typename T>
17
+ struct DefaultPtrTraits {
18
+ typedef T* PtrType;
19
+ };
20
+
21
+ #if defined(__CUDACC__) || defined(__HIPCC__)
22
+ template <typename T>
23
+ struct RestrictPtrTraits {
24
+ typedef T* __restrict__ PtrType;
25
+ };
26
+ #endif
27
+
28
+ // TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors.
29
+ // For CUDA tensors they are used in device code (only). This means that we restrict ourselves
30
+ // to functions and types available there (e.g. IntArrayRef isn't).
31
+
32
+ // The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers.
33
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
34
+ class TensorAccessorBase {
35
+ public:
36
+ typedef typename PtrTraits<T>::PtrType PtrType;
37
+
38
+ C10_HOST_DEVICE TensorAccessorBase(
39
+ PtrType data_,
40
+ const index_t* sizes_,
41
+ const index_t* strides_)
42
+ : data_(data_), sizes_(sizes_), strides_(strides_) {}
43
+ C10_HOST IntArrayRef sizes() const {
44
+ return IntArrayRef(sizes_,N);
45
+ }
46
+ C10_HOST IntArrayRef strides() const {
47
+ return IntArrayRef(strides_,N);
48
+ }
49
+ C10_HOST_DEVICE index_t stride(index_t i) const {
50
+ return strides_[i];
51
+ }
52
+ C10_HOST_DEVICE index_t size(index_t i) const {
53
+ return sizes_[i];
54
+ }
55
+ C10_HOST_DEVICE PtrType data() {
56
+ return data_;
57
+ }
58
+ C10_HOST_DEVICE const PtrType data() const {
59
+ return data_;
60
+ }
61
+ protected:
62
+ PtrType data_;
63
+ const index_t* sizes_;
64
+ const index_t* strides_;
65
+ };
66
+
67
+ // The `TensorAccessor` is typically instantiated for CPU `Tensor`s using
68
+ // `Tensor.accessor<T, N>()`.
69
+ // For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only
70
+ // indexing on the device uses `TensorAccessor`s.
71
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
72
+ class TensorAccessor : public TensorAccessorBase<T,N,PtrTraits,index_t> {
73
+ public:
74
+ typedef typename PtrTraits<T>::PtrType PtrType;
75
+
76
+ C10_HOST_DEVICE TensorAccessor(
77
+ PtrType data_,
78
+ const index_t* sizes_,
79
+ const index_t* strides_)
80
+ : TensorAccessorBase<T, N, PtrTraits, index_t>(data_,sizes_,strides_) {}
81
+
82
+ C10_HOST_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
83
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
84
+ }
85
+
86
+ C10_HOST_DEVICE const TensorAccessor<T, N-1, PtrTraits, index_t> operator[](index_t i) const {
87
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1);
88
+ }
89
+ };
90
+
91
+ template<typename T, template <typename U> class PtrTraits, typename index_t>
92
+ class TensorAccessor<T,1,PtrTraits,index_t> : public TensorAccessorBase<T,1,PtrTraits,index_t> {
93
+ public:
94
+ typedef typename PtrTraits<T>::PtrType PtrType;
95
+
96
+ C10_HOST_DEVICE TensorAccessor(
97
+ PtrType data_,
98
+ const index_t* sizes_,
99
+ const index_t* strides_)
100
+ : TensorAccessorBase<T, 1, PtrTraits, index_t>(data_,sizes_,strides_) {}
101
+ C10_HOST_DEVICE T & operator[](index_t i) {
102
+ // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
103
+ return this->data_[this->strides_[0]*i];
104
+ }
105
+ C10_HOST_DEVICE const T & operator[](index_t i) const {
106
+ return this->data_[this->strides_[0]*i];
107
+ }
108
+ };
109
+
110
+
111
+ // GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used for CUDA `Tensor`s on the host
112
+ // and as kernel arguments on the device.
113
+ // In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host)
114
+ // in order to transfer them to the device when calling kernels.
115
+ // On the device, indexing a multidimensional tensor yields a `TensorAccessor`.
116
+ // Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__.
117
+ // Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available
118
+ // on the device, so those functions are host only.
119
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
120
+ class GenericPackedTensorAccessorBase {
121
+ public:
122
+ typedef typename PtrTraits<T>::PtrType PtrType;
123
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
124
+ C10_HOST GenericPackedTensorAccessorBase(
125
+ PtrType data_,
126
+ const index_t* sizes_,
127
+ const index_t* strides_)
128
+ : data_(data_) {
129
+ std::copy(sizes_, sizes_ + N, std::begin(this->sizes_));
130
+ std::copy(strides_, strides_ + N, std::begin(this->strides_));
131
+ }
132
+
133
+ // if index_t is not int64_t, we want to have an int64_t constructor
134
+ template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
135
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
136
+ C10_HOST GenericPackedTensorAccessorBase(
137
+ PtrType data_,
138
+ const source_index_t* sizes_,
139
+ const source_index_t* strides_)
140
+ : data_(data_) {
141
+ for (const auto i : c10::irange(N)) {
142
+ this->sizes_[i] = sizes_[i];
143
+ this->strides_[i] = strides_[i];
144
+ }
145
+ }
146
+
147
+ C10_HOST_DEVICE index_t stride(index_t i) const {
148
+ return strides_[i];
149
+ }
150
+ C10_HOST_DEVICE index_t size(index_t i) const {
151
+ return sizes_[i];
152
+ }
153
+ C10_HOST_DEVICE PtrType data() {
154
+ return data_;
155
+ }
156
+ C10_HOST_DEVICE const PtrType data() const {
157
+ return data_;
158
+ }
159
+ protected:
160
+ PtrType data_;
161
+ // NOLINTNEXTLINE(*c-arrays*)
162
+ index_t sizes_[N];
163
+ // NOLINTNEXTLINE(*c-arrays*)
164
+ index_t strides_[N];
165
+ C10_HOST void bounds_check_(index_t i) const {
166
+ TORCH_CHECK_INDEX(
167
+ 0 <= i && i < index_t{N},
168
+ "Index ",
169
+ i,
170
+ " is not within bounds of a tensor of dimension ",
171
+ N);
172
+ }
173
+ };
174
+
175
+ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
176
+ class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase<T,N,PtrTraits,index_t> {
177
+ public:
178
+ typedef typename PtrTraits<T>::PtrType PtrType;
179
+
180
+ C10_HOST GenericPackedTensorAccessor(
181
+ PtrType data_,
182
+ const index_t* sizes_,
183
+ const index_t* strides_)
184
+ : GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
185
+
186
+ // if index_t is not int64_t, we want to have an int64_t constructor
187
+ template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
188
+ C10_HOST GenericPackedTensorAccessor(
189
+ PtrType data_,
190
+ const source_index_t* sizes_,
191
+ const source_index_t* strides_)
192
+ : GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {}
193
+
194
+ C10_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) {
195
+ index_t* new_sizes = this->sizes_ + 1;
196
+ index_t* new_strides = this->strides_ + 1;
197
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
198
+ }
199
+
200
+ C10_DEVICE const TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) const {
201
+ const index_t* new_sizes = this->sizes_ + 1;
202
+ const index_t* new_strides = this->strides_ + 1;
203
+ return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides);
204
+ }
205
+
206
+ /// Returns a PackedTensorAccessor of the same dimension after transposing the
207
+ /// two dimensions given. Does not actually move elements; transposition is
208
+ /// made by permuting the size/stride arrays. If the dimensions are not valid,
209
+ /// asserts.
210
+ C10_HOST GenericPackedTensorAccessor<T, N, PtrTraits, index_t> transpose(
211
+ index_t dim1,
212
+ index_t dim2) const {
213
+ this->bounds_check_(dim1);
214
+ this->bounds_check_(dim2);
215
+ GenericPackedTensorAccessor<T, N, PtrTraits, index_t> result(
216
+ this->data_, this->sizes_, this->strides_);
217
+ std::swap(result.strides_[dim1], result.strides_[dim2]);
218
+ std::swap(result.sizes_[dim1], result.sizes_[dim2]);
219
+ return result;
220
+ }
221
+ };
222
+
223
+ template<typename T, template <typename U> class PtrTraits, typename index_t>
224
+ class GenericPackedTensorAccessor<T,1,PtrTraits,index_t> : public GenericPackedTensorAccessorBase<T,1,PtrTraits,index_t> {
225
+ public:
226
+ typedef typename PtrTraits<T>::PtrType PtrType;
227
+ C10_HOST GenericPackedTensorAccessor(
228
+ PtrType data_,
229
+ const index_t* sizes_,
230
+ const index_t* strides_)
231
+ : GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
232
+
233
+ // if index_t is not int64_t, we want to have an int64_t constructor
234
+ template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type>
235
+ C10_HOST GenericPackedTensorAccessor(
236
+ PtrType data_,
237
+ const source_index_t* sizes_,
238
+ const source_index_t* strides_)
239
+ : GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {}
240
+
241
+ C10_DEVICE T & operator[](index_t i) {
242
+ return this->data_[this->strides_[0] * i];
243
+ }
244
+ C10_DEVICE const T& operator[](index_t i) const {
245
+ return this->data_[this->strides_[0]*i];
246
+ }
247
+
248
+ // Same as in the general N-dimensional case, but note that in the
249
+ // 1-dimensional case the returned PackedTensorAccessor will always be an
250
+ // identical copy of the original
251
+ C10_HOST GenericPackedTensorAccessor<T, 1, PtrTraits, index_t> transpose(
252
+ index_t dim1,
253
+ index_t dim2) const {
254
+ this->bounds_check_(dim1);
255
+ this->bounds_check_(dim2);
256
+ return GenericPackedTensorAccessor<T, 1, PtrTraits, index_t>(
257
+ this->data_, this->sizes_, this->strides_);
258
+ }
259
+ };
260
+
261
+
262
+ // Can't put this directly into the macro function args because of commas
263
+ #define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t>
264
+
265
+ // Old name for `GenericPackedTensorAccessor`
266
+ template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
267
+ C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X)
268
+
269
+ #undef AT_X
270
+
271
+ template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
272
+ using PackedTensorAccessor32 = GenericPackedTensorAccessor<T, N, PtrTraits, int32_t>;
273
+
274
+ template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
275
+ using PackedTensorAccessor64 = GenericPackedTensorAccessor<T, N, PtrTraits, int64_t>;
276
+ } // namespace at
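Editor's sketch (not part of the diff above): the accessor classes in TensorAccessor.h are normally obtained through Tensor::accessor<T, N>() on the CPU side (and through the packed_accessor32/64 methods for CUDA kernels). A minimal CPU-side example, assuming an ATen build with <ATen/ATen.h> available; the helper name count_positive is illustrative only:

#include <ATen/ATen.h>

// Count strictly positive entries of a 2-D float tensor through a TensorAccessor.
int64_t count_positive(const at::Tensor& t) {
  // accessor<T, N>() checks dtype and dimensionality, then returns a
  // TensorAccessor<float, 2> whose operator[] walks the size/stride arrays.
  auto acc = t.accessor<float, 2>();
  int64_t n = 0;
  for (int64_t i = 0; i < acc.size(0); ++i) {
    for (int64_t j = 0; j < acc.size(1); ++j) {
      if (acc[i][j] > 0.0f) {
        ++n;
      }
    }
  }
  return n;
}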
venv/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h ADDED
@@ -0,0 +1,173 @@
1
+ #include <c10/macros/Macros.h>
2
+ #include <c10/util/Half.h>
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/MathConstants.h>
5
+ #include <ATen/NumericUtils.h>
6
+ #include <limits>
7
+ #include <cstdint>
8
+ #include <cassert>
9
+
10
+ namespace at {
11
+
12
+ // Using DistAccumType in accumulate types for distributions.
13
+ // Note: Ideally we'd be using ATen/AccumulateType.h but looks
14
+ // like the there is some inconsistency in how accumulate types
15
+ // are mapped currently, e.g. for the cpu side, float is mapped
16
+ // to double.
17
+ template <typename T>
18
+ struct DistAccumType { };
19
+
20
+ #if defined(__CUDACC__) || defined(__HIPCC__)
21
+ template <> struct DistAccumType<half> { using type = float; };
22
+ #endif
23
+ template <> struct DistAccumType<BFloat16> { using type = float; };
24
+ template <> struct DistAccumType<Half> { using type = float; };
25
+ template <> struct DistAccumType<float> { using type = float; };
26
+ template <> struct DistAccumType<double> { using type = double; };
27
+
28
+ template <typename T>
29
+ using dist_acctype = typename DistAccumType<T>::type;
30
+
31
+ namespace transformation {
32
+
33
+ /**
34
+ * A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified.
35
+ * `range` is `to - from`
36
+ * `base` is `from`
37
+ */
38
+ template <typename T, typename V>
39
+ C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) {
40
+ return static_cast<T>(static_cast<int64_t>((val % range) + base));
41
+ }
42
+
43
+ /**
44
+ * A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None
45
+ */
46
+ template <typename T, typename V>
47
+ C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
48
+ return static_cast<T>(static_cast<int64_t>(val));
49
+ }
50
+
51
+ /**
52
+ * A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`.
53
+ * In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double
54
+ * in this overloaded version
55
+ */
56
+ template <typename T, typename V>
57
+ C10_HOST_DEVICE inline typename std::enable_if<!(std::is_floating_point<T>::value), T>::type uniform_int(V val) {
58
+ if constexpr (std::is_same_v<T, bool>) {
59
+ return static_cast<bool>(val & 1);
60
+ } else if constexpr (std::is_same_v<T, int64_t>) {
61
+ return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
62
+ } else if constexpr (std::is_same_v<T, at::Half> || std::is_same<T, at::BFloat16>::value) {
63
+ return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
64
+ } else if constexpr (std::is_integral_v<T>) {
65
+ return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
66
+ } else {
67
+ assert(false);
68
+ return 0;
69
+ }
70
+ }
71
+
72
+ /**
73
+ * An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`,
74
+ * added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version.
75
+ */
76
+ template<typename T, typename V>
77
+ C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type uniform_int(V val) {
78
+ return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
79
+ }
80
+
81
+ template <typename T, typename V>
82
+ C10_HOST_DEVICE inline dist_acctype<T> uniform_real(V val, T from, T to) {
83
+ constexpr auto MASK = static_cast<V>((static_cast<uint64_t>(1) << std::numeric_limits<T>::digits) - 1);
84
+ constexpr auto DIVISOR = static_cast<dist_acctype<T>>(1) / (static_cast<uint64_t>(1) << std::numeric_limits<T>::digits);
85
+ dist_acctype<T> x = (val & MASK) * DIVISOR;
86
+ return (x * (to - from) + from);
87
+ }
88
+
89
+ /**
90
+ * Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to
91
+ * normally distributed with `mean` and standard deviation `std`.
92
+ */
93
+ template <typename T>
94
+ C10_HOST_DEVICE inline T normal(T val, T mean, T std) {
95
+ return val * std + mean;
96
+ }
97
+
98
+ /**
99
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
100
+ * Cauchy distribution with location parameter `median` and scale parameter `sigma`.
101
+ */
102
+ template <typename T>
103
+ C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) {
104
+ // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
105
+ // __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps),
106
+ // thus we clip those values.
107
+ constexpr T eps = std::numeric_limits<T>::epsilon();
108
+ constexpr T one_minus_eps = 1 - eps;
109
+ constexpr T zero_plus_eps = 0 + eps;
110
+ val = (val > one_minus_eps ? one_minus_eps : val);
111
+ val = (val < zero_plus_eps ? zero_plus_eps : val);
112
+ return median + sigma * at::tan(c10::pi<T> * (val - static_cast<T>(0.5)));
113
+ }
114
+
115
+ template <>
116
+ C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) {
117
+ // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
118
+ return median + sigma * at::tan(c10::pi<double> * (val - static_cast<double>(0.5)));
119
+ }
120
+
121
+ /**
122
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
123
+ * exponentially distributed with `lambda` parameter of the distribution.
124
+ */
125
+ template <typename T>
126
+ C10_HOST_DEVICE inline T exponential(T val, T lambda) {
127
+ // https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates
128
+ // Different implementations for CUDA and CPU to preserve original logic
129
+ // TODO: must be investigated and unified!!!
130
+ // https://github.com/pytorch/pytorch/issues/38662
131
+ #if defined(__CUDACC__) || defined(__HIPCC__)
132
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
133
+ // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
134
+ // we need log to be not 0, and not underflow when converted to half
135
+ // fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args
136
+ auto log = val >= static_cast<T>(1.) - std::numeric_limits<T>::epsilon() / 2
137
+ ? -std::numeric_limits<T>::epsilon() / 2
138
+ : at::log(val);
139
+ return static_cast<T>(-1.0) / lambda * log;
140
+ #else
141
+ return static_cast<T>(-1.0) / lambda * at::log1p(-val);
142
+ #endif
143
+ }
144
+
145
+ /**
146
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
147
+ * geometrically distributed with success probability `p`.
148
+ */
149
+ template <typename T>
150
+ C10_HOST_DEVICE inline T geometric(T val, T p) {
151
+ // https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions
152
+ return static_cast<T>(::ceil(at::log(val) / at::log1p(-p)));
153
+ }
154
+
155
+ /**
156
+ * Transforms normally distributed `val` to log-normally distributed.
157
+ */
158
+ template <typename T>
159
+ C10_HOST_DEVICE inline T log_normal(T val) {
160
+ // https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles
161
+ return at::exp(val);
162
+ }
163
+
164
+ /**
165
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
166
+ * bernoulli distributed with success probability `p`.
167
+ */
168
+ template <typename T>
169
+ C10_HOST_DEVICE inline T bernoulli(T val, T p) {
170
+ return val < p;
171
+ }
172
+
173
+ }} // namespace at::transformation
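Editor's sketch (not part of the diff above): the transformation helpers take a raw draw from an RNG engine and map it onto the target distribution; producing that draw (for example with one of the Philox engines) is the caller's job. A minimal sketch, assuming an ATen build; the wrapper names to_exponential and to_bernoulli are illustrative only:

#include <ATen/core/TransformationHelper.h>

// Map a raw uniform draw u in [0, 1) onto the target distribution.
double to_exponential(double u, double lambda) {
  // CPU branch of exponential(): -log1p(-u) / lambda.
  return at::transformation::exponential<double>(u, lambda);
}

bool to_bernoulli(double u, double p) {
  // bernoulli() returns 1.0 when u < p, else 0.0.
  return at::transformation::bernoulli<double>(u, p) != 0.0;
}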
venv/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h ADDED
@@ -0,0 +1 @@
1
+ #include <c10/core/UndefinedTensorImpl.h>
venv/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h ADDED
@@ -0,0 +1,95 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <tuple>
5
+ #include <type_traits>
6
+ #include <utility>
7
+
8
+ #include <c10/util/ArrayRef.h>
9
+ #include <ATen/core/List.h>
10
+
11
+ namespace at {
12
+
13
+ // This class allows you to write variadic functions which
14
+ // call a (possibly overloaded) function on each argument,
15
+ // in order. This is most commonly used in autogenerated code,
16
+ // where it is convenient to have a function that can uniformly
17
+ // take arguments of different types. If your arguments
18
+ // are homogenous consider using a std::initializer_list instead.
19
+ //
20
+ // For examples of this in use, see torch/csrc/utils/variadic.h
21
+ template <typename F>
22
+ struct IterArgs {
23
+ template <typename... Args>
24
+ inline F& apply() {
25
+ return self();
26
+ }
27
+
28
+ // NB: Use perfect forwarding here, otherwise we'll make value
29
+ // copies of all arguments!
30
+ template <typename T, typename... Args>
31
+ inline F& apply(T&& arg, Args&&... args) {
32
+ self()(std::forward<T>(arg));
33
+ if (self().short_circuit()) {
34
+ return self();
35
+ } else {
36
+ return apply(std::forward<Args>(args)...);
37
+ }
38
+ }
39
+
40
+ // Here are some handy overloads which provide sensible
41
+ // defaults for container-like structures that one might
42
+ // be interested in recursing into. You can enable them
43
+ // by adding:
44
+ //
45
+ // using IterArgs<YourStructName>::operator()
46
+ //
47
+ // to your struct. These are not enabled by default because
48
+ // you may be able to process these structures more efficiently
49
+ // than handling them one-by-one.
50
+
51
+ template <typename T>
52
+ void operator()(c10::IListRef<T> args) {
53
+ for (const auto& arg : args) {
54
+ self()(arg);
55
+ if (self().short_circuit())
56
+ return;
57
+ }
58
+ }
59
+
60
+ template <typename T>
61
+ void operator()(at::ArrayRef<T> args) {
62
+ for (const auto& arg : args) {
63
+ self()(arg);
64
+ if (self().short_circuit())
65
+ return;
66
+ }
67
+ }
68
+
69
+ template <typename T>
70
+ void operator()(const torch::List<T>& args) {
71
+ for (const auto& arg : args) {
72
+ self()(arg);
73
+ if (self().short_circuit())
74
+ return;
75
+ }
76
+ }
77
+
78
+ // NB: we need to specify std::vector manually as C++ won't
79
+ // do an implicit conversion to make a template deduction go through.
80
+ template <typename T>
81
+ void operator()(const std::vector<T>& args) {
82
+ self()(at::ArrayRef<T>{args});
83
+ }
84
+
85
+ constexpr bool short_circuit() const {
86
+ return false;
87
+ }
88
+
89
+ private:
90
+ inline F& self() {
91
+ return *static_cast<F*>(this);
92
+ }
93
+ };
94
+
95
+ } // namespace at
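Editor's sketch (not part of the diff above): IterArgs is consumed via CRTP, with the derived struct supplying operator() overloads and apply() visiting each argument in order. A minimal sketch, assuming an ATen build; CountDefined and count_defined are illustrative names only:

#include <cstddef>
#include <utility>

#include <ATen/core/Tensor.h>
#include <ATen/core/Variadic.h>

// CRTP visitor: counts how many of its arguments are defined Tensors.
struct CountDefined : at::IterArgs<CountDefined> {
  std::size_t n = 0;
  void operator()(const at::Tensor& t) {
    if (t.defined()) {
      ++n;
    }
  }
  template <typename T>
  void operator()(const T&) {}  // ignore anything that is not a Tensor
};

template <typename... Args>
std::size_t count_defined(Args&&... args) {
  return CountDefined().apply(std::forward<Args>(args)...).n;
}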
venv/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h ADDED
@@ -0,0 +1,96 @@
1
+ #pragma once
2
+ #include <cstring>
3
+ #include <map>
4
+ #include <memory>
5
+ #include <ostream>
6
+ #include <sstream>
7
+ #include <unordered_map>
8
+
9
+ #include <c10/core/impl/LocalDispatchKeySet.h>
10
+
11
+ namespace at {
12
+ namespace vitals {
13
+
14
+ TORCH_API bool torchVitalEnabled();
15
+
16
+ struct TORCH_API TorchVitalAttr {
17
+ // always initialized to empty
18
+ std::string value = "";
19
+ template <typename T>
20
+ TorchVitalAttr& operator<<(const T& t) {
21
+ if (torchVitalEnabled()) {
22
+ std::stringstream ss;
23
+ ss << t;
24
+ value += ss.str();
25
+ }
26
+ return *this;
27
+ }
28
+
29
+ template <typename T>
30
+ void write(const T& t, bool force) {
31
+ if (force || torchVitalEnabled()) {
32
+ std::stringstream ss;
33
+ ss << t;
34
+ value = ss.str();
35
+ }
36
+ }
37
+ };
38
+
39
+ struct TORCH_API TorchVital {
40
+ std::string name;
41
+ std::unordered_map<std::string, TorchVitalAttr> attrs;
42
+
43
+ explicit TorchVital(std::string n) : name(std::move(n)) {}
44
+ TorchVital(const TorchVital&) = default;
45
+ TorchVital(TorchVital&&) = default;
46
+ TorchVital() = delete;
47
+
48
+ TorchVitalAttr& create(const std::string& attr);
49
+ TorchVitalAttr& create(const std::string& attr, bool force);
50
+ friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt);
51
+
52
+ ~TorchVital();
53
+ };
54
+
55
+ std::ostream& operator<<(std::ostream& os, TorchVital const& tv);
56
+
57
+ // A way to access vitals by string names instead of by global reference.
58
+ // This enables access to vitals from the Python API.
59
+ class TORCH_API APIVitals {
60
+ public:
61
+ bool vitals_enabled;
62
+
63
+ // Set any vital sign that was added to the map.
64
+ bool setVital(
65
+ const std::string& vital_name,
66
+ const std::string& attr_name,
67
+ const std::string& value,
68
+ bool force = false);
69
+ std::string readVitals();
70
+
71
+ APIVitals();
72
+
73
+ // Ensure this stays a singleton
74
+ APIVitals(APIVitals const& other) = delete;
75
+ APIVitals(APIVitals&& other) = delete;
76
+ APIVitals& operator=(const APIVitals&) = delete;
77
+ APIVitals& operator=(APIVitals&&) = delete;
78
+
79
+ private:
80
+ std::unordered_map<std::string, TorchVital> name_map_;
81
+ };
82
+
83
+ extern TORCH_API APIVitals VitalsAPI;
84
+
85
+ } // namespace vitals
86
+ } // namespace at
87
+
88
+ #define TORCH_VITAL_DECLARE(name) \
89
+ TORCH_API at::vitals::TorchVital TorchVital_##name;
90
+
91
+ #define TORCH_VITAL_DEFINE(name) \
92
+ TORCH_API at::vitals::TorchVital TorchVital_##name(#name);
93
+
94
+ #define TORCH_VITAL_BASE(name) TorchVital_##name
95
+
96
+ #define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr)
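Editor's sketch (not part of the diff above): the macros at the end of Vitals.h are the intended entry point. A minimal sketch of defining and writing one vital, assuming the code is compiled against libtorch (the TORCH_API in the macro expansion is geared toward in-library use); the names Example and startup are illustrative only:

#include <ATen/core/Vitals.h>

// One global vital named "Example"; the macro expands (with TORCH_API) to
//   at::vitals::TorchVital TorchVital_Example("Example");
TORCH_VITAL_DEFINE(Example)

void record_startup() {
  // create()/operator<< only build the string when vitals are enabled at
  // runtime (torchVitalEnabled()), so this is cheap when they are off.
  TORCH_VITAL(Example, startup) << "initialized";
}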
venv/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h ADDED
@@ -0,0 +1,151 @@
1
+ #pragma once
2
+ #include <unordered_set>
3
+ #include <vector>
4
+ #include <ATen/core/symbol.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/hash.h>
7
+
8
+ namespace c10 {
9
+ /**
10
+ * class AliasInfo
11
+ *
12
+ * Data structure to hold aliasing information for an `Argument`. They can be
13
+ * nested to represent aliasing information on contained types.
14
+ *
15
+ * There is a `beforeSet` which describes the aliasing information before the
16
+ * operator executes, and an `afterSet` that describes aliasing info
17
+ * after execution.
18
+ */
19
+ class AliasInfo {
20
+ public:
21
+ // Symbol for the set that can alias anything
22
+ static Symbol wildcardSet() {
23
+ static const Symbol wc = Symbol::fromQualString("alias::*");
24
+ return wc;
25
+ }
26
+
27
+ void setIsWrite(bool isWrite) {
28
+ isWrite_ = isWrite;
29
+ }
30
+
31
+ bool isWrite() const {
32
+ return isWrite_;
33
+ }
34
+
35
+ void addBeforeSet(Symbol aliasSet) {
36
+ beforeSets_.insert(aliasSet);
37
+ }
38
+
39
+ void addAfterSet(Symbol aliasSet) {
40
+ afterSets_.insert(aliasSet);
41
+ }
42
+
43
+ const std::unordered_set<Symbol>& beforeSets() const {
44
+ return beforeSets_;
45
+ }
46
+
47
+ const std::unordered_set<Symbol>& afterSets() const {
48
+ return afterSets_;
49
+ }
50
+
51
+ Symbol beforeSet() const {
52
+ AT_ASSERT(beforeSets_.size() == 1);
53
+ return *beforeSets_.begin();
54
+ }
55
+
56
+ bool isWildcardBefore() const {
57
+ return beforeSets_.count(wildcardSet()) != 0;
58
+ }
59
+
60
+ bool isWildcardAfter() const {
61
+ return afterSets_.count(wildcardSet()) != 0;
62
+ }
63
+
64
+ // the alias info for the contained types of the type
65
+ // e.g. if this is an annotation on List[T], `sets` refers to
66
+ // the alias sets that the list may be in
67
+ // while containedTypes()[0] refers to the sets that members of the list
68
+ // may be in
69
+ void addContainedType(AliasInfo aliasInfo) {
70
+ containedTypes_.push_back(std::move(aliasInfo));
71
+ }
72
+ const std::vector<AliasInfo>& containedTypes() const {
73
+ return containedTypes_;
74
+ }
75
+
76
+ private:
77
+ std::unordered_set<Symbol> beforeSets_;
78
+ std::unordered_set<Symbol> afterSets_;
79
+ std::vector<AliasInfo> containedTypes_;
80
+ bool isWrite_ = false;
81
+ };
82
+
83
+ inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) {
84
+ return lhs.isWrite() == rhs.isWrite()
85
+ && lhs.beforeSets() == rhs.beforeSets()
86
+ && lhs.afterSets() == rhs.afterSets()
87
+ && lhs.containedTypes() == rhs.containedTypes();
88
+ }
89
+
90
+ // this does match the way things are represented in the schema
91
+ inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) {
92
+ out << "(";
93
+ bool first = true;
94
+ for (const auto& set : aliasInfo.beforeSets()) {
95
+ if (first) {
96
+ first = false;
97
+ } else {
98
+ out << "|";
99
+ }
100
+ out << set.toUnqualString();
101
+ }
102
+ if (aliasInfo.isWrite()) {
103
+ out << "!";
104
+ }
105
+ if (aliasInfo.beforeSets() != aliasInfo.afterSets()) {
106
+ out << " -> ";
107
+ first = true;
108
+ for (const auto& set : aliasInfo.afterSets()) {
109
+ if (first) {
110
+ first = false;
111
+ } else {
112
+ out << "|";
113
+ }
114
+ out << set.toUnqualString();
115
+ }
116
+ }
117
+ out << ")";
118
+ return out;
119
+ }
120
+ } // namespace c10
121
+
122
+ namespace std {
123
+ template <>
124
+ struct hash<c10::AliasInfo> {
125
+ size_t operator()(const c10::AliasInfo& aliasInfo) const {
126
+ auto hash = std::hash<bool>()(aliasInfo.isWrite());
127
+
128
+ // NOTE: for unordered_set hashes, we couldn't use hash_combine
129
+ // because hash_combine is order dependent. Instead, we choose to
130
+ // use XOR as the combining function as XOR is commutative.
131
+ size_t before_set_hash_seed = 0;
132
+ for (auto &e: aliasInfo.beforeSets()) {
133
+ auto symbol_hash = std::hash<c10::Symbol>()(e);
134
+ before_set_hash_seed = before_set_hash_seed ^ symbol_hash;
135
+ }
136
+ size_t after_set_hash_seed = 0;
137
+ for (auto &e: aliasInfo.afterSets()) {
138
+ auto symbol_hash = std::hash<c10::Symbol>()(e);
139
+ after_set_hash_seed = after_set_hash_seed ^ symbol_hash;
140
+ }
141
+
142
+ hash = c10::hash_combine(hash, before_set_hash_seed);
143
+ hash = c10::hash_combine(hash, after_set_hash_seed);
144
+ for (auto &e: aliasInfo.containedTypes()) {
145
+ auto contained_type_hash = std::hash<c10::AliasInfo>()(e);
146
+ hash = c10::hash_combine(hash, contained_type_hash);
147
+ }
148
+ return hash;
149
+ }
150
+ };
151
+ }
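Editor's sketch (not part of the diff above): constructing the AliasInfo that a schema annotation such as `Tensor(a!)` denotes, assuming an ATen build; make_write_annotation and print_annotation are illustrative names:

#include <iostream>

#include <ATen/core/alias_info.h>

// Build the AliasInfo denoted by a schema annotation like `Tensor(a!)`:
// alias set "a", written in place, before set == after set.
c10::AliasInfo make_write_annotation() {
  c10::AliasInfo info;
  info.setIsWrite(true);
  info.addBeforeSet(c10::Symbol::fromQualString("alias::a"));
  info.addAfterSet(c10::Symbol::fromQualString("alias::a"));
  return info;
}

void print_annotation() {
  std::cout << make_write_annotation() << std::endl;  // prints "(a!)"
}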
venv/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h ADDED
@@ -0,0 +1,2213 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from aten_interned_strings.h
4
+
5
+ #if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if including <ATen/core/symbol.h> for \
9
+ the c10::Symbol class would be sufficient, or if your change would be \
10
+ better placed in another file.
11
+ #endif
12
+
13
+ // ATen symbols correspond exactly to operators defined in ATen. Every
14
+ // symbol here corresponds exactly to an ATen operation defined in
15
+ // native_functions.yaml; attributes are in one-to-one correspondence
16
+ // with their ATen name.
17
+
18
+ #define FORALL_ATEN_BASE_SYMBOLS(_) \
19
+ _(aten, __and__) \
20
+ _(aten, __iand__) \
21
+ _(aten, __ilshift__) \
22
+ _(aten, __ior__) \
23
+ _(aten, __irshift__) \
24
+ _(aten, __ixor__) \
25
+ _(aten, __lshift__) \
26
+ _(aten, __or__) \
27
+ _(aten, __rshift__) \
28
+ _(aten, __xor__) \
29
+ _(aten, _adaptive_avg_pool2d) \
30
+ _(aten, _adaptive_avg_pool2d_backward) \
31
+ _(aten, _adaptive_avg_pool3d) \
32
+ _(aten, _adaptive_avg_pool3d_backward) \
33
+ _(aten, _add_batch_dim) \
34
+ _(aten, _add_relu) \
35
+ _(aten, _add_relu_) \
36
+ _(aten, _addmm_activation) \
37
+ _(aten, _aminmax) \
38
+ _(aten, _amp_foreach_non_finite_check_and_unscale) \
39
+ _(aten, _amp_foreach_non_finite_check_and_unscale_) \
40
+ _(aten, _amp_update_scale) \
41
+ _(aten, _amp_update_scale_) \
42
+ _(aten, _assert_async) \
43
+ _(aten, _assert_scalar) \
44
+ _(aten, _assert_tensor_metadata) \
45
+ _(aten, _autocast_to_full_precision) \
46
+ _(aten, _autocast_to_reduced_precision) \
47
+ _(aten, _backward) \
48
+ _(aten, _batch_norm_impl_index) \
49
+ _(aten, _batch_norm_impl_index_backward) \
50
+ _(aten, _cast_Byte) \
51
+ _(aten, _cast_Char) \
52
+ _(aten, _cast_Double) \
53
+ _(aten, _cast_Float) \
54
+ _(aten, _cast_Half) \
55
+ _(aten, _cast_Int) \
56
+ _(aten, _cast_Long) \
57
+ _(aten, _cast_Short) \
58
+ _(aten, _cdist_backward) \
59
+ _(aten, _cdist_forward) \
60
+ _(aten, _cholesky_solve_helper) \
61
+ _(aten, _choose_qparams_per_tensor) \
62
+ _(aten, _chunk_cat) \
63
+ _(aten, _coalesce) \
64
+ _(aten, _coalesced) \
65
+ _(aten, _coalesced_) \
66
+ _(aten, _compute_linear_combination) \
67
+ _(aten, _conj) \
68
+ _(aten, _conj_copy) \
69
+ _(aten, _conj_physical) \
70
+ _(aten, _conv_depthwise2d) \
71
+ _(aten, _convert_indices_from_coo_to_csr) \
72
+ _(aten, _convert_indices_from_csr_to_coo) \
73
+ _(aten, _convert_weight_to_int4pack) \
74
+ _(aten, _convolution) \
75
+ _(aten, _convolution_double_backward) \
76
+ _(aten, _convolution_mode) \
77
+ _(aten, _copy_from) \
78
+ _(aten, _copy_from_and_resize) \
79
+ _(aten, _cslt_compress) \
80
+ _(aten, _cslt_sparse_mm) \
81
+ _(aten, _cslt_sparse_mm_search) \
82
+ _(aten, _ctc_loss) \
83
+ _(aten, _ctc_loss_backward) \
84
+ _(aten, _cudnn_ctc_loss) \
85
+ _(aten, _cudnn_init_dropout_state) \
86
+ _(aten, _cudnn_rnn) \
87
+ _(aten, _cudnn_rnn_backward) \
88
+ _(aten, _cudnn_rnn_flatten_weight) \
89
+ _(aten, _cufft_clear_plan_cache) \
90
+ _(aten, _cufft_get_plan_cache_max_size) \
91
+ _(aten, _cufft_get_plan_cache_size) \
92
+ _(aten, _cufft_set_plan_cache_max_size) \
93
+ _(aten, _cummax_helper) \
94
+ _(aten, _cummin_helper) \
95
+ _(aten, _debug_has_internal_overlap) \
96
+ _(aten, _dimI) \
97
+ _(aten, _dimV) \
98
+ _(aten, _dim_arange) \
99
+ _(aten, _dirichlet_grad) \
100
+ _(aten, _efficient_attention_backward) \
101
+ _(aten, _efficient_attention_forward) \
102
+ _(aten, _efficientzerotensor) \
103
+ _(aten, _embedding_bag) \
104
+ _(aten, _embedding_bag_backward) \
105
+ _(aten, _embedding_bag_dense_backward) \
106
+ _(aten, _embedding_bag_forward_only) \
107
+ _(aten, _embedding_bag_per_sample_weights_backward) \
108
+ _(aten, _embedding_bag_sparse_backward) \
109
+ _(aten, _empty_affine_quantized) \
110
+ _(aten, _empty_per_channel_affine_quantized) \
111
+ _(aten, _euclidean_dist) \
112
+ _(aten, _fake_quantize_learnable_per_channel_affine) \
113
+ _(aten, _fake_quantize_learnable_per_channel_affine_backward) \
114
+ _(aten, _fake_quantize_learnable_per_tensor_affine) \
115
+ _(aten, _fake_quantize_learnable_per_tensor_affine_backward) \
116
+ _(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) \
117
+ _(aten, _fft_c2c) \
118
+ _(aten, _fft_c2r) \
119
+ _(aten, _fft_r2c) \
120
+ _(aten, _fill_mem_eff_dropout_mask) \
121
+ _(aten, _fill_mem_eff_dropout_mask_) \
122
+ _(aten, _flash_attention_backward) \
123
+ _(aten, _flash_attention_forward) \
124
+ _(aten, _foobar) \
125
+ _(aten, _foreach_abs) \
126
+ _(aten, _foreach_abs_) \
127
+ _(aten, _foreach_acos) \
128
+ _(aten, _foreach_acos_) \
129
+ _(aten, _foreach_add) \
130
+ _(aten, _foreach_add_) \
131
+ _(aten, _foreach_addcdiv) \
132
+ _(aten, _foreach_addcdiv_) \
133
+ _(aten, _foreach_addcmul) \
134
+ _(aten, _foreach_addcmul_) \
135
+ _(aten, _foreach_asin) \
136
+ _(aten, _foreach_asin_) \
137
+ _(aten, _foreach_atan) \
138
+ _(aten, _foreach_atan_) \
139
+ _(aten, _foreach_ceil) \
140
+ _(aten, _foreach_ceil_) \
141
+ _(aten, _foreach_clamp_max) \
142
+ _(aten, _foreach_clamp_max_) \
143
+ _(aten, _foreach_clamp_min) \
144
+ _(aten, _foreach_clamp_min_) \
145
+ _(aten, _foreach_copy) \
146
+ _(aten, _foreach_copy_) \
147
+ _(aten, _foreach_cos) \
148
+ _(aten, _foreach_cos_) \
149
+ _(aten, _foreach_cosh) \
150
+ _(aten, _foreach_cosh_) \
151
+ _(aten, _foreach_div) \
152
+ _(aten, _foreach_div_) \
153
+ _(aten, _foreach_erf) \
154
+ _(aten, _foreach_erf_) \
155
+ _(aten, _foreach_erfc) \
156
+ _(aten, _foreach_erfc_) \
157
+ _(aten, _foreach_exp) \
158
+ _(aten, _foreach_exp_) \
159
+ _(aten, _foreach_expm1) \
160
+ _(aten, _foreach_expm1_) \
161
+ _(aten, _foreach_floor) \
162
+ _(aten, _foreach_floor_) \
163
+ _(aten, _foreach_frac) \
164
+ _(aten, _foreach_frac_) \
165
+ _(aten, _foreach_lerp) \
166
+ _(aten, _foreach_lerp_) \
167
+ _(aten, _foreach_lgamma) \
168
+ _(aten, _foreach_lgamma_) \
169
+ _(aten, _foreach_log) \
170
+ _(aten, _foreach_log10) \
171
+ _(aten, _foreach_log10_) \
172
+ _(aten, _foreach_log1p) \
173
+ _(aten, _foreach_log1p_) \
174
+ _(aten, _foreach_log2) \
175
+ _(aten, _foreach_log2_) \
176
+ _(aten, _foreach_log_) \
177
+ _(aten, _foreach_maximum) \
178
+ _(aten, _foreach_maximum_) \
179
+ _(aten, _foreach_minimum) \
180
+ _(aten, _foreach_minimum_) \
181
+ _(aten, _foreach_mul) \
182
+ _(aten, _foreach_mul_) \
183
+ _(aten, _foreach_neg) \
184
+ _(aten, _foreach_neg_) \
185
+ _(aten, _foreach_norm) \
186
+ _(aten, _foreach_pow) \
187
+ _(aten, _foreach_pow_) \
188
+ _(aten, _foreach_reciprocal) \
189
+ _(aten, _foreach_reciprocal_) \
190
+ _(aten, _foreach_round) \
191
+ _(aten, _foreach_round_) \
192
+ _(aten, _foreach_sigmoid) \
193
+ _(aten, _foreach_sigmoid_) \
194
+ _(aten, _foreach_sign) \
195
+ _(aten, _foreach_sign_) \
196
+ _(aten, _foreach_sin) \
197
+ _(aten, _foreach_sin_) \
198
+ _(aten, _foreach_sinh) \
199
+ _(aten, _foreach_sinh_) \
200
+ _(aten, _foreach_sqrt) \
201
+ _(aten, _foreach_sqrt_) \
202
+ _(aten, _foreach_sub) \
203
+ _(aten, _foreach_sub_) \
204
+ _(aten, _foreach_tan) \
205
+ _(aten, _foreach_tan_) \
206
+ _(aten, _foreach_tanh) \
207
+ _(aten, _foreach_tanh_) \
208
+ _(aten, _foreach_trunc) \
209
+ _(aten, _foreach_trunc_) \
210
+ _(aten, _foreach_zero) \
211
+ _(aten, _foreach_zero_) \
212
+ _(aten, _functional_assert_async) \
213
+ _(aten, _functional_assert_scalar) \
214
+ _(aten, _functional_sym_constrain_range) \
215
+ _(aten, _functional_sym_constrain_range_for_size) \
216
+ _(aten, _fused_adam) \
217
+ _(aten, _fused_adam_) \
218
+ _(aten, _fused_adamw) \
219
+ _(aten, _fused_adamw_) \
220
+ _(aten, _fused_dropout) \
221
+ _(aten, _fused_moving_avg_obs_fq_helper) \
222
+ _(aten, _fused_moving_avg_obs_fq_helper_functional) \
223
+ _(aten, _fused_sdp_choice) \
224
+ _(aten, _fused_sgd) \
225
+ _(aten, _fused_sgd_) \
226
+ _(aten, _fw_primal) \
227
+ _(aten, _fw_primal_copy) \
228
+ _(aten, _gather_sparse_backward) \
229
+ _(aten, _grid_sampler_2d_cpu_fallback) \
230
+ _(aten, _grid_sampler_2d_cpu_fallback_backward) \
231
+ _(aten, _has_compatible_shallow_copy_type) \
232
+ _(aten, _has_same_storage_numel) \
233
+ _(aten, _histogramdd_bin_edges) \
234
+ _(aten, _histogramdd_from_bin_cts) \
235
+ _(aten, _histogramdd_from_bin_tensors) \
236
+ _(aten, _index_put_impl) \
237
+ _(aten, _index_put_impl_) \
238
+ _(aten, _indices) \
239
+ _(aten, _indices_copy) \
240
+ _(aten, _int_mm) \
241
+ _(aten, _is_all_true) \
242
+ _(aten, _is_any_true) \
243
+ _(aten, _is_zerotensor) \
244
+ _(aten, _lazy_clone) \
245
+ _(aten, _linalg_check_errors) \
246
+ _(aten, _linalg_det) \
247
+ _(aten, _linalg_eigh) \
248
+ _(aten, _linalg_eigvals) \
249
+ _(aten, _linalg_slogdet) \
250
+ _(aten, _linalg_solve_ex) \
251
+ _(aten, _linalg_svd) \
252
+ _(aten, _local_scalar_dense) \
253
+ _(aten, _log_softmax) \
254
+ _(aten, _log_softmax_backward_data) \
255
+ _(aten, _logcumsumexp) \
256
+ _(aten, _lstm_mps) \
257
+ _(aten, _lu_with_info) \
258
+ _(aten, _make_dep_token) \
259
+ _(aten, _make_dual) \
260
+ _(aten, _make_dual_copy) \
261
+ _(aten, _make_per_channel_quantized_tensor) \
262
+ _(aten, _make_per_tensor_quantized_tensor) \
263
+ _(aten, _masked_scale) \
264
+ _(aten, _masked_softmax) \
265
+ _(aten, _masked_softmax_backward) \
266
+ _(aten, _mixed_dtypes_linear) \
267
+ _(aten, _mkldnn_reshape) \
268
+ _(aten, _mkldnn_transpose) \
269
+ _(aten, _mkldnn_transpose_) \
270
+ _(aten, _mps_convolution) \
271
+ _(aten, _mps_convolution_transpose) \
272
+ _(aten, _native_batch_norm_legit) \
273
+ _(aten, _native_batch_norm_legit_functional) \
274
+ _(aten, _native_batch_norm_legit_no_training) \
275
+ _(aten, _native_multi_head_attention) \
276
+ _(aten, _neg_view) \
277
+ _(aten, _neg_view_copy) \
278
+ _(aten, _nested_from_padded) \
279
+ _(aten, _nested_from_padded_and_nested_example) \
280
+ _(aten, _nested_get_jagged_dummy) \
281
+ _(aten, _nested_get_lengths) \
282
+ _(aten, _nested_get_offsets) \
283
+ _(aten, _nested_get_ragged_idx) \
284
+ _(aten, _nested_get_values) \
285
+ _(aten, _nested_get_values_copy) \
286
+ _(aten, _nested_select_backward) \
287
+ _(aten, _nested_sum_backward) \
288
+ _(aten, _nested_tensor_from_mask) \
289
+ _(aten, _nested_tensor_from_mask_left_aligned) \
290
+ _(aten, _nested_tensor_from_tensor_list) \
291
+ _(aten, _nested_tensor_size) \
292
+ _(aten, _nested_tensor_softmax_with_shape) \
293
+ _(aten, _nested_tensor_storage_offsets) \
294
+ _(aten, _nested_tensor_strides) \
295
+ _(aten, _nested_view_from_buffer) \
296
+ _(aten, _nested_view_from_buffer_copy) \
297
+ _(aten, _nested_view_from_jagged) \
298
+ _(aten, _nested_view_from_jagged_copy) \
299
+ _(aten, _new_zeros_with_same_feature_meta) \
300
+ _(aten, _nnpack_available) \
301
+ _(aten, _nnpack_spatial_convolution) \
302
+ _(aten, _nnz) \
303
+ _(aten, _pack_padded_sequence) \
304
+ _(aten, _pack_padded_sequence_backward) \
305
+ _(aten, _pad_circular) \
306
+ _(aten, _pad_enum) \
307
+ _(aten, _pad_packed_sequence) \
308
+ _(aten, _pdist_backward) \
309
+ _(aten, _pdist_forward) \
310
+ _(aten, _pin_memory) \
311
+ _(aten, _prelu_kernel) \
312
+ _(aten, _prelu_kernel_backward) \
313
+ _(aten, _print) \
314
+ _(aten, _propagate_xla_data) \
315
+ _(aten, _remove_batch_dim) \
316
+ _(aten, _reshape_alias) \
317
+ _(aten, _reshape_alias_copy) \
318
+ _(aten, _reshape_copy) \
319
+ _(aten, _reshape_from_tensor) \
320
+ _(aten, _resize_output) \
321
+ _(aten, _resize_output_) \
322
+ _(aten, _rowwise_prune) \
323
+ _(aten, _sample_dirichlet) \
324
+ _(aten, _saturate_weight_to_fp16) \
325
+ _(aten, _scaled_dot_product_attention_math) \
326
+ _(aten, _scaled_dot_product_cudnn_attention) \
327
+ _(aten, _scaled_dot_product_efficient_attention) \
328
+ _(aten, _scaled_dot_product_efficient_attention_backward) \
329
+ _(aten, _scaled_dot_product_flash_attention) \
330
+ _(aten, _scaled_dot_product_flash_attention_backward) \
331
+ _(aten, _scaled_dot_product_flash_attention_for_cpu) \
332
+ _(aten, _scaled_dot_product_flash_attention_for_cpu_backward) \
333
+ _(aten, _scaled_mm) \
334
+ _(aten, _segment_reduce_backward) \
335
+ _(aten, _shape_as_tensor) \
336
+ _(aten, _slow_conv2d_backward) \
337
+ _(aten, _slow_conv2d_forward) \
338
+ _(aten, _sobol_engine_draw) \
339
+ _(aten, _sobol_engine_ff) \
340
+ _(aten, _sobol_engine_ff_) \
341
+ _(aten, _sobol_engine_initialize_state) \
342
+ _(aten, _sobol_engine_initialize_state_) \
343
+ _(aten, _sobol_engine_scramble) \
344
+ _(aten, _sobol_engine_scramble_) \
345
+ _(aten, _softmax) \
346
+ _(aten, _softmax_backward_data) \
347
+ _(aten, _sparse_addmm) \
348
+ _(aten, _sparse_broadcast_to) \
349
+ _(aten, _sparse_broadcast_to_copy) \
350
+ _(aten, _sparse_bsc_tensor_unsafe) \
351
+ _(aten, _sparse_bsr_tensor_unsafe) \
352
+ _(aten, _sparse_compressed_tensor_unsafe) \
353
+ _(aten, _sparse_coo_tensor_unsafe) \
354
+ _(aten, _sparse_coo_tensor_with_dims) \
355
+ _(aten, _sparse_coo_tensor_with_dims_and_tensors) \
356
+ _(aten, _sparse_csc_tensor_unsafe) \
357
+ _(aten, _sparse_csr_prod) \
358
+ _(aten, _sparse_csr_sum) \
359
+ _(aten, _sparse_csr_tensor_unsafe) \
360
+ _(aten, _sparse_log_softmax) \
361
+ _(aten, _sparse_log_softmax_backward_data) \
362
+ _(aten, _sparse_mask_projection) \
363
+ _(aten, _sparse_mm) \
364
+ _(aten, _sparse_mm_reduce_impl) \
365
+ _(aten, _sparse_mm_reduce_impl_backward) \
366
+ _(aten, _sparse_semi_structured_linear) \
367
+ _(aten, _sparse_softmax) \
368
+ _(aten, _sparse_softmax_backward_data) \
369
+ _(aten, _sparse_sparse_matmul) \
370
+ _(aten, _sparse_sum) \
371
+ _(aten, _sparse_sum_backward) \
372
+ _(aten, _spdiags) \
373
+ _(aten, _stack) \
374
+ _(aten, _standard_gamma) \
375
+ _(aten, _standard_gamma_grad) \
376
+ _(aten, _test_ambiguous_defaults) \
377
+ _(aten, _test_autograd_multiple_dispatch) \
378
+ _(aten, _test_autograd_multiple_dispatch_view) \
379
+ _(aten, _test_autograd_multiple_dispatch_view_copy) \
380
+ _(aten, _test_check_tensor) \
381
+ _(aten, _test_functorch_fallback) \
382
+ _(aten, _test_optional_filled_intlist) \
383
+ _(aten, _test_optional_floatlist) \
384
+ _(aten, _test_optional_intlist) \
385
+ _(aten, _test_parallel_materialize) \
386
+ _(aten, _test_serialization_subcmul) \
387
+ _(aten, _test_string_default) \
388
+ _(aten, _test_warn_in_autograd) \
389
+ _(aten, _thnn_differentiable_gru_cell_backward) \
390
+ _(aten, _thnn_differentiable_lstm_cell_backward) \
391
+ _(aten, _thnn_fused_gru_cell) \
392
+ _(aten, _thnn_fused_gru_cell_backward) \
393
+ _(aten, _thnn_fused_lstm_cell) \
394
+ _(aten, _thnn_fused_lstm_cell_backward) \
395
+ _(aten, _thnn_fused_lstm_cell_backward_impl) \
396
+ _(aten, _to_copy) \
397
+ _(aten, _to_cpu) \
398
+ _(aten, _to_dense) \
399
+ _(aten, _to_sparse) \
400
+ _(aten, _to_sparse_bsc) \
401
+ _(aten, _to_sparse_bsr) \
402
+ _(aten, _to_sparse_csc) \
403
+ _(aten, _to_sparse_csr) \
404
+ _(aten, _to_sparse_semi_structured) \
405
+ _(aten, _transform_bias_rescale_qkv) \
406
+ _(aten, _transformer_encoder_layer_fwd) \
407
+ _(aten, _trilinear) \
408
+ _(aten, _triton_multi_head_attention) \
409
+ _(aten, _triton_scaled_dot_attention) \
410
+ _(aten, _unique) \
411
+ _(aten, _unique2) \
412
+ _(aten, _unpack_dual) \
413
+ _(aten, _unsafe_index) \
414
+ _(aten, _unsafe_index_put) \
415
+ _(aten, _unsafe_view) \
416
+ _(aten, _upsample_bicubic2d_aa) \
417
+ _(aten, _upsample_bicubic2d_aa_backward) \
418
+ _(aten, _upsample_bilinear2d_aa) \
419
+ _(aten, _upsample_bilinear2d_aa_backward) \
420
+ _(aten, _upsample_nearest_exact1d) \
421
+ _(aten, _upsample_nearest_exact1d_backward) \
422
+ _(aten, _upsample_nearest_exact2d) \
423
+ _(aten, _upsample_nearest_exact2d_backward) \
424
+ _(aten, _upsample_nearest_exact3d) \
425
+ _(aten, _upsample_nearest_exact3d_backward) \
426
+ _(aten, _use_cudnn_ctc_loss) \
427
+ _(aten, _use_cudnn_rnn_flatten_weight) \
428
+ _(aten, _validate_compressed_sparse_indices) \
429
+ _(aten, _validate_sparse_bsc_tensor_args) \
430
+ _(aten, _validate_sparse_bsr_tensor_args) \
431
+ _(aten, _validate_sparse_compressed_tensor_args) \
432
+ _(aten, _validate_sparse_coo_tensor_args) \
433
+ _(aten, _validate_sparse_csc_tensor_args) \
434
+ _(aten, _validate_sparse_csr_tensor_args) \
435
+ _(aten, _values) \
436
+ _(aten, _values_copy) \
437
+ _(aten, _version) \
438
+ _(aten, _weight_int4pack_mm) \
439
+ _(aten, _weight_int8pack_mm) \
440
+ _(aten, _weight_norm) \
441
+ _(aten, _weight_norm_differentiable_backward) \
442
+ _(aten, _weight_norm_interface) \
443
+ _(aten, _weight_norm_interface_backward) \
444
+ _(aten, abs) \
445
+ _(aten, abs_) \
446
+ _(aten, absolute) \
447
+ _(aten, absolute_) \
448
+ _(aten, acos) \
449
+ _(aten, acos_) \
450
+ _(aten, acosh) \
451
+ _(aten, acosh_) \
452
+ _(aten, adaptive_avg_pool1d) \
453
+ _(aten, adaptive_avg_pool2d) \
454
+ _(aten, adaptive_avg_pool3d) \
455
+ _(aten, adaptive_avg_pool3d_backward) \
456
+ _(aten, adaptive_max_pool1d) \
457
+ _(aten, adaptive_max_pool2d) \
458
+ _(aten, adaptive_max_pool2d_backward) \
459
+ _(aten, adaptive_max_pool3d) \
460
+ _(aten, adaptive_max_pool3d_backward) \
461
+ _(aten, add) \
462
+ _(aten, add_) \
463
+ _(aten, addbmm) \
464
+ _(aten, addbmm_) \
465
+ _(aten, addcdiv) \
466
+ _(aten, addcdiv_) \
467
+ _(aten, addcmul) \
468
+ _(aten, addcmul_) \
469
+ _(aten, addmm) \
470
+ _(aten, addmm_) \
471
+ _(aten, addmv) \
472
+ _(aten, addmv_) \
473
+ _(aten, addr) \
474
+ _(aten, addr_) \
475
+ _(aten, adjoint) \
476
+ _(aten, affine_grid_generator) \
477
+ _(aten, affine_grid_generator_backward) \
478
+ _(aten, alias) \
479
+ _(aten, alias_copy) \
480
+ _(aten, align_as) \
481
+ _(aten, align_tensors) \
482
+ _(aten, align_to) \
483
+ _(aten, all) \
484
+ _(aten, allclose) \
485
+ _(aten, alpha_dropout) \
486
+ _(aten, alpha_dropout_) \
487
+ _(aten, amax) \
488
+ _(aten, amin) \
489
+ _(aten, aminmax) \
490
+ _(aten, angle) \
491
+ _(aten, any) \
492
+ _(aten, arange) \
493
+ _(aten, arccos) \
494
+ _(aten, arccos_) \
495
+ _(aten, arccosh) \
496
+ _(aten, arccosh_) \
497
+ _(aten, arcsin) \
498
+ _(aten, arcsin_) \
499
+ _(aten, arcsinh) \
500
+ _(aten, arcsinh_) \
501
+ _(aten, arctan) \
502
+ _(aten, arctan2) \
503
+ _(aten, arctan2_) \
504
+ _(aten, arctan_) \
505
+ _(aten, arctanh) \
506
+ _(aten, arctanh_) \
507
+ _(aten, argmax) \
508
+ _(aten, argmin) \
509
+ _(aten, argsort) \
510
+ _(aten, argwhere) \
511
+ _(aten, as_strided) \
512
+ _(aten, as_strided_) \
513
+ _(aten, as_strided_copy) \
514
+ _(aten, as_strided_scatter) \
515
+ _(aten, asin) \
516
+ _(aten, asin_) \
517
+ _(aten, asinh) \
518
+ _(aten, asinh_) \
519
+ _(aten, atan) \
520
+ _(aten, atan2) \
521
+ _(aten, atan2_) \
522
+ _(aten, atan_) \
523
+ _(aten, atanh) \
524
+ _(aten, atanh_) \
525
+ _(aten, atleast_1d) \
526
+ _(aten, atleast_2d) \
527
+ _(aten, atleast_3d) \
528
+ _(aten, avg_pool1d) \
529
+ _(aten, avg_pool2d) \
530
+ _(aten, avg_pool2d_backward) \
531
+ _(aten, avg_pool3d) \
532
+ _(aten, avg_pool3d_backward) \
533
+ _(aten, baddbmm) \
534
+ _(aten, baddbmm_) \
535
+ _(aten, bartlett_window) \
536
+ _(aten, batch_norm) \
537
+ _(aten, batch_norm_backward_elemt) \
538
+ _(aten, batch_norm_backward_reduce) \
539
+ _(aten, batch_norm_elemt) \
540
+ _(aten, batch_norm_gather_stats) \
541
+ _(aten, batch_norm_gather_stats_with_counts) \
542
+ _(aten, batch_norm_stats) \
543
+ _(aten, batch_norm_update_stats) \
544
+ _(aten, bernoulli) \
545
+ _(aten, bernoulli_) \
546
+ _(aten, bilinear) \
547
+ _(aten, binary_cross_entropy) \
548
+ _(aten, binary_cross_entropy_backward) \
549
+ _(aten, binary_cross_entropy_with_logits) \
550
+ _(aten, bincount) \
551
+ _(aten, binomial) \
552
+ _(aten, bitwise_and) \
553
+ _(aten, bitwise_and_) \
554
+ _(aten, bitwise_left_shift) \
555
+ _(aten, bitwise_left_shift_) \
556
+ _(aten, bitwise_not) \
557
+ _(aten, bitwise_not_) \
558
+ _(aten, bitwise_or) \
559
+ _(aten, bitwise_or_) \
560
+ _(aten, bitwise_right_shift) \
561
+ _(aten, bitwise_right_shift_) \
562
+ _(aten, bitwise_xor) \
563
+ _(aten, bitwise_xor_) \
564
+ _(aten, blackman_window) \
565
+ _(aten, block_diag) \
566
+ _(aten, bmm) \
567
+ _(aten, broadcast_tensors) \
568
+ _(aten, broadcast_to) \
569
+ _(aten, bucketize) \
570
+ _(aten, can_cast) \
571
+ _(aten, cartesian_prod) \
572
+ _(aten, cat) \
573
+ _(aten, cauchy) \
574
+ _(aten, cauchy_) \
575
+ _(aten, ccol_indices) \
576
+ _(aten, ccol_indices_copy) \
577
+ _(aten, cdist) \
578
+ _(aten, ceil) \
579
+ _(aten, ceil_) \
580
+ _(aten, celu) \
581
+ _(aten, celu_) \
582
+ _(aten, chain_matmul) \
583
+ _(aten, chalf) \
584
+ _(aten, channel_shuffle) \
585
+ _(aten, cholesky) \
586
+ _(aten, cholesky_inverse) \
587
+ _(aten, cholesky_solve) \
588
+ _(aten, choose_qparams_optimized) \
589
+ _(aten, chunk) \
590
+ _(aten, clamp) \
591
+ _(aten, clamp_) \
592
+ _(aten, clamp_max) \
593
+ _(aten, clamp_max_) \
594
+ _(aten, clamp_min) \
595
+ _(aten, clamp_min_) \
596
+ _(aten, clip) \
597
+ _(aten, clip_) \
598
+ _(aten, clone) \
599
+ _(aten, coalesce) \
600
+ _(aten, col2im) \
601
+ _(aten, col_indices) \
602
+ _(aten, col_indices_copy) \
603
+ _(aten, column_stack) \
604
+ _(aten, combinations) \
605
+ _(aten, complex) \
606
+ _(aten, concat) \
607
+ _(aten, concatenate) \
608
+ _(aten, conj) \
609
+ _(aten, conj_physical) \
610
+ _(aten, conj_physical_) \
611
+ _(aten, constant_pad_nd) \
612
+ _(aten, contiguous) \
613
+ _(aten, conv1d) \
614
+ _(aten, conv2d) \
615
+ _(aten, conv3d) \
616
+ _(aten, conv_depthwise3d) \
617
+ _(aten, conv_tbc) \
618
+ _(aten, conv_tbc_backward) \
619
+ _(aten, conv_transpose1d) \
620
+ _(aten, conv_transpose2d) \
621
+ _(aten, conv_transpose3d) \
622
+ _(aten, convolution) \
623
+ _(aten, convolution_backward) \
624
+ _(aten, convolution_backward_overrideable) \
625
+ _(aten, convolution_overrideable) \
626
+ _(aten, copy) \
627
+ _(aten, copy_) \
628
+ _(aten, copy_sparse_to_sparse) \
629
+ _(aten, copy_sparse_to_sparse_) \
630
+ _(aten, copysign) \
631
+ _(aten, copysign_) \
632
+ _(aten, corrcoef) \
633
+ _(aten, cos) \
634
+ _(aten, cos_) \
635
+ _(aten, cosh) \
636
+ _(aten, cosh_) \
637
+ _(aten, cosine_embedding_loss) \
638
+ _(aten, cosine_similarity) \
639
+ _(aten, count_nonzero) \
640
+ _(aten, cov) \
641
+ _(aten, cross) \
642
+ _(aten, cross_entropy_loss) \
643
+ _(aten, crow_indices) \
644
+ _(aten, crow_indices_copy) \
645
+ _(aten, ctc_loss) \
646
+ _(aten, cudnn_affine_grid_generator) \
647
+ _(aten, cudnn_affine_grid_generator_backward) \
648
+ _(aten, cudnn_batch_norm) \
649
+ _(aten, cudnn_batch_norm_backward) \
650
+ _(aten, cudnn_convolution) \
651
+ _(aten, cudnn_convolution_add_relu) \
652
+ _(aten, cudnn_convolution_relu) \
653
+ _(aten, cudnn_convolution_transpose) \
654
+ _(aten, cudnn_grid_sampler) \
655
+ _(aten, cudnn_grid_sampler_backward) \
656
+ _(aten, cudnn_is_acceptable) \
657
+ _(aten, cummax) \
658
+ _(aten, cummaxmin_backward) \
659
+ _(aten, cummin) \
660
+ _(aten, cumprod) \
661
+ _(aten, cumprod_) \
662
+ _(aten, cumprod_backward) \
663
+ _(aten, cumsum) \
664
+ _(aten, cumsum_) \
665
+ _(aten, cumulative_trapezoid) \
666
+ _(aten, data) \
667
+ _(aten, deg2rad) \
668
+ _(aten, deg2rad_) \
669
+ _(aten, dense_dim) \
670
+ _(aten, dequantize) \
671
+ _(aten, det) \
672
+ _(aten, detach) \
673
+ _(aten, detach_) \
674
+ _(aten, detach_copy) \
675
+ _(aten, diag) \
676
+ _(aten, diag_embed) \
677
+ _(aten, diagflat) \
678
+ _(aten, diagonal) \
679
+ _(aten, diagonal_backward) \
680
+ _(aten, diagonal_copy) \
681
+ _(aten, diagonal_scatter) \
682
+ _(aten, diff) \
683
+ _(aten, digamma) \
684
+ _(aten, digamma_) \
685
+ _(aten, dist) \
686
+ _(aten, div) \
687
+ _(aten, div_) \
688
+ _(aten, divide) \
689
+ _(aten, divide_) \
690
+ _(aten, dot) \
691
+ _(aten, dropout) \
692
+ _(aten, dropout_) \
693
+ _(aten, dsplit) \
694
+ _(aten, dstack) \
695
+ _(aten, einsum) \
696
+ _(aten, elu) \
697
+ _(aten, elu_) \
698
+ _(aten, elu_backward) \
699
+ _(aten, embedding) \
700
+ _(aten, embedding_backward) \
701
+ _(aten, embedding_bag) \
702
+ _(aten, embedding_dense_backward) \
703
+ _(aten, embedding_renorm) \
704
+ _(aten, embedding_renorm_) \
705
+ _(aten, embedding_sparse_backward) \
706
+ _(aten, empty) \
707
+ _(aten, empty_like) \
708
+ _(aten, empty_permuted) \
709
+ _(aten, empty_quantized) \
710
+ _(aten, empty_strided) \
711
+ _(aten, eq) \
712
+ _(aten, eq_) \
713
+ _(aten, equal) \
714
+ _(aten, erf) \
715
+ _(aten, erf_) \
716
+ _(aten, erfc) \
717
+ _(aten, erfc_) \
718
+ _(aten, erfinv) \
719
+ _(aten, erfinv_) \
720
+ _(aten, exp) \
721
+ _(aten, exp2) \
722
+ _(aten, exp2_) \
723
+ _(aten, exp_) \
724
+ _(aten, expand) \
725
+ _(aten, expand_as) \
726
+ _(aten, expand_copy) \
727
+ _(aten, expm1) \
728
+ _(aten, expm1_) \
729
+ _(aten, exponential) \
730
+ _(aten, exponential_) \
731
+ _(aten, eye) \
732
+ _(aten, fake_quantize_per_channel_affine) \
733
+ _(aten, fake_quantize_per_channel_affine_cachemask) \
734
+ _(aten, fake_quantize_per_channel_affine_cachemask_backward) \
735
+ _(aten, fake_quantize_per_tensor_affine) \
736
+ _(aten, fake_quantize_per_tensor_affine_cachemask) \
737
+ _(aten, fake_quantize_per_tensor_affine_cachemask_backward) \
738
+ _(aten, fbgemm_linear_fp16_weight) \
739
+ _(aten, fbgemm_linear_fp16_weight_fp32_activation) \
740
+ _(aten, fbgemm_linear_int8_weight) \
741
+ _(aten, fbgemm_linear_int8_weight_fp32_activation) \
742
+ _(aten, fbgemm_linear_quantize_weight) \
743
+ _(aten, fbgemm_pack_gemm_matrix_fp16) \
744
+ _(aten, fbgemm_pack_quantized_matrix) \
745
+ _(aten, feature_alpha_dropout) \
746
+ _(aten, feature_alpha_dropout_) \
747
+ _(aten, feature_dropout) \
748
+ _(aten, feature_dropout_) \
749
+ _(aten, fft_fft) \
750
+ _(aten, fft_fft2) \
751
+ _(aten, fft_fftfreq) \
752
+ _(aten, fft_fftn) \
753
+ _(aten, fft_fftshift) \
754
+ _(aten, fft_hfft) \
755
+ _(aten, fft_hfft2) \
756
+ _(aten, fft_hfftn) \
757
+ _(aten, fft_ifft) \
758
+ _(aten, fft_ifft2) \
759
+ _(aten, fft_ifftn) \
760
+ _(aten, fft_ifftshift) \
761
+ _(aten, fft_ihfft) \
762
+ _(aten, fft_ihfft2) \
763
+ _(aten, fft_ihfftn) \
764
+ _(aten, fft_irfft) \
765
+ _(aten, fft_irfft2) \
766
+ _(aten, fft_irfftn) \
767
+ _(aten, fft_rfft) \
768
+ _(aten, fft_rfft2) \
769
+ _(aten, fft_rfftfreq) \
770
+ _(aten, fft_rfftn) \
771
+ _(aten, fill) \
772
+ _(aten, fill_) \
773
+ _(aten, fill_diagonal) \
774
+ _(aten, fill_diagonal_) \
775
+ _(aten, fix) \
776
+ _(aten, fix_) \
777
+ _(aten, flatten) \
778
+ _(aten, flatten_dense_tensors) \
779
+ _(aten, flip) \
780
+ _(aten, fliplr) \
781
+ _(aten, flipud) \
782
+ _(aten, float_power) \
783
+ _(aten, float_power_) \
784
+ _(aten, floor) \
785
+ _(aten, floor_) \
786
+ _(aten, floor_divide) \
787
+ _(aten, floor_divide_) \
788
+ _(aten, fmax) \
789
+ _(aten, fmin) \
790
+ _(aten, fmod) \
791
+ _(aten, fmod_) \
792
+ _(aten, frac) \
793
+ _(aten, frac_) \
794
+ _(aten, fractional_max_pool2d) \
795
+ _(aten, fractional_max_pool2d_backward) \
796
+ _(aten, fractional_max_pool3d) \
797
+ _(aten, fractional_max_pool3d_backward) \
798
+ _(aten, frexp) \
799
+ _(aten, frobenius_norm) \
800
+ _(aten, from_file) \
801
+ _(aten, full) \
802
+ _(aten, full_like) \
803
+ _(aten, fused_moving_avg_obs_fake_quant) \
804
+ _(aten, gather) \
805
+ _(aten, gather_backward) \
806
+ _(aten, gcd) \
807
+ _(aten, gcd_) \
808
+ _(aten, ge) \
809
+ _(aten, ge_) \
810
+ _(aten, gelu) \
811
+ _(aten, gelu_) \
812
+ _(aten, gelu_backward) \
813
+ _(aten, geometric) \
814
+ _(aten, geometric_) \
815
+ _(aten, geqrf) \
816
+ _(aten, ger) \
817
+ _(aten, glu) \
818
+ _(aten, glu_backward) \
819
+ _(aten, glu_backward_jvp) \
820
+ _(aten, glu_jvp) \
821
+ _(aten, gradient) \
822
+ _(aten, greater) \
823
+ _(aten, greater_) \
824
+ _(aten, greater_equal) \
825
+ _(aten, greater_equal_) \
826
+ _(aten, grid_sampler) \
827
+ _(aten, grid_sampler_2d) \
828
+ _(aten, grid_sampler_2d_backward) \
829
+ _(aten, grid_sampler_3d) \
830
+ _(aten, grid_sampler_3d_backward) \
831
+ _(aten, group_norm) \
832
+ _(aten, gru) \
833
+ _(aten, gru_cell) \
834
+ _(aten, gt) \
835
+ _(aten, gt_) \
836
+ _(aten, hamming_window) \
837
+ _(aten, hann_window) \
838
+ _(aten, hardshrink) \
839
+ _(aten, hardshrink_backward) \
840
+ _(aten, hardsigmoid) \
841
+ _(aten, hardsigmoid_) \
842
+ _(aten, hardsigmoid_backward) \
843
+ _(aten, hardswish) \
844
+ _(aten, hardswish_) \
845
+ _(aten, hardswish_backward) \
846
+ _(aten, hardtanh) \
847
+ _(aten, hardtanh_) \
848
+ _(aten, hardtanh_backward) \
849
+ _(aten, heaviside) \
850
+ _(aten, heaviside_) \
851
+ _(aten, hinge_embedding_loss) \
852
+ _(aten, histc) \
853
+ _(aten, histogram) \
854
+ _(aten, histogramdd) \
855
+ _(aten, hsplit) \
856
+ _(aten, hspmm) \
857
+ _(aten, hstack) \
858
+ _(aten, huber_loss) \
859
+ _(aten, huber_loss_backward) \
860
+ _(aten, hypot) \
861
+ _(aten, hypot_) \
862
+ _(aten, i0) \
863
+ _(aten, i0_) \
864
+ _(aten, igamma) \
865
+ _(aten, igamma_) \
866
+ _(aten, igammac) \
867
+ _(aten, igammac_) \
868
+ _(aten, im2col) \
869
+ _(aten, imag) \
870
+ _(aten, index) \
871
+ _(aten, index_add) \
872
+ _(aten, index_add_) \
873
+ _(aten, index_copy) \
874
+ _(aten, index_copy_) \
875
+ _(aten, index_fill) \
876
+ _(aten, index_fill_) \
877
+ _(aten, index_put) \
878
+ _(aten, index_put_) \
879
+ _(aten, index_reduce) \
880
+ _(aten, index_reduce_) \
881
+ _(aten, index_select) \
882
+ _(aten, index_select_backward) \
883
+ _(aten, indices) \
884
+ _(aten, indices_copy) \
885
+ _(aten, infinitely_differentiable_gelu_backward) \
886
+ _(aten, inner) \
887
+ _(aten, instance_norm) \
888
+ _(aten, int_repr) \
889
+ _(aten, inverse) \
890
+ _(aten, is_coalesced) \
891
+ _(aten, is_complex) \
892
+ _(aten, is_conj) \
893
+ _(aten, is_distributed) \
894
+ _(aten, is_floating_point) \
895
+ _(aten, is_inference) \
896
+ _(aten, is_leaf) \
897
+ _(aten, is_neg) \
898
+ _(aten, is_nonzero) \
899
+ _(aten, is_pinned) \
900
+ _(aten, is_same_size) \
901
+ _(aten, is_set_to) \
902
+ _(aten, is_signed) \
903
+ _(aten, is_vulkan_available) \
904
+ _(aten, isclose) \
905
+ _(aten, isfinite) \
906
+ _(aten, isin) \
907
+ _(aten, isinf) \
908
+ _(aten, isnan) \
909
+ _(aten, isneginf) \
910
+ _(aten, isposinf) \
911
+ _(aten, isreal) \
912
+ _(aten, istft) \
913
+ _(aten, item) \
914
+ _(aten, kaiser_window) \
915
+ _(aten, kl_div) \
916
+ _(aten, kron) \
917
+ _(aten, kthvalue) \
918
+ _(aten, l1_loss) \
919
+ _(aten, layer_norm) \
920
+ _(aten, lcm) \
921
+ _(aten, lcm_) \
922
+ _(aten, ldexp) \
923
+ _(aten, ldexp_) \
924
+ _(aten, le) \
925
+ _(aten, le_) \
926
+ _(aten, leaky_relu) \
927
+ _(aten, leaky_relu_) \
928
+ _(aten, leaky_relu_backward) \
929
+ _(aten, lerp) \
930
+ _(aten, lerp_) \
931
+ _(aten, less) \
932
+ _(aten, less_) \
933
+ _(aten, less_equal) \
934
+ _(aten, less_equal_) \
935
+ _(aten, lgamma) \
936
+ _(aten, lgamma_) \
937
+ _(aten, lift) \
938
+ _(aten, lift_fresh) \
939
+ _(aten, lift_fresh_copy) \
940
+ _(aten, linalg_cholesky) \
941
+ _(aten, linalg_cholesky_ex) \
942
+ _(aten, linalg_cond) \
943
+ _(aten, linalg_cross) \
944
+ _(aten, linalg_det) \
945
+ _(aten, linalg_diagonal) \
946
+ _(aten, linalg_eig) \
947
+ _(aten, linalg_eigh) \
948
+ _(aten, linalg_eigvals) \
949
+ _(aten, linalg_eigvalsh) \
950
+ _(aten, linalg_householder_product) \
951
+ _(aten, linalg_inv) \
952
+ _(aten, linalg_inv_ex) \
953
+ _(aten, linalg_ldl_factor) \
954
+ _(aten, linalg_ldl_factor_ex) \
955
+ _(aten, linalg_ldl_solve) \
956
+ _(aten, linalg_lstsq) \
957
+ _(aten, linalg_lu) \
958
+ _(aten, linalg_lu_factor) \
959
+ _(aten, linalg_lu_factor_ex) \
960
+ _(aten, linalg_lu_solve) \
961
+ _(aten, linalg_matmul) \
962
+ _(aten, linalg_matrix_exp) \
963
+ _(aten, linalg_matrix_norm) \
964
+ _(aten, linalg_matrix_power) \
965
+ _(aten, linalg_matrix_rank) \
966
+ _(aten, linalg_multi_dot) \
967
+ _(aten, linalg_norm) \
968
+ _(aten, linalg_pinv) \
969
+ _(aten, linalg_qr) \
970
+ _(aten, linalg_slogdet) \
971
+ _(aten, linalg_solve) \
972
+ _(aten, linalg_solve_ex) \
973
+ _(aten, linalg_solve_triangular) \
974
+ _(aten, linalg_svd) \
975
+ _(aten, linalg_svdvals) \
976
+ _(aten, linalg_tensorinv) \
977
+ _(aten, linalg_tensorsolve) \
978
+ _(aten, linalg_vander) \
979
+ _(aten, linalg_vecdot) \
980
+ _(aten, linalg_vector_norm) \
981
+ _(aten, linear) \
982
+ _(aten, linear_backward) \
983
+ _(aten, linspace) \
984
+ _(aten, log) \
985
+ _(aten, log10) \
986
+ _(aten, log10_) \
987
+ _(aten, log1p) \
988
+ _(aten, log1p_) \
989
+ _(aten, log2) \
990
+ _(aten, log2_) \
991
+ _(aten, log_) \
992
+ _(aten, log_normal) \
993
+ _(aten, log_normal_) \
994
+ _(aten, log_sigmoid) \
995
+ _(aten, log_sigmoid_backward) \
996
+ _(aten, log_sigmoid_forward) \
997
+ _(aten, log_softmax) \
998
+ _(aten, logaddexp) \
999
+ _(aten, logaddexp2) \
1000
+ _(aten, logcumsumexp) \
1001
+ _(aten, logdet) \
1002
+ _(aten, logical_and) \
1003
+ _(aten, logical_and_) \
1004
+ _(aten, logical_not) \
1005
+ _(aten, logical_not_) \
1006
+ _(aten, logical_or) \
1007
+ _(aten, logical_or_) \
1008
+ _(aten, logical_xor) \
1009
+ _(aten, logical_xor_) \
1010
+ _(aten, logit) \
1011
+ _(aten, logit_) \
1012
+ _(aten, logit_backward) \
1013
+ _(aten, logspace) \
1014
+ _(aten, logsumexp) \
1015
+ _(aten, lshift) \
1016
+ _(aten, lstm) \
1017
+ _(aten, lstm_cell) \
1018
+ _(aten, lstm_mps_backward) \
1019
+ _(aten, lt) \
1020
+ _(aten, lt_) \
1021
+ _(aten, lu_solve) \
1022
+ _(aten, lu_unpack) \
1023
+ _(aten, mH) \
1024
+ _(aten, mT) \
1025
+ _(aten, margin_ranking_loss) \
1026
+ _(aten, masked_fill) \
1027
+ _(aten, masked_fill_) \
1028
+ _(aten, masked_scatter) \
1029
+ _(aten, masked_scatter_) \
1030
+ _(aten, masked_scatter_backward) \
1031
+ _(aten, masked_select) \
1032
+ _(aten, masked_select_backward) \
1033
+ _(aten, matmul) \
1034
+ _(aten, matmul_backward) \
1035
+ _(aten, matrix_H) \
1036
+ _(aten, matrix_exp) \
1037
+ _(aten, matrix_exp_backward) \
1038
+ _(aten, matrix_power) \
1039
+ _(aten, max) \
1040
+ _(aten, max_pool1d) \
1041
+ _(aten, max_pool1d_with_indices) \
1042
+ _(aten, max_pool2d) \
1043
+ _(aten, max_pool2d_backward) \
1044
+ _(aten, max_pool2d_with_indices) \
1045
+ _(aten, max_pool2d_with_indices_backward) \
1046
+ _(aten, max_pool3d) \
1047
+ _(aten, max_pool3d_with_indices) \
1048
+ _(aten, max_pool3d_with_indices_backward) \
1049
+ _(aten, max_unpool2d) \
1050
+ _(aten, max_unpool3d) \
1051
+ _(aten, maximum) \
1052
+ _(aten, mean) \
1053
+ _(aten, median) \
1054
+ _(aten, meshgrid) \
1055
+ _(aten, min) \
1056
+ _(aten, minimum) \
1057
+ _(aten, miopen_batch_norm) \
1058
+ _(aten, miopen_batch_norm_backward) \
1059
+ _(aten, miopen_convolution) \
1060
+ _(aten, miopen_convolution_add_relu) \
1061
+ _(aten, miopen_convolution_relu) \
1062
+ _(aten, miopen_convolution_transpose) \
1063
+ _(aten, miopen_depthwise_convolution) \
1064
+ _(aten, miopen_rnn) \
1065
+ _(aten, miopen_rnn_backward) \
1066
+ _(aten, mish) \
1067
+ _(aten, mish_) \
1068
+ _(aten, mish_backward) \
1069
+ _(aten, mkldnn_adaptive_avg_pool2d) \
1070
+ _(aten, mkldnn_adaptive_avg_pool2d_backward) \
1071
+ _(aten, mkldnn_convolution) \
1072
+ _(aten, mkldnn_linear) \
1073
+ _(aten, mkldnn_linear_backward) \
1074
+ _(aten, mkldnn_linear_backward_input) \
1075
+ _(aten, mkldnn_linear_backward_weights) \
1076
+ _(aten, mkldnn_max_pool2d) \
1077
+ _(aten, mkldnn_max_pool2d_backward) \
1078
+ _(aten, mkldnn_max_pool3d) \
1079
+ _(aten, mkldnn_max_pool3d_backward) \
1080
+ _(aten, mkldnn_reorder_conv2d_weight) \
1081
+ _(aten, mkldnn_reorder_conv3d_weight) \
1082
+ _(aten, mkldnn_rnn_layer) \
1083
+ _(aten, mkldnn_rnn_layer_backward) \
1084
+ _(aten, mm) \
1085
+ _(aten, mode) \
1086
+ _(aten, moveaxis) \
1087
+ _(aten, movedim) \
1088
+ _(aten, mps_convolution_backward) \
1089
+ _(aten, mps_convolution_transpose_backward) \
1090
+ _(aten, mse_loss) \
1091
+ _(aten, mse_loss_backward) \
1092
+ _(aten, msort) \
1093
+ _(aten, mul) \
1094
+ _(aten, mul_) \
1095
+ _(aten, multi_margin_loss) \
1096
+ _(aten, multi_margin_loss_backward) \
1097
+ _(aten, multilabel_margin_loss) \
1098
+ _(aten, multilabel_margin_loss_backward) \
1099
+ _(aten, multilabel_margin_loss_forward) \
1100
+ _(aten, multinomial) \
1101
+ _(aten, multiply) \
1102
+ _(aten, multiply_) \
1103
+ _(aten, mv) \
1104
+ _(aten, mvlgamma) \
1105
+ _(aten, mvlgamma_) \
1106
+ _(aten, nan_to_num) \
1107
+ _(aten, nan_to_num_) \
1108
+ _(aten, nanmean) \
1109
+ _(aten, nanmedian) \
1110
+ _(aten, nanquantile) \
1111
+ _(aten, nansum) \
1112
+ _(aten, narrow) \
1113
+ _(aten, narrow_copy) \
1114
+ _(aten, native_batch_norm) \
1115
+ _(aten, native_batch_norm_backward) \
1116
+ _(aten, native_channel_shuffle) \
1117
+ _(aten, native_dropout) \
1118
+ _(aten, native_dropout_backward) \
1119
+ _(aten, native_group_norm) \
1120
+ _(aten, native_group_norm_backward) \
1121
+ _(aten, native_layer_norm) \
1122
+ _(aten, native_layer_norm_backward) \
1123
+ _(aten, native_norm) \
1124
+ _(aten, ne) \
1125
+ _(aten, ne_) \
1126
+ _(aten, neg) \
1127
+ _(aten, neg_) \
1128
+ _(aten, negative) \
1129
+ _(aten, negative_) \
1130
+ _(aten, nested_to_padded_tensor) \
1131
+ _(aten, new_empty) \
1132
+ _(aten, new_empty_strided) \
1133
+ _(aten, new_full) \
1134
+ _(aten, new_ones) \
1135
+ _(aten, new_zeros) \
1136
+ _(aten, nextafter) \
1137
+ _(aten, nextafter_) \
1138
+ _(aten, nll_loss) \
1139
+ _(aten, nll_loss2d) \
1140
+ _(aten, nll_loss2d_backward) \
1141
+ _(aten, nll_loss2d_forward) \
1142
+ _(aten, nll_loss_backward) \
1143
+ _(aten, nll_loss_forward) \
1144
+ _(aten, nll_loss_nd) \
1145
+ _(aten, nonzero) \
1146
+ _(aten, nonzero_numpy) \
1147
+ _(aten, nonzero_static) \
1148
+ _(aten, norm) \
1149
+ _(aten, norm_except_dim) \
1150
+ _(aten, normal) \
1151
+ _(aten, normal_) \
1152
+ _(aten, normal_functional) \
1153
+ _(aten, not_equal) \
1154
+ _(aten, not_equal_) \
1155
+ _(aten, nuclear_norm) \
1156
+ _(aten, numpy_T) \
1157
+ _(aten, one_hot) \
1158
+ _(aten, ones) \
1159
+ _(aten, ones_like) \
1160
+ _(aten, orgqr) \
1161
+ _(aten, ormqr) \
1162
+ _(aten, outer) \
1163
+ _(aten, output_nr) \
1164
+ _(aten, pad) \
1165
+ _(aten, pad_sequence) \
1166
+ _(aten, pairwise_distance) \
1167
+ _(aten, pdist) \
1168
+ _(aten, permute) \
1169
+ _(aten, permute_copy) \
1170
+ _(aten, pin_memory) \
1171
+ _(aten, pinverse) \
1172
+ _(aten, pixel_shuffle) \
1173
+ _(aten, pixel_unshuffle) \
1174
+ _(aten, poisson) \
1175
+ _(aten, poisson_nll_loss) \
1176
+ _(aten, polar) \
1177
+ _(aten, polygamma) \
1178
+ _(aten, polygamma_) \
1179
+ _(aten, positive) \
1180
+ _(aten, pow) \
1181
+ _(aten, pow_) \
1182
+ _(aten, prelu) \
1183
+ _(aten, prod) \
1184
+ _(aten, promote_types) \
1185
+ _(aten, put) \
1186
+ _(aten, put_) \
1187
+ _(aten, q_per_channel_axis) \
1188
+ _(aten, q_per_channel_scales) \
1189
+ _(aten, q_per_channel_zero_points) \
1190
+ _(aten, q_scale) \
1191
+ _(aten, q_zero_point) \
1192
+ _(aten, qr) \
1193
+ _(aten, qscheme) \
1194
+ _(aten, quantile) \
1195
+ _(aten, quantize_per_channel) \
1196
+ _(aten, quantize_per_tensor) \
1197
+ _(aten, quantize_per_tensor_dynamic) \
1198
+ _(aten, quantized_batch_norm) \
1199
+ _(aten, quantized_gru_cell) \
1200
+ _(aten, quantized_lstm_cell) \
1201
+ _(aten, quantized_max_pool1d) \
1202
+ _(aten, quantized_max_pool2d) \
1203
+ _(aten, quantized_max_pool3d) \
1204
+ _(aten, quantized_rnn_relu_cell) \
1205
+ _(aten, quantized_rnn_tanh_cell) \
1206
+ _(aten, rad2deg) \
1207
+ _(aten, rad2deg_) \
1208
+ _(aten, rand) \
1209
+ _(aten, rand_like) \
1210
+ _(aten, randint) \
1211
+ _(aten, randint_like) \
1212
+ _(aten, randn) \
1213
+ _(aten, randn_like) \
1214
+ _(aten, random) \
1215
+ _(aten, random_) \
1216
+ _(aten, randperm) \
1217
+ _(aten, range) \
1218
+ _(aten, ravel) \
1219
+ _(aten, real) \
1220
+ _(aten, reciprocal) \
1221
+ _(aten, reciprocal_) \
1222
+ _(aten, record_stream) \
1223
+ _(aten, refine_names) \
1224
+ _(aten, reflection_pad1d) \
1225
+ _(aten, reflection_pad1d_backward) \
1226
+ _(aten, reflection_pad2d) \
1227
+ _(aten, reflection_pad2d_backward) \
1228
+ _(aten, reflection_pad3d) \
1229
+ _(aten, reflection_pad3d_backward) \
1230
+ _(aten, relu) \
1231
+ _(aten, relu6) \
1232
+ _(aten, relu6_) \
1233
+ _(aten, relu_) \
1234
+ _(aten, remainder) \
1235
+ _(aten, remainder_) \
1236
+ _(aten, rename) \
1237
+ _(aten, rename_) \
1238
+ _(aten, renorm) \
1239
+ _(aten, renorm_) \
1240
+ _(aten, repeat) \
1241
+ _(aten, repeat_interleave) \
1242
+ _(aten, replication_pad1d) \
1243
+ _(aten, replication_pad1d_backward) \
1244
+ _(aten, replication_pad2d) \
1245
+ _(aten, replication_pad2d_backward) \
1246
+ _(aten, replication_pad3d) \
1247
+ _(aten, replication_pad3d_backward) \
1248
+ _(aten, requires_grad) \
1249
+ _(aten, requires_grad_) \
1250
+ _(aten, reshape) \
1251
+ _(aten, reshape_as) \
1252
+ _(aten, resize) \
1253
+ _(aten, resize_) \
1254
+ _(aten, resize_as) \
1255
+ _(aten, resize_as_) \
1256
+ _(aten, resize_as_sparse) \
1257
+ _(aten, resize_as_sparse_) \
1258
+ _(aten, resolve_conj) \
1259
+ _(aten, resolve_neg) \
1260
+ _(aten, result_type) \
1261
+ _(aten, retain_grad) \
1262
+ _(aten, retains_grad) \
1263
+ _(aten, rnn_relu) \
1264
+ _(aten, rnn_relu_cell) \
1265
+ _(aten, rnn_tanh) \
1266
+ _(aten, rnn_tanh_cell) \
1267
+ _(aten, roll) \
1268
+ _(aten, rot90) \
1269
+ _(aten, round) \
1270
+ _(aten, round_) \
1271
+ _(aten, row_indices) \
1272
+ _(aten, row_indices_copy) \
1273
+ _(aten, row_stack) \
1274
+ _(aten, rrelu) \
1275
+ _(aten, rrelu_) \
1276
+ _(aten, rrelu_with_noise) \
1277
+ _(aten, rrelu_with_noise_) \
1278
+ _(aten, rrelu_with_noise_backward) \
1279
+ _(aten, rshift) \
1280
+ _(aten, rsqrt) \
1281
+ _(aten, rsqrt_) \
1282
+ _(aten, rsub) \
1283
+ _(aten, scalar_tensor) \
1284
+ _(aten, scaled_dot_product_attention) \
1285
+ _(aten, scatter) \
1286
+ _(aten, scatter_) \
1287
+ _(aten, scatter_add) \
1288
+ _(aten, scatter_add_) \
1289
+ _(aten, scatter_reduce) \
1290
+ _(aten, scatter_reduce_) \
1291
+ _(aten, searchsorted) \
1292
+ _(aten, segment_reduce) \
1293
+ _(aten, select) \
1294
+ _(aten, select_backward) \
1295
+ _(aten, select_copy) \
1296
+ _(aten, select_scatter) \
1297
+ _(aten, selu) \
1298
+ _(aten, selu_) \
1299
+ _(aten, set) \
1300
+ _(aten, set_) \
1301
+ _(aten, set_data) \
1302
+ _(aten, sgn) \
1303
+ _(aten, sgn_) \
1304
+ _(aten, sigmoid) \
1305
+ _(aten, sigmoid_) \
1306
+ _(aten, sigmoid_backward) \
1307
+ _(aten, sign) \
1308
+ _(aten, sign_) \
1309
+ _(aten, signbit) \
1310
+ _(aten, silu) \
1311
+ _(aten, silu_) \
1312
+ _(aten, silu_backward) \
1313
+ _(aten, sin) \
1314
+ _(aten, sin_) \
1315
+ _(aten, sinc) \
1316
+ _(aten, sinc_) \
1317
+ _(aten, sinh) \
1318
+ _(aten, sinh_) \
1319
+ _(aten, size) \
1320
+ _(aten, slice) \
1321
+ _(aten, slice_backward) \
1322
+ _(aten, slice_copy) \
1323
+ _(aten, slice_inverse) \
1324
+ _(aten, slice_scatter) \
1325
+ _(aten, slogdet) \
1326
+ _(aten, slow_conv3d) \
1327
+ _(aten, slow_conv3d_forward) \
1328
+ _(aten, slow_conv_dilated2d) \
1329
+ _(aten, slow_conv_dilated3d) \
1330
+ _(aten, slow_conv_transpose2d) \
1331
+ _(aten, slow_conv_transpose3d) \
1332
+ _(aten, smm) \
1333
+ _(aten, smooth_l1_loss) \
1334
+ _(aten, smooth_l1_loss_backward) \
1335
+ _(aten, soft_margin_loss) \
1336
+ _(aten, soft_margin_loss_backward) \
1337
+ _(aten, softmax) \
1338
+ _(aten, softplus) \
1339
+ _(aten, softplus_backward) \
1340
+ _(aten, softshrink) \
1341
+ _(aten, softshrink_backward) \
1342
+ _(aten, sort) \
1343
+ _(aten, sparse_bsc_tensor) \
1344
+ _(aten, sparse_bsr_tensor) \
1345
+ _(aten, sparse_compressed_tensor) \
1346
+ _(aten, sparse_coo_tensor) \
1347
+ _(aten, sparse_csc_tensor) \
1348
+ _(aten, sparse_csr_tensor) \
1349
+ _(aten, sparse_dim) \
1350
+ _(aten, sparse_mask) \
1351
+ _(aten, sparse_resize) \
1352
+ _(aten, sparse_resize_) \
1353
+ _(aten, sparse_resize_and_clear) \
1354
+ _(aten, sparse_resize_and_clear_) \
1355
+ _(aten, sparse_sampled_addmm) \
1356
+ _(aten, special_airy_ai) \
1357
+ _(aten, special_bessel_j0) \
1358
+ _(aten, special_bessel_j1) \
1359
+ _(aten, special_bessel_y0) \
1360
+ _(aten, special_bessel_y1) \
1361
+ _(aten, special_chebyshev_polynomial_t) \
1362
+ _(aten, special_chebyshev_polynomial_u) \
1363
+ _(aten, special_chebyshev_polynomial_v) \
1364
+ _(aten, special_chebyshev_polynomial_w) \
1365
+ _(aten, special_digamma) \
1366
+ _(aten, special_entr) \
1367
+ _(aten, special_erf) \
1368
+ _(aten, special_erfc) \
1369
+ _(aten, special_erfcx) \
1370
+ _(aten, special_erfinv) \
1371
+ _(aten, special_exp2) \
1372
+ _(aten, special_expit) \
1373
+ _(aten, special_expm1) \
1374
+ _(aten, special_gammainc) \
1375
+ _(aten, special_gammaincc) \
1376
+ _(aten, special_gammaln) \
1377
+ _(aten, special_hermite_polynomial_h) \
1378
+ _(aten, special_hermite_polynomial_he) \
1379
+ _(aten, special_i0) \
1380
+ _(aten, special_i0e) \
1381
+ _(aten, special_i1) \
1382
+ _(aten, special_i1e) \
1383
+ _(aten, special_laguerre_polynomial_l) \
1384
+ _(aten, special_legendre_polynomial_p) \
1385
+ _(aten, special_log1p) \
1386
+ _(aten, special_log_ndtr) \
1387
+ _(aten, special_log_softmax) \
1388
+ _(aten, special_logit) \
1389
+ _(aten, special_logsumexp) \
1390
+ _(aten, special_modified_bessel_i0) \
1391
+ _(aten, special_modified_bessel_i1) \
1392
+ _(aten, special_modified_bessel_k0) \
1393
+ _(aten, special_modified_bessel_k1) \
1394
+ _(aten, special_multigammaln) \
1395
+ _(aten, special_ndtr) \
1396
+ _(aten, special_ndtri) \
1397
+ _(aten, special_polygamma) \
1398
+ _(aten, special_psi) \
1399
+ _(aten, special_round) \
1400
+ _(aten, special_scaled_modified_bessel_k0) \
1401
+ _(aten, special_scaled_modified_bessel_k1) \
1402
+ _(aten, special_shifted_chebyshev_polynomial_t) \
1403
+ _(aten, special_shifted_chebyshev_polynomial_u) \
1404
+ _(aten, special_shifted_chebyshev_polynomial_v) \
1405
+ _(aten, special_shifted_chebyshev_polynomial_w) \
1406
+ _(aten, special_sinc) \
1407
+ _(aten, special_softmax) \
1408
+ _(aten, special_spherical_bessel_j0) \
1409
+ _(aten, special_xlog1py) \
1410
+ _(aten, special_xlogy) \
1411
+ _(aten, special_zeta) \
1412
+ _(aten, split) \
1413
+ _(aten, split_copy) \
1414
+ _(aten, split_with_sizes) \
1415
+ _(aten, split_with_sizes_copy) \
1416
+ _(aten, sqrt) \
1417
+ _(aten, sqrt_) \
1418
+ _(aten, square) \
1419
+ _(aten, square_) \
1420
+ _(aten, squeeze) \
1421
+ _(aten, squeeze_) \
1422
+ _(aten, squeeze_copy) \
1423
+ _(aten, sspaddmm) \
1424
+ _(aten, stack) \
1425
+ _(aten, std) \
1426
+ _(aten, std_mean) \
1427
+ _(aten, stft) \
1428
+ _(aten, stride) \
1429
+ _(aten, sub) \
1430
+ _(aten, sub_) \
1431
+ _(aten, subtract) \
1432
+ _(aten, subtract_) \
1433
+ _(aten, sum) \
1434
+ _(aten, sum_to_size) \
1435
+ _(aten, svd) \
1436
+ _(aten, swapaxes) \
1437
+ _(aten, swapaxes_) \
1438
+ _(aten, swapdims) \
1439
+ _(aten, swapdims_) \
1440
+ _(aten, sym_constrain_range) \
1441
+ _(aten, sym_constrain_range_for_size) \
1442
+ _(aten, sym_numel) \
1443
+ _(aten, sym_size) \
1444
+ _(aten, sym_storage_offset) \
1445
+ _(aten, sym_stride) \
1446
+ _(aten, t) \
1447
+ _(aten, t_) \
1448
+ _(aten, t_copy) \
1449
+ _(aten, take) \
1450
+ _(aten, take_along_dim) \
1451
+ _(aten, tan) \
1452
+ _(aten, tan_) \
1453
+ _(aten, tanh) \
1454
+ _(aten, tanh_) \
1455
+ _(aten, tanh_backward) \
1456
+ _(aten, tensor_split) \
1457
+ _(aten, tensordot) \
1458
+ _(aten, thnn_conv2d) \
1459
+ _(aten, threshold) \
1460
+ _(aten, threshold_) \
1461
+ _(aten, threshold_backward) \
1462
+ _(aten, tile) \
1463
+ _(aten, to) \
1464
+ _(aten, to_dense) \
1465
+ _(aten, to_dense_backward) \
1466
+ _(aten, to_mkldnn) \
1467
+ _(aten, to_mkldnn_backward) \
1468
+ _(aten, to_padded_tensor) \
1469
+ _(aten, to_sparse) \
1470
+ _(aten, to_sparse_bsc) \
1471
+ _(aten, to_sparse_bsr) \
1472
+ _(aten, to_sparse_csc) \
1473
+ _(aten, to_sparse_csr) \
1474
+ _(aten, topk) \
1475
+ _(aten, trace) \
1476
+ _(aten, trace_backward) \
1477
+ _(aten, transpose) \
1478
+ _(aten, transpose_) \
1479
+ _(aten, transpose_copy) \
1480
+ _(aten, trapezoid) \
1481
+ _(aten, trapz) \
1482
+ _(aten, triangular_solve) \
1483
+ _(aten, tril) \
1484
+ _(aten, tril_) \
1485
+ _(aten, tril_indices) \
1486
+ _(aten, triplet_margin_loss) \
1487
+ _(aten, triu) \
1488
+ _(aten, triu_) \
1489
+ _(aten, triu_indices) \
1490
+ _(aten, true_divide) \
1491
+ _(aten, true_divide_) \
1492
+ _(aten, trunc) \
1493
+ _(aten, trunc_) \
1494
+ _(aten, type_as) \
1495
+ _(aten, unbind) \
1496
+ _(aten, unbind_copy) \
1497
+ _(aten, unflatten) \
1498
+ _(aten, unflatten_dense_tensors) \
1499
+ _(aten, unfold) \
1500
+ _(aten, unfold_backward) \
1501
+ _(aten, unfold_copy) \
1502
+ _(aten, uniform) \
1503
+ _(aten, uniform_) \
1504
+ _(aten, unique_consecutive) \
1505
+ _(aten, unique_dim) \
1506
+ _(aten, unique_dim_consecutive) \
1507
+ _(aten, unsafe_chunk) \
1508
+ _(aten, unsafe_split) \
1509
+ _(aten, unsafe_split_with_sizes) \
1510
+ _(aten, unsqueeze) \
1511
+ _(aten, unsqueeze_) \
1512
+ _(aten, unsqueeze_copy) \
1513
+ _(aten, upsample_bicubic2d) \
1514
+ _(aten, upsample_bicubic2d_backward) \
1515
+ _(aten, upsample_bilinear2d) \
1516
+ _(aten, upsample_bilinear2d_backward) \
1517
+ _(aten, upsample_linear1d) \
1518
+ _(aten, upsample_linear1d_backward) \
1519
+ _(aten, upsample_nearest1d) \
1520
+ _(aten, upsample_nearest1d_backward) \
1521
+ _(aten, upsample_nearest2d) \
1522
+ _(aten, upsample_nearest2d_backward) \
1523
+ _(aten, upsample_nearest3d) \
1524
+ _(aten, upsample_nearest3d_backward) \
1525
+ _(aten, upsample_trilinear3d) \
1526
+ _(aten, upsample_trilinear3d_backward) \
1527
+ _(aten, value_selecting_reduction_backward) \
1528
+ _(aten, values) \
1529
+ _(aten, values_copy) \
1530
+ _(aten, vander) \
1531
+ _(aten, var) \
1532
+ _(aten, var_mean) \
1533
+ _(aten, vdot) \
1534
+ _(aten, view) \
1535
+ _(aten, view_as) \
1536
+ _(aten, view_as_complex) \
1537
+ _(aten, view_as_complex_copy) \
1538
+ _(aten, view_as_real) \
1539
+ _(aten, view_as_real_copy) \
1540
+ _(aten, view_copy) \
1541
+ _(aten, vsplit) \
1542
+ _(aten, vstack) \
1543
+ _(aten, where) \
1544
+ _(aten, xlogy) \
1545
+ _(aten, xlogy_) \
1546
+ _(aten, zero) \
1547
+ _(aten, zero_) \
1548
+ _(aten, zeros) \
1549
+ _(aten, zeros_like)
1550
+
1551
+ #define FORALL_ATTR_BASE_SYMBOLS(_) \
1552
+ _(attr, A) \
1553
+ _(attr, B) \
1554
+ _(attr, C) \
1555
+ _(attr, H) \
1556
+ _(attr, HxW) \
1557
+ _(attr, K) \
1558
+ _(attr, L) \
1559
+ _(attr, LD) \
1560
+ _(attr, LU) \
1561
+ _(attr, LU_data) \
1562
+ _(attr, LU_pivots) \
1563
+ _(attr, M) \
1564
+ _(attr, N) \
1565
+ _(attr, P) \
1566
+ _(attr, Q) \
1567
+ _(attr, R) \
1568
+ _(attr, S) \
1569
+ _(attr, U) \
1570
+ _(attr, UPLO) \
1571
+ _(attr, V) \
1572
+ _(attr, Vh) \
1573
+ _(attr, W) \
1574
+ _(attr, X) \
1575
+ _(attr, a) \
1576
+ _(attr, abs) \
1577
+ _(attr, accumulate) \
1578
+ _(attr, accumulate_matches) \
1579
+ _(attr, activation) \
1580
+ _(attr, addends) \
1581
+ _(attr, adjoint) \
1582
+ _(attr, alg_id) \
1583
+ _(attr, align_corners) \
1584
+ _(attr, allow_tf32) \
1585
+ _(attr, alpha) \
1586
+ _(attr, amsgrad) \
1587
+ _(attr, anchor) \
1588
+ _(attr, angle) \
1589
+ _(attr, any) \
1590
+ _(attr, api_name) \
1591
+ _(attr, append) \
1592
+ _(attr, approximate) \
1593
+ _(attr, arg1) \
1594
+ _(attr, arg2) \
1595
+ _(attr, arg3) \
1596
+ _(attr, arg_out) \
1597
+ _(attr, assert_msg) \
1598
+ _(attr, assume_unique) \
1599
+ _(attr, atol) \
1600
+ _(attr, attn_bias) \
1601
+ _(attr, attn_mask) \
1602
+ _(attr, average_attn_weights) \
1603
+ _(attr, averaging_const) \
1604
+ _(attr, aweights) \
1605
+ _(attr, axis) \
1606
+ _(attr, axis0) \
1607
+ _(attr, axis1) \
1608
+ _(attr, b) \
1609
+ _(attr, b_hh) \
1610
+ _(attr, b_ih) \
1611
+ _(attr, bag_size) \
1612
+ _(attr, base) \
1613
+ _(attr, batch1) \
1614
+ _(attr, batch2) \
1615
+ _(attr, batch_dim) \
1616
+ _(attr, batch_first) \
1617
+ _(attr, batch_size) \
1618
+ _(attr, batch_sizes) \
1619
+ _(attr, benchmark) \
1620
+ _(attr, beta) \
1621
+ _(attr, beta1) \
1622
+ _(attr, beta2) \
1623
+ _(attr, bias) \
1624
+ _(attr, bias_defined) \
1625
+ _(attr, bias_g) \
1626
+ _(attr, bias_requires_grad) \
1627
+ _(attr, bias_sizes) \
1628
+ _(attr, bidirectional) \
1629
+ _(attr, bin_edges) \
1630
+ _(attr, bins) \
1631
+ _(attr, bit_width) \
1632
+ _(attr, blank) \
1633
+ _(attr, blocksize) \
1634
+ _(attr, boundaries) \
1635
+ _(attr, buffer) \
1636
+ _(attr, causal_diagonal) \
1637
+ _(attr, ccol_indices) \
1638
+ _(attr, cdim) \
1639
+ _(attr, cdist) \
1640
+ _(attr, ceil_mode) \
1641
+ _(attr, cell_state_fwd) \
1642
+ _(attr, center) \
1643
+ _(attr, ch_axis) \
1644
+ _(attr, check_errors) \
1645
+ _(attr, chunks) \
1646
+ _(attr, coalesced) \
1647
+ _(attr, coefficients) \
1648
+ _(attr, col) \
1649
+ _(attr, col_indices) \
1650
+ _(attr, col_offsets) \
1651
+ _(attr, col_offsets_hh) \
1652
+ _(attr, col_offsets_ih) \
1653
+ _(attr, compressed_A) \
1654
+ _(attr, compressed_idx) \
1655
+ _(attr, compressed_indices) \
1656
+ _(attr, compressed_indices_dtype) \
1657
+ _(attr, compute_log_sumexp) \
1658
+ _(attr, compute_mode) \
1659
+ _(attr, compute_uv) \
1660
+ _(attr, compute_v) \
1661
+ _(attr, condition) \
1662
+ _(attr, copy) \
1663
+ _(attr, correction) \
1664
+ _(attr, count) \
1665
+ _(attr, count_include_pad) \
1666
+ _(attr, counts) \
1667
+ _(attr, cpu_dtype) \
1668
+ _(attr, cpu_enabled) \
1669
+ _(attr, cpu_nested_shape_example) \
1670
+ _(attr, create_graph) \
1671
+ _(attr, crow_indices) \
1672
+ _(attr, cu_seqlens_k) \
1673
+ _(attr, cu_seqlens_q) \
1674
+ _(attr, cuda_dtype) \
1675
+ _(attr, cuda_enabled) \
1676
+ _(attr, cudnn_enable) \
1677
+ _(attr, cudnn_enabled) \
1678
+ _(attr, cum_seq_k) \
1679
+ _(attr, cum_seq_q) \
1680
+ _(attr, custom_mask_type) \
1681
+ _(attr, cx) \
1682
+ _(attr, cx_) \
1683
+ _(attr, cx_tmp) \
1684
+ _(attr, cy) \
1685
+ _(attr, cy_) \
1686
+ _(attr, d) \
1687
+ _(attr, dampening) \
1688
+ _(attr, data) \
1689
+ _(attr, decimals) \
1690
+ _(attr, delta) \
1691
+ _(attr, dense) \
1692
+ _(attr, dense_B) \
1693
+ _(attr, dense_dim) \
1694
+ _(attr, density) \
1695
+ _(attr, dep_token) \
1696
+ _(attr, descending) \
1697
+ _(attr, destination) \
1698
+ _(attr, deterministic) \
1699
+ _(attr, device) \
1700
+ _(attr, device_index) \
1701
+ _(attr, dgrad_glu) \
1702
+ _(attr, diagonal) \
1703
+ _(attr, diagonals) \
1704
+ _(attr, dilation) \
1705
+ _(attr, dim) \
1706
+ _(attr, dim0) \
1707
+ _(attr, dim1) \
1708
+ _(attr, dim2) \
1709
+ _(attr, dimension) \
1710
+ _(attr, dims) \
1711
+ _(attr, dims_other) \
1712
+ _(attr, dims_self) \
1713
+ _(attr, divisor_override) \
1714
+ _(attr, downscale_factor) \
1715
+ _(attr, driver) \
1716
+ _(attr, dropout) \
1717
+ _(attr, dropout_mask) \
1718
+ _(attr, dropout_p) \
1719
+ _(attr, dropout_seed) \
1720
+ _(attr, dropout_state) \
1721
+ _(attr, dst) \
1722
+ _(attr, dtype) \
1723
+ _(attr, dual) \
1724
+ _(attr, dummy) \
1725
+ _(attr, dx) \
1726
+ _(attr, edge_order) \
1727
+ _(attr, eigenvalues) \
1728
+ _(attr, eigenvectors) \
1729
+ _(attr, eigvals) \
1730
+ _(attr, eigvecs) \
1731
+ _(attr, element) \
1732
+ _(attr, elements) \
1733
+ _(attr, ellipsis_idx) \
1734
+ _(attr, embed_dim) \
1735
+ _(attr, end) \
1736
+ _(attr, end_dim) \
1737
+ _(attr, eps) \
1738
+ _(attr, epsilon) \
1739
+ _(attr, equal_nan) \
1740
+ _(attr, equation) \
1741
+ _(attr, exp_avg_sqs) \
1742
+ _(attr, exp_avgs) \
1743
+ _(attr, expand1) \
1744
+ _(attr, expand2) \
1745
+ _(attr, expand3) \
1746
+ _(attr, exponent) \
1747
+ _(attr, exponential_average_factor) \
1748
+ _(attr, fake_quant_enabled) \
1749
+ _(attr, fake_quant_on) \
1750
+ _(attr, ffn_bias_1) \
1751
+ _(attr, ffn_bias_2) \
1752
+ _(attr, ffn_weight_1) \
1753
+ _(attr, ffn_weight_2) \
1754
+ _(attr, filename) \
1755
+ _(attr, fill_value) \
1756
+ _(attr, flat) \
1757
+ _(attr, forward) \
1758
+ _(attr, found_inf) \
1759
+ _(attr, from) \
1760
+ _(attr, full) \
1761
+ _(attr, full_matrices) \
1762
+ _(attr, fuse_transform_0213) \
1763
+ _(attr, fweights) \
1764
+ _(attr, g) \
1765
+ _(attr, gO) \
1766
+ _(attr, generator) \
1767
+ _(attr, ggI) \
1768
+ _(attr, ggW) \
1769
+ _(attr, ggb) \
1770
+ _(attr, glu) \
1771
+ _(attr, grad) \
1772
+ _(attr, grad_bias) \
1773
+ _(attr, grad_cy) \
1774
+ _(attr, grad_factor) \
1775
+ _(attr, grad_glu) \
1776
+ _(attr, grad_hy) \
1777
+ _(attr, grad_in) \
1778
+ _(attr, grad_input) \
1779
+ _(attr, grad_input_mask) \
1780
+ _(attr, grad_out) \
1781
+ _(attr, grad_out_) \
1782
+ _(attr, grad_output) \
1783
+ _(attr, grad_scale) \
1784
+ _(attr, grad_w) \
1785
+ _(attr, grad_weight) \
1786
+ _(attr, grad_x) \
1787
+ _(attr, grad_y) \
1788
+ _(attr, gradient) \
1789
+ _(attr, grads) \
1790
+ _(attr, grid) \
1791
+ _(attr, group) \
1792
+ _(attr, groups) \
1793
+ _(attr, growth_interval) \
1794
+ _(attr, growth_tracker) \
1795
+ _(attr, half_to_float) \
1796
+ _(attr, has_bias) \
1797
+ _(attr, has_biases) \
1798
+ _(attr, hermitian) \
1799
+ _(attr, hidden_bias) \
1800
+ _(attr, hidden_gates) \
1801
+ _(attr, hidden_size) \
1802
+ _(attr, high) \
1803
+ _(attr, hist) \
1804
+ _(attr, hop_length) \
1805
+ _(attr, hx) \
1806
+ _(attr, hx_) \
1807
+ _(attr, hy_) \
1808
+ _(attr, i1) \
1809
+ _(attr, i2) \
1810
+ _(attr, i3) \
1811
+ _(attr, ignore_index) \
1812
+ _(attr, imag) \
1813
+ _(attr, impl_index) \
1814
+ _(attr, implicit) \
1815
+ _(attr, include_last_offset) \
1816
+ _(attr, include_self) \
1817
+ _(attr, increasing) \
1818
+ _(attr, ind) \
1819
+ _(attr, index) \
1820
+ _(attr, indexing) \
1821
+ _(attr, indices) \
1822
+ _(attr, info) \
1823
+ _(attr, initial) \
1824
+ _(attr, innerKTiles) \
1825
+ _(attr, input) \
1826
+ _(attr, input1) \
1827
+ _(attr, input2) \
1828
+ _(attr, input3) \
1829
+ _(attr, input_bias) \
1830
+ _(attr, input_dtype) \
1831
+ _(attr, input_g) \
1832
+ _(attr, input_gates) \
1833
+ _(attr, input_lengths) \
1834
+ _(attr, input_scale) \
1835
+ _(attr, input_size) \
1836
+ _(attr, input_sizes) \
1837
+ _(attr, inputs) \
1838
+ _(attr, interpolation) \
1839
+ _(attr, interpolation_mode) \
1840
+ _(attr, inv_scale) \
1841
+ _(attr, inverse) \
1842
+ _(attr, invert) \
1843
+ _(attr, invstd) \
1844
+ _(attr, is_causal) \
1845
+ _(attr, is_coalesced) \
1846
+ _(attr, is_crow) \
1847
+ _(attr, is_first_step) \
1848
+ _(attr, is_matrix) \
1849
+ _(attr, is_result) \
1850
+ _(attr, is_target) \
1851
+ _(attr, k) \
1852
+ _(attr, keepdim) \
1853
+ _(attr, kernel_size) \
1854
+ _(attr, key) \
1855
+ _(attr, label_smoothing) \
1856
+ _(attr, lambd) \
1857
+ _(attr, largest) \
1858
+ _(attr, last_dim_size) \
1859
+ _(attr, layersOutputs) \
1860
+ _(attr, layout) \
1861
+ _(attr, left) \
1862
+ _(attr, length) \
1863
+ _(attr, lengths) \
1864
+ _(attr, level) \
1865
+ _(attr, like) \
1866
+ _(attr, list) \
1867
+ _(attr, log_alpha) \
1868
+ _(attr, log_input) \
1869
+ _(attr, log_probs) \
1870
+ _(attr, log_target) \
1871
+ _(attr, logabsdet) \
1872
+ _(attr, logsumexp) \
1873
+ _(attr, low) \
1874
+ _(attr, lower) \
1875
+ _(attr, lr) \
1876
+ _(attr, ltm) \
1877
+ _(attr, m) \
1878
+ _(attr, mantissa) \
1879
+ _(attr, margin) \
1880
+ _(attr, mask) \
1881
+ _(attr, mask_check) \
1882
+ _(attr, mask_type) \
1883
+ _(attr, masked_grad) \
1884
+ _(attr, mat) \
1885
+ _(attr, mat1) \
1886
+ _(attr, mat2) \
1887
+ _(attr, matrices) \
1888
+ _(attr, max) \
1889
+ _(attr, max_exp_avg_sqs) \
1890
+ _(attr, max_k) \
1891
+ _(attr, max_norm) \
1892
+ _(attr, max_q) \
1893
+ _(attr, max_seqlen_k) \
1894
+ _(attr, max_seqlen_q) \
1895
+ _(attr, max_size) \
1896
+ _(attr, max_val) \
1897
+ _(attr, max_values) \
1898
+ _(attr, maximize) \
1899
+ _(attr, maximum_indices) \
1900
+ _(attr, maxnorm) \
1901
+ _(attr, mean) \
1902
+ _(attr, median) \
1903
+ _(attr, memory_format) \
1904
+ _(attr, meta) \
1905
+ _(attr, min) \
1906
+ _(attr, min_indices) \
1907
+ _(attr, min_val) \
1908
+ _(attr, minlength) \
1909
+ _(attr, mode) \
1910
+ _(attr, momentum) \
1911
+ _(attr, momentum_buffer_list) \
1912
+ _(attr, n) \
1913
+ _(attr, n_bins) \
1914
+ _(attr, n_fft) \
1915
+ _(attr, names) \
1916
+ _(attr, nan) \
1917
+ _(attr, need_weights) \
1918
+ _(attr, neg_log_likelihood) \
1919
+ _(attr, negative) \
1920
+ _(attr, negative_slope) \
1921
+ _(attr, neginf) \
1922
+ _(attr, nested_size) \
1923
+ _(attr, nested_strides) \
1924
+ _(attr, nesterov) \
1925
+ _(attr, new_data) \
1926
+ _(attr, nnz) \
1927
+ _(attr, noise) \
1928
+ _(attr, non_blocking) \
1929
+ _(attr, norm) \
1930
+ _(attr, norm_bias_1) \
1931
+ _(attr, norm_bias_2) \
1932
+ _(attr, norm_first) \
1933
+ _(attr, norm_type) \
1934
+ _(attr, norm_weight_1) \
1935
+ _(attr, norm_weight_2) \
1936
+ _(attr, normalization) \
1937
+ _(attr, normalized) \
1938
+ _(attr, normalized_shape) \
1939
+ _(attr, nt_example) \
1940
+ _(attr, num_chunks) \
1941
+ _(attr, num_classes) \
1942
+ _(attr, num_generated) \
1943
+ _(attr, num_groups) \
1944
+ _(attr, num_head) \
1945
+ _(attr, num_heads) \
1946
+ _(attr, num_layers) \
1947
+ _(attr, num_parallel) \
1948
+ _(attr, num_samples) \
1949
+ _(attr, num_splits_key) \
1950
+ _(attr, num_weights) \
1951
+ _(attr, numel) \
1952
+ _(attr, observer_on) \
1953
+ _(attr, offset) \
1954
+ _(attr, offset2bag) \
1955
+ _(attr, offsets) \
1956
+ _(attr, onesided) \
1957
+ _(attr, ord) \
1958
+ _(attr, order) \
1959
+ _(attr, other) \
1960
+ _(attr, out) \
1961
+ _(attr, out0) \
1962
+ _(attr, out1) \
1963
+ _(attr, out2) \
1964
+ _(attr, out3) \
1965
+ _(attr, out4) \
1966
+ _(attr, out5) \
1967
+ _(attr, out6) \
1968
+ _(attr, out_amax) \
1969
+ _(attr, out_dim) \
1970
+ _(attr, out_dtype) \
1971
+ _(attr, out_int32) \
1972
+ _(attr, outdim) \
1973
+ _(attr, output) \
1974
+ _(attr, output_mask) \
1975
+ _(attr, output_padding) \
1976
+ _(attr, output_scale) \
1977
+ _(attr, output_size) \
1978
+ _(attr, output_zero_point) \
1979
+ _(attr, p) \
1980
+ _(attr, packed) \
1981
+ _(attr, packed_hh) \
1982
+ _(attr, packed_ih) \
1983
+ _(attr, packed_weight) \
1984
+ _(attr, pad) \
1985
+ _(attr, pad_mode) \
1986
+ _(attr, padded) \
1987
+ _(attr, padding) \
1988
+ _(attr, padding_idx) \
1989
+ _(attr, padding_mode) \
1990
+ _(attr, padding_value) \
1991
+ _(attr, params) \
1992
+ _(attr, path) \
1993
+ _(attr, pdist) \
1994
+ _(attr, per_row_fake_quant) \
1995
+ _(attr, per_sample_weights) \
1996
+ _(attr, periodic) \
1997
+ _(attr, philox_offset) \
1998
+ _(attr, philox_seed) \
1999
+ _(attr, physical_layout) \
2000
+ _(attr, pin_memory) \
2001
+ _(attr, pivot) \
2002
+ _(attr, pivots) \
2003
+ _(attr, plain_idx) \
2004
+ _(attr, plain_indices) \
2005
+ _(attr, pos_weight) \
2006
+ _(attr, posinf) \
2007
+ _(attr, positive) \
2008
+ _(attr, pow) \
2009
+ _(attr, prepend) \
2010
+ _(attr, primal) \
2011
+ _(attr, prob) \
2012
+ _(attr, proj_bias) \
2013
+ _(attr, proj_size) \
2014
+ _(attr, proj_weight) \
2015
+ _(attr, q) \
2016
+ _(attr, qGroupSize) \
2017
+ _(attr, qScaleAndZeros) \
2018
+ _(attr, qkv) \
2019
+ _(attr, qkv_bias) \
2020
+ _(attr, qkv_weight) \
2021
+ _(attr, qtensor) \
2022
+ _(attr, quant_max) \
2023
+ _(attr, quant_min) \
2024
+ _(attr, quasi) \
2025
+ _(attr, query) \
2026
+ _(attr, r) \
2027
+ _(attr, ragged_idx) \
2028
+ _(attr, random_samples) \
2029
+ _(attr, range) \
2030
+ _(attr, rank) \
2031
+ _(attr, ratio) \
2032
+ _(attr, rcond) \
2033
+ _(attr, real) \
2034
+ _(attr, reduce) \
2035
+ _(attr, reduce_range) \
2036
+ _(attr, reduction) \
2037
+ _(attr, repeats) \
2038
+ _(attr, replacement) \
2039
+ _(attr, requires_grad) \
2040
+ _(attr, reserve) \
2041
+ _(attr, reserveSpace) \
2042
+ _(attr, reservedSpace) \
2043
+ _(attr, residuals) \
2044
+ _(attr, result) \
2045
+ _(attr, retain_graph) \
2046
+ _(attr, return_complex) \
2047
+ _(attr, return_counts) \
2048
+ _(attr, return_debug_mask) \
2049
+ _(attr, return_inverse) \
2050
+ _(attr, reverse) \
2051
+ _(attr, right) \
2052
+ _(attr, rounding_mode) \
2053
+ _(attr, row) \
2054
+ _(attr, row_indices) \
2055
+ _(attr, rstd) \
2056
+ _(attr, rtol) \
2057
+ _(attr, running_max) \
2058
+ _(attr, running_mean) \
2059
+ _(attr, running_min) \
2060
+ _(attr, running_var) \
2061
+ _(attr, s) \
2062
+ _(attr, save_invstd) \
2063
+ _(attr, save_mean) \
2064
+ _(attr, save_var) \
2065
+ _(attr, save_var_transform) \
2066
+ _(attr, saved_g) \
2067
+ _(attr, saved_norms) \
2068
+ _(attr, saved_v) \
2069
+ _(attr, scalar) \
2070
+ _(attr, scalar1) \
2071
+ _(attr, scalar2) \
2072
+ _(attr, scalars) \
2073
+ _(attr, scale) \
2074
+ _(attr, scale_a) \
2075
+ _(attr, scale_b) \
2076
+ _(attr, scale_backoff_factor) \
2077
+ _(attr, scale_factors) \
2078
+ _(attr, scale_grad_by_freq) \
2079
+ _(attr, scale_growth_factor) \
2080
+ _(attr, scale_hh) \
2081
+ _(attr, scale_ih) \
2082
+ _(attr, scale_result) \
2083
+ _(attr, scales) \
2084
+ _(attr, scales_d) \
2085
+ _(attr, scales_h) \
2086
+ _(attr, scales_w) \
2087
+ _(attr, sections) \
2088
+ _(attr, seed) \
2089
+ _(attr, self) \
2090
+ _(attr, self_is_result) \
2091
+ _(attr, self_num_batch_dims) \
2092
+ _(attr, self_or_result) \
2093
+ _(attr, self_sizes) \
2094
+ _(attr, seqlen_k) \
2095
+ _(attr, sequences) \
2096
+ _(attr, shape) \
2097
+ _(attr, shared) \
2098
+ _(attr, shifts) \
2099
+ _(attr, side) \
2100
+ _(attr, sigma) \
2101
+ _(attr, sign) \
2102
+ _(attr, singular_values) \
2103
+ _(attr, size) \
2104
+ _(attr, sizes) \
2105
+ _(attr, skip_first) \
2106
+ _(attr, sobolstate) \
2107
+ _(attr, solution) \
2108
+ _(attr, some) \
2109
+ _(attr, sorted) \
2110
+ _(attr, sorted_sequence) \
2111
+ _(attr, sorter) \
2112
+ _(attr, source) \
2113
+ _(attr, spacing) \
2114
+ _(attr, sparse) \
2115
+ _(attr, sparse_dim) \
2116
+ _(attr, sparse_grad) \
2117
+ _(attr, split_size) \
2118
+ _(attr, split_sizes) \
2119
+ _(attr, src) \
2120
+ _(attr, stable) \
2121
+ _(attr, start) \
2122
+ _(attr, start_dim) \
2123
+ _(attr, state_steps) \
2124
+ _(attr, std) \
2125
+ _(attr, step) \
2126
+ _(attr, steps) \
2127
+ _(attr, storage_offset) \
2128
+ _(attr, stride) \
2129
+ _(attr, sum_dy) \
2130
+ _(attr, sum_dy_xmu) \
2131
+ _(attr, sumdim) \
2132
+ _(attr, swap) \
2133
+ _(attr, symmetric_quant) \
2134
+ _(attr, t) \
2135
+ _(attr, tangent) \
2136
+ _(attr, target) \
2137
+ _(attr, target_lengths) \
2138
+ _(attr, targets) \
2139
+ _(attr, tau) \
2140
+ _(attr, tensor) \
2141
+ _(attr, tensor1) \
2142
+ _(attr, tensor2) \
2143
+ _(attr, tensor_indices_or_sections) \
2144
+ _(attr, tensors) \
2145
+ _(attr, tensors1) \
2146
+ _(attr, test_element) \
2147
+ _(attr, test_elements) \
2148
+ _(attr, the_template) \
2149
+ _(attr, theta) \
2150
+ _(attr, threshold) \
2151
+ _(attr, to) \
2152
+ _(attr, tol) \
2153
+ _(attr, total) \
2154
+ _(attr, total_length) \
2155
+ _(attr, total_weight) \
2156
+ _(attr, train) \
2157
+ _(attr, training) \
2158
+ _(attr, transpose) \
2159
+ _(attr, transpose_result) \
2160
+ _(attr, transposed) \
2161
+ _(attr, type1) \
2162
+ _(attr, type2) \
2163
+ _(attr, unbiased) \
2164
+ _(attr, unitriangular) \
2165
+ _(attr, unpack_data) \
2166
+ _(attr, unpack_pivots) \
2167
+ _(attr, unroll_dim) \
2168
+ _(attr, unsafe) \
2169
+ _(attr, upper) \
2170
+ _(attr, upscale_factor) \
2171
+ _(attr, use_fast_accum) \
2172
+ _(attr, use_gelu) \
2173
+ _(attr, use_input_stats) \
2174
+ _(attr, v) \
2175
+ _(attr, value) \
2176
+ _(attr, values) \
2177
+ _(attr, var) \
2178
+ _(attr, vec) \
2179
+ _(attr, vec1) \
2180
+ _(attr, vec2) \
2181
+ _(attr, w_hh) \
2182
+ _(attr, w_ih) \
2183
+ _(attr, weight) \
2184
+ _(attr, weight0) \
2185
+ _(attr, weight1) \
2186
+ _(attr, weight2) \
2187
+ _(attr, weight3) \
2188
+ _(attr, weight4) \
2189
+ _(attr, weight_arr) \
2190
+ _(attr, weight_buf) \
2191
+ _(attr, weight_decay) \
2192
+ _(attr, weight_g) \
2193
+ _(attr, weight_scale) \
2194
+ _(attr, weight_stride0) \
2195
+ _(attr, weight_zero_point) \
2196
+ _(attr, weights) \
2197
+ _(attr, win_length) \
2198
+ _(attr, window) \
2199
+ _(attr, window_length) \
2200
+ _(attr, with_replacement) \
2201
+ _(attr, workspace) \
2202
+ _(attr, wrap) \
2203
+ _(attr, x) \
2204
+ _(attr, x1) \
2205
+ _(attr, x2) \
2206
+ _(attr, y) \
2207
+ _(attr, z) \
2208
+ _(attr, z_state) \
2209
+ _(attr, zero_infinity) \
2210
+ _(attr, zero_point) \
2211
+ _(attr, zero_point_hh) \
2212
+ _(attr, zero_point_ih) \
2213
+ _(attr, zero_points)
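
For orientation (not part of the header): lists like FORALL_ATTR_BASE_SYMBOLS above are X-macros. A consumer supplies its own one-argument macro as `_`, and the list expands once per (namespace, name) pair; ATen uses the same trick to declare one interned Symbol per entry. The sketch below is illustrative only, with made-up names (FORALL_DEMO_SYMBOLS, DemoSymbol, kDemoSymbolNames) and a deliberately tiny list, but it shows the usual expansion pattern.

```cpp
#include <cstdio>

// A deliberately tiny stand-in for the real symbol lists above.
#define FORALL_DEMO_SYMBOLS(_) \
  _(attr, dim)                 \
  _(attr, dtype)               \
  _(attr, keepdim)

// Expansion 1: one enumerator per (namespace, name) pair.
enum class DemoSymbol {
#define DEFINE_ENUM(ns, s) ns##_##s,
  FORALL_DEMO_SYMBOLS(DEFINE_ENUM)
#undef DEFINE_ENUM
};

// Expansion 2: a parallel table of printable "namespace::name" strings.
constexpr const char* kDemoSymbolNames[] = {
#define DEFINE_NAME(ns, s) #ns "::" #s,
  FORALL_DEMO_SYMBOLS(DEFINE_NAME)
#undef DEFINE_NAME
};

int main() {
  for (const char* name : kDemoSymbolNames) {
    std::printf("%s\n", name);  // prints attr::dim, attr::dtype, attr::keepdim
  }
  return 0;
}
```

Because both expansions are driven by the same list, the enumerators and the name table stay in sync automatically when an entry is added or removed.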
venv/lib/python3.10/site-packages/torch/include/ATen/core/blob.h ADDED
@@ -0,0 +1,208 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <sstream>
5
+ #include <type_traits>
6
+ #include <typeinfo>
7
+ #include <vector>
8
+
9
+ #include <c10/util/intrusive_ptr.h>
10
+ #include <c10/util/typeid.h>
11
+ #include <c10/macros/Macros.h>
12
+
13
+ namespace caffe2 {
14
+
15
+ class Tensor;
16
+
17
+ /**
18
+ * @brief Blob is a general container that hosts a typed pointer.
19
+ *
20
+ * A Blob hosts a pointer as well as its type, and takes charge of deleting it
21
+ * properly when the blob is deallocated or re-allocated with a new type. A blob
22
+ * could contain anything, although the most common case is to contain a Tensor.
23
+ */
24
+ class TORCH_API Blob final : public c10::intrusive_ptr_target {
25
+ public:
26
+ /**
27
+ * Initializes an empty Blob.
28
+ */
29
+ Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {}
30
+ ~Blob() override {
31
+ Reset();
32
+ }
33
+
34
+ Blob(Blob&& other) noexcept : Blob() {
35
+ swap(other);
36
+ }
37
+
38
+ Blob& operator=(Blob&& other) noexcept {
39
+ Blob(std::move(other)).swap(*this);
40
+ return *this;
41
+ }
42
+
43
+ /**
44
+ * Checks if the content stored in the blob is of type T.
45
+ */
46
+ template <class T>
47
+ bool IsType() const noexcept {
48
+ return meta_.Match<T>();
49
+ }
50
+
51
+ /**
52
+ * Returns the meta info of the blob.
53
+ */
54
+ const TypeMeta meta() const noexcept {
55
+ return meta_;
56
+ }
57
+
58
+ /**
59
+ * Returns a printable typename of the blob.
60
+ */
61
+ c10::string_view TypeName() const noexcept {
62
+ return meta_.name();
63
+ }
64
+
65
+ /**
66
+ * @brief Gets the const reference of the stored object. The code checks if
67
+ * the stored object is of the desired type.
68
+ */
69
+ // TODO(jerryzh): add a Get(c10::DeviceType) function?
70
+ template <class T>
71
+ const T& Get() const {
72
+ TORCH_INTERNAL_ASSERT(
73
+ IsType<T>(),
74
+ "wrong type for the Blob instance. Blob contains ",
75
+ meta_.name(),
76
+ " while caller expects ",
77
+ TypeMeta::TypeName<T>());
78
+ // TODO: after we add Get<Tensor>(c10::DeviceType)
79
+ // and changed all the callsites, we can add
80
+ // a static assert here to enforce T != Tensor
81
+ return *static_cast<const T*>(pointer_);
82
+ }
83
+
84
+ const void* GetRaw() const noexcept {
85
+ return pointer_;
86
+ }
87
+ void* GetRaw() noexcept {
88
+ return pointer_;
89
+ }
90
+
91
+ /**
92
+ * @brief Gets a mutable pointer to the stored object.
93
+ *
94
+ * If the current object is not of the right type, a new object is created
95
+ * and the old object is freed. Note that type T should have a default
96
+ * constructor. Otherwise, create the object yourself first, and use
97
+ * Reset().
98
+ */
99
+ template <class T>
100
+ T* GetMutable() {
101
+ static_assert(
102
+ std::is_default_constructible<T>::value,
103
+ "GetMutable can't be called with non-default-constructible types. "
104
+ "Try using specialized methods");
105
+ if (IsType<T>()) {
106
+ return static_cast<T*>(pointer_);
107
+ } else {
108
+ // TODO Re-enable logging
109
+ // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>();
110
+ return Reset<T>(new T());
111
+ }
112
+ }
113
+
114
+ template <class T>
115
+ T* GetMutableOrNull() {
116
+ if (IsType<T>()) {
117
+ return static_cast<T*>(pointer_);
118
+ } else {
119
+ return nullptr;
120
+ }
121
+ }
122
+
123
+ /**
124
+ * Sets the underlying object to the allocated one. The Blob then takes over
125
+ * the ownership of the passed in pointer. If there is already an object in
126
+ * the Blob, the old object is freed.
127
+ *
128
+ * This is used when the underlying class T does not have a default ctor, or
129
+ * complex initializations need to be done outside the blob.
130
+ */
131
+ template <class T>
132
+ T* Reset(T* allocated) {
133
+ free_();
134
+ meta_ = TypeMeta::Make<T>();
135
+ pointer_ = static_cast<void*>(allocated);
136
+ has_ownership_ = true;
137
+ return allocated;
138
+ }
139
+
140
+ /**
141
+ * Sets the underlying object to the allocated one, but does not take over
142
+ * the ownership of the passed in pointer. If there is already an object in
143
+ * the Blob, the old object is freed.
144
+ *
145
+ * Unlike Reset, this does not take over the ownership of the pointer and the
146
+ * caller is responsible for making sure that the lifetime of the allocated
147
+ * blob outlasts the lifetime of any access to this blob, until another Reset
148
+ * call is made or the blob is destructed.
149
+ */
150
+ template <class T>
151
+ typename std::remove_const<T>::type* ShareExternal(
152
+ typename std::remove_const<T>::type* allocated) {
153
+ return static_cast<T*>(ShareExternal(
154
+ static_cast<void*>(allocated),
155
+ TypeMeta::Make<typename std::remove_const<T>::type>()));
156
+ }
157
+
158
+ void* ShareExternal(void* allocated, const TypeMeta meta) {
159
+ free_();
160
+ meta_ = meta;
161
+ pointer_ = allocated;
162
+ has_ownership_ = false;
163
+ return allocated;
164
+ }
165
+
166
+ /**
167
+ * Resets the Blob to an empty one.
168
+ */
169
+ void Reset() {
170
+ free_();
171
+ pointer_ = nullptr;
172
+ meta_ = TypeMeta();
173
+ has_ownership_ = false;
174
+ }
175
+
176
+ /**
177
+ * @brief Swaps the underlying storage of two blobs.
178
+ */
179
+ void swap(Blob& rhs) {
180
+ using std::swap;
181
+ swap(meta_, rhs.meta_);
182
+ swap(pointer_, rhs.pointer_);
183
+ swap(has_ownership_, rhs.has_ownership_);
184
+ }
185
+
186
+ private:
187
+ void free_() {
188
+ if (has_ownership_ && pointer_ != nullptr) {
189
+ (*meta_.deleteFn())(pointer_);
190
+ }
191
+ }
192
+
193
+ TypeMeta meta_;
194
+ void* pointer_;
195
+ bool has_ownership_;
196
+
197
+ C10_DISABLE_COPY_AND_ASSIGN(Blob);
198
+ };
199
+
200
+ inline void swap(Blob& lhs, Blob& rhs) {
201
+ lhs.swap(rhs);
202
+ }
203
+
204
+ inline std::ostream& operator<<(std::ostream& out, const Blob& v) {
205
+ return out << "Blob[" << v.TypeName() << "]";
206
+ }
207
+
208
+ } // namespace caffe2
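
A minimal usage sketch for the Blob API above, illustrative rather than repository code: it assumes a libtorch build and that std::string is one of the TypeMeta-registered types (it is in stock builds).

```cpp
#include <iostream>
#include <string>

#include <ATen/core/blob.h>

int main() {
  caffe2::Blob blob;

  // GetMutable<T> default-constructs a std::string inside the blob and
  // hands back a mutable pointer; the blob now owns the object.
  std::string* s = blob.GetMutable<std::string>();
  *s = "hello";

  // IsType/Get check the stored TypeMeta before returning a reference.
  if (blob.IsType<std::string>()) {
    std::cout << blob.Get<std::string>() << "\n";  // hello
  }
  std::cout << blob << "\n";  // operator<< prints Blob[<typename>]

  // ShareExternal stores a pointer without taking ownership; `external`
  // must outlive any later access to the blob.
  std::string external = "not owned by the blob";
  blob.ShareExternal<std::string>(&external);

  // Reset frees owned contents (nothing here, the pointer is shared) and
  // returns the blob to the empty state.
  blob.Reset();
  return 0;
}
```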
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h ADDED
@@ -0,0 +1,176 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+
7
+ namespace c10 {
8
+
9
+ struct IValue;
10
+ using Stack = std::vector<IValue>;
11
+
12
+ class OperatorHandle;
13
+ class KernelFunction;
14
+
15
+ // This kernel implements the behavior of falling through to the next available
16
+ // registered dispatch key. The implementation of this function is FAST; it adds
17
+ // no overhead to fall through to the next key. See the cpp file for some more
18
+ // implementation notes; notably, this does NOT actually go through the
19
+ // boxing/unboxing codepath.
20
+ TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
21
+
22
+ // Note [Ambiguity in AutogradOther kernel]
23
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24
+ // This error-reporting kernel is registered to the AutogradOther entry in the
25
+ // dispatch table when there is both a CompositeImplicitAutograd kernel and a
26
+ // backend kernel for ANY backend that maps to AutogradOther. To see why
27
+ // this is necessary in the AutogradOther case, it's helpful to first see
28
+ // why everything works out fine for a backend that has a reserved Autograd
29
+ // entry (see rule 2.2 in [Note] DispatchTable computation):
30
+ //
31
+ // CPU AutogradCPU
32
+ // reg? registers with...
33
+ // -------------------------------------------------
34
+ // y Autograd registration takes precedence
35
+ // over CompositeImplicitAutograd.
36
+ // This is good, because the CPU specific backend
37
+ // implementation is more specialized and typically better;
38
+ // if we used the composite, we would bypass it.
39
+ // (NB: the Autograd key is guaranteed to exist because
40
+ // the autograd codegen requires it!)
41
+ //
42
+ // n CompositeImplicitAutograd takes precedence.
43
+ // This is also good, because the Autograd
44
+ // registration (if it exists) would try to redispatch
45
+ // to the (non-existent) CPU implementation; by
46
+ // using the composite, we ensure the operator
47
+ // actually works.
48
+ //
49
+ // As you can see, when we have a specific Autograd key (AutogradCPU), we can
50
+ // decide whether or not to use the CompositeImplicitAutograd kernel or the
51
+ // Autograd kernel based on whether or not the backend kernel exists.
52
+ //
53
+ // However, for AutogradOther (which is the catchall autograd kernel for
54
+ // everything that doesn't have a specific Autograd key), we can't do this
55
+ // trick because there isn't any unique backend to peek at to disambiguate;
56
+ // backends that have their own implementations prefer the Autograd kernel,
57
+ // while unimplemented backends would prefer CompositeImplicitAutograd. Rather
58
+ // than arbitrarily pick one or the other, we just register a kernel that raises
59
+ // an error and let the user decide how to proceed.
60
+ TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
61
+
62
+ // Note [named_not_supported_kernel]
63
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64
+ // This kernel implements reporting an error message saying that named tensor is
65
+ // not supported. This kernel doesn't rely on the Stack, and so it is special
66
+ // cased in the dispatcher to be triggered before we attempt boxing (so we can
67
+ // give a good error message in cases when boxing is not supported). When
68
+ // boxing is universally supported this can be removed.
69
+ [[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
70
+
71
+ /**
72
+ * BoxedKernel is similar to a std::function storing a boxed kernel.
73
+ */
74
+ class TORCH_API BoxedKernel final {
75
+ public:
76
+ // This is how boxed kernels are actually stored
77
+ //
78
+ // Note [Plumbing Keys Through The Dispatcher]
79
+ // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
80
+ // upon every dispatch call in order to compute which kernel to dispatch to.
81
+ //
82
+ // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
83
+ // to have a first argument of type DispatchKeySet.
84
+ //
85
+ // What are the invariants of the DispatchKeySet when it gets passed to a kernel?
86
+ // - All keys to the left of the current dispatch key have been masked out.
87
+ // (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
88
+ // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments
89
+ // are still in the set.
90
+ //
91
+ // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
92
+ // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
93
+ // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
94
+ // upon redispatching.
95
+ //
96
+ // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature
97
+ // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
98
+ //
99
+ // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
100
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
101
+ using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
102
+ // This is the public API for how boxed kernels are defined
103
+ using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
104
+ using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
105
+
106
+ BoxedKernel();
107
+
108
+ // Fast path for dispatch to allow not touching the boxed kernel in
109
+ // the common case where unboxed is available.
110
+ bool isValid() const;
111
+ bool isFallthrough() const;
112
+
113
+ /**
114
+ * Call the function with boxed arguments.
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Create a BoxedKernel from a boxed function.
120
+ *
121
+ * Example:
122
+ *
123
+ * > void boxed_func(const OperatorHandle&, Stack* stack) {...}
124
+ * > BoxedKernel func = BoxedKernel::makeFromFunction<&boxed_func>();
125
+ */
126
+ template<BoxedKernelFunction* func>
127
+ static BoxedKernel makeFromFunction();
128
+
129
+ /**
130
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
131
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
132
+ */
133
+ template<BoxedKernelFunction_withDispatchKeys* func>
134
+ static BoxedKernel makeFromFunction();
135
+
136
+ /**
137
+ * Create a BoxedKernel from a boxed functor.
138
+ *
139
+ * Example:
140
+ *
141
+ * > class MyFunctor final : public c10::OperatorKernel {
142
+ * > public:
143
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
144
+ * > };
145
+ * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>());
146
+ */
147
+ template<class KernelFunctor>
148
+ static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
149
+
150
+
151
+ static BoxedKernel makeFallthrough();
152
+ static BoxedKernel makeAmbiguousAutogradOther();
153
+ static BoxedKernel makeNamedNotSupported();
154
+
155
+ private:
156
+
157
+ friend class KernelFunction;
158
+
159
+ template<BoxedKernelFunction* func>
160
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
161
+
162
+ template<BoxedKernelFunction_withDispatchKeys* func>
163
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
164
+
165
+ explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func);
166
+
167
+ OperatorKernel* getFunctor() const;
168
+ InternalBoxedKernelFunction* getFnPtr() const;
169
+
170
+ c10::intrusive_ptr<OperatorKernel> functor_;
171
+ InternalBoxedKernelFunction* boxed_kernel_func_;
172
+ };
173
+
174
+ } // namespace c10
175
+
176
+ #include <ATen/core/boxing/BoxedKernel_impl.h>
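The BoxedKernelFunction signature and the makeFromFunction/callBoxed pair declared above can be exercised roughly as follows. This is a minimal standalone sketch, not part of the header: log_all_args and make_logging_kernel are hypothetical names, and the OperatorHandle/DispatchKeySet/Stack the kernel is invoked with are assumed to come from the dispatcher.

#include <ATen/core/boxing/BoxedKernel.h>
#include <ATen/core/stack.h>

// Hypothetical boxed kernel: every argument arrives as an IValue on the stack.
void log_all_args(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  (void)op;
  (void)stack;
  // A real kernel would pop its inputs here and push its outputs.
}

// The function pointer is a template argument, so it must match
// BoxedKernelFunction, i.e. void(const OperatorHandle&, Stack*).
c10::BoxedKernel make_logging_kernel() {
  return c10::BoxedKernel::makeFromFunction<&log_all_args>();
}

// The dispatcher later invokes the kernel as
//   kernel.callBoxed(opHandle, dispatchKeySet, &stack);
// and the DispatchKeySet is dropped before log_all_args runs
// (see Note [Plumbing Keys Through The Dispatcher]).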
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h ADDED
@@ -0,0 +1,99 @@
1
+ #pragma once
2
+
3
+ namespace c10 {
4
+
5
+ inline BoxedKernel::BoxedKernel()
6
+ : functor_()
7
+ , boxed_kernel_func_(nullptr)
8
+ {}
9
+
10
+ inline BoxedKernel::BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func)
11
+ : functor_(std::move(functor))
12
+ , boxed_kernel_func_(boxed_kernel_func)
13
+ {}
14
+
15
+ template<BoxedKernel::BoxedKernelFunction* func>
16
+ inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) {
17
+ // Note that we're dropping the DispatchKeySet argument.
18
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
19
+ func(opHandle, stack);
20
+ }
21
+
22
+ template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
23
+ inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) {
24
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
25
+ func(opHandle, ks, stack);
26
+ }
27
+
28
+ inline bool BoxedKernel::isValid() const {
29
+ return boxed_kernel_func_ != nullptr;
30
+ }
31
+
32
+ inline bool BoxedKernel::isFallthrough() const {
33
+ return boxed_kernel_func_ == &fallthrough_kernel;
34
+ }
35
+
36
+ inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
37
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
38
+ boxed_kernel_func_ != nullptr,
39
+ "Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel."
40
+ );
41
+ (*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack);
42
+ }
43
+
44
+ template<BoxedKernel::BoxedKernelFunction* func>
45
+ inline BoxedKernel BoxedKernel::makeFromFunction() {
46
+ return BoxedKernel(
47
+ nullptr, // no functor_ object
48
+ &make_boxed_function<func>
49
+ );
50
+ }
51
+
52
+ template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
53
+ inline BoxedKernel BoxedKernel::makeFromFunction() {
54
+ return BoxedKernel(
55
+ nullptr, // no functor_ object
56
+ &make_boxed_function<func>
57
+ );
58
+ }
59
+
60
+ inline BoxedKernel BoxedKernel::makeFallthrough() {
61
+ return BoxedKernel(
62
+ nullptr, // no functor_ object
63
+ &fallthrough_kernel
64
+ );
65
+ }
66
+
67
+ inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() {
68
+ return BoxedKernel(
69
+ nullptr, // no functor_ object
70
+ &ambiguous_autogradother_kernel
71
+ );
72
+ }
73
+
74
+ inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
75
+ return BoxedKernel(
76
+ nullptr, // no functor_ object
77
+ &named_not_supported_kernel
78
+ );
79
+ }
80
+
81
+ template<class KernelFunctor>
82
+ inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
83
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
84
+ return BoxedKernel(
85
+ std::move(kernelFunctor),
86
+ [](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {
87
+ (*static_cast<KernelFunctor*>(kernel))(op, ks, stack);
88
+ }
89
+ );
90
+ }
91
+
92
+ inline OperatorKernel* BoxedKernel::getFunctor() const {
93
+ return functor_.get();
94
+ }
95
+ inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const {
96
+ return boxed_kernel_func_;
97
+ }
98
+
99
+ } // namespace c10
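As a rough usage sketch of makeFromFunctor above (NoopBoxedKernel and make_noop_kernel are made-up names, not part of the header): a boxed functor derives from c10::OperatorKernel and exposes the (OperatorHandle&, DispatchKeySet, Stack*) call operator that the lambda stored by makeFromFunctor invokes.

#include <ATen/core/boxing/BoxedKernel.h>
#include <ATen/core/boxing/OperatorKernel.h>
#include <ATen/core/stack.h>
#include <c10/core/DispatchKeySet.h>
#include <memory>

// Hypothetical boxed functor with the signature makeFromFunctor expects.
class NoopBoxedKernel final : public c10::OperatorKernel {
 public:
  void operator()(const c10::OperatorHandle& op,
                  c10::DispatchKeySet ks,
                  torch::jit::Stack* stack) {
    (void)op; (void)ks; (void)stack;
    // A real kernel would consume inputs from *stack and push its outputs.
  }
};

c10::BoxedKernel make_noop_kernel() {
  return c10::BoxedKernel::makeFromFunctor(std::make_unique<NoopBoxedKernel>());
}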
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h ADDED
@@ -0,0 +1,260 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ATen_fwd.h>
4
+ #include <ATen/core/boxing/BoxedKernel.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/core/DispatchKeySet.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+ #include <c10/util/TypeList.h>
9
+ #include <type_traits>
10
+
11
+ namespace c10 {
12
+
13
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
14
+
15
+ class OperatorHandle;
16
+ struct OperatorKernel;
17
+ class KernelFunction;
18
+
19
+ template <typename T>
20
+ using has_symint =
21
+ std::disjunction<
22
+ std::is_same<c10::SymInt, T>,
23
+ std::is_same<c10::SymIntArrayRef, T>,
24
+ std::is_same<at::OptionalSymIntArrayRef, T>,
25
+ std::is_same<c10::optional<c10::SymInt>, T>
26
+ >;
27
+
28
+ template <typename T>
29
+ struct remove_symint {
30
+ using type = T;
31
+ };
32
+
33
+ template <>
34
+ struct remove_symint<c10::SymInt> {
35
+ using type = int64_t;
36
+ };
37
+
38
+ template <>
39
+ struct remove_symint<at::OptionalSymIntArrayRef> {
40
+ using type = OptionalIntArrayRef;
41
+ };
42
+
43
+ template <>
44
+ struct remove_symint<c10::SymIntArrayRef> {
45
+ using type = c10::IntArrayRef;
46
+ };
47
+
48
+ template <>
49
+ struct remove_symint<c10::optional<c10::SymInt>> {
50
+ using type = c10::optional<int64_t>;
51
+ };
52
+
53
+
54
+ template <bool symint, typename T>
55
+ struct maybe_keep_symint final {};
56
+
57
+ template <typename T>
58
+ struct maybe_keep_symint<true, T> { using type = T; };
59
+
60
+ template <typename T>
61
+ struct maybe_keep_symint<false, T> { using type = typename remove_symint<T>::type; };
62
+
63
+ template <typename T>
64
+ using fn_has_symint = typename guts::typelist::true_for_any_type<
65
+ has_symint,
66
+ typename guts::infer_function_traits<T>::type::parameter_types
67
+ >;
68
+
69
+ template <typename T>
70
+ struct fn_remove_symint;
71
+
72
+ template <typename Ret, typename... Args>
73
+ struct fn_remove_symint<Ret(Args...)> {
74
+ using type = Ret(typename remove_symint<Args>::type...);
75
+ };
76
+
77
+ /**
78
+ * KernelFunction is similar to std::function but stores a kernel function.
79
+ * You can create a KernelFunction from a boxed or unboxed function/functor/lambda
80
+ * and call it in a boxed or unboxed way. If the way it was created doesn't
81
+ * match the way it was called, it will do boxing or unboxing as necessary.
82
+ */
83
+ class TORCH_API KernelFunction final {
84
+ public:
85
+ using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction;
86
+ using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction;
87
+ using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys;
88
+
89
+ KernelFunction();
90
+
91
+ // Fast path for dispatch to allow not touching the boxed kernel in
92
+ // the common case where unboxed is available.
93
+ bool isValidUnboxed() const;
94
+ bool isValidSymUnboxed() const;
95
+ bool isValid() const;
96
+ bool isFallthrough() const;
97
+
98
+ /**
99
+ * Call the function in a boxed way.
100
+ * If the kernel function was created with an unboxed function,
101
+ * this will call an unboxing wrapper which then calls into that
102
+ * unboxed function.
103
+ *
104
+ * Example:
105
+ *
106
+ * > void boxed_func(const OperatorHandle&, Stack* stack) {...}
107
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
108
+ * > Tensor result = func.callBoxed(stack);
109
+ *
110
+ * Or, with an unboxed implementation:
111
+ *
112
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
113
+ * > [] (Tensor a, bool b) -> Tensor {...});
114
+ * > Tensor result = func.callBoxed(stack);
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Call the function in an unboxed way.
120
+ * If the kernel function was created with a boxed function,
121
+ * this will box all inputs and then call into that boxed function.
122
+ *
123
+ * Note that this doesn't work for all types yet.
124
+ *
125
+ * Example:
126
+ *
127
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
128
+ * > [] (Tensor a, bool b) -> Tensor {...});
129
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
130
+ *
131
+ * Or, with a boxed implementation:
132
+ *
133
+ * > void boxed_func(const OperatorHandle&, Stack* stack) {...}
134
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
135
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
136
+ */
137
+ template<class Return, class... Args>
138
+ Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const;
139
+
140
+ /**
141
+ * Create a KernelFunction from a BoxedKernel.
142
+ */
143
+ static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn);
144
+
145
+ /**
146
+ * Create a KernelFunction from a boxed function.
147
+ *
148
+ * Example:
149
+ *
150
+ * > void boxed_func(const OperatorHandle&, Stack* stack) {...}
151
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
152
+ */
153
+ template<BoxedKernelFunction* func>
154
+ static KernelFunction makeFromBoxedFunction();
155
+
156
+ /**
157
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
158
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
159
+ */
160
+ template<BoxedKernelFunction_withDispatchKeys* func>
161
+ static KernelFunction makeFromBoxedFunction();
162
+
163
+ /**
164
+ * Create a KernelFunction from an unboxed functor.
165
+ *
166
+ * Example:
167
+ *
168
+ * > class MyFunctor final : public c10::OperatorKernel {
169
+ * > public:
170
+ * > Tensor operator()(Tensor a, Tensor b) {...}
171
+ * > };
172
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, MyFunctor>(std::make_unique<MyFunctor>());
173
+ */
174
+ template<bool AllowLegacyTypes = false, class KernelFunctor>
175
+ static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
176
+
177
+ /**
178
+ * Create a KernelFunction from a boxed functor.
179
+ *
180
+ * Example:
181
+ *
182
+ * > class MyFunctor final : public c10::OperatorKernel {
183
+ * > public:
184
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
185
+ * > };
186
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique<MyFunctor>());
187
+ */
188
+ template<class KernelFunctor>
189
+ static KernelFunction makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
190
+
191
+ /**
192
+ * Create a KernelFunction from an unboxed function.
193
+ * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction
194
+ * because knowing the function pointer as a template argument (i.e. at
195
+ * compile time) allows the compiler to inline the function into its
196
+ * unboxing wrapper and yields better performance when calling the function.
197
+ *
198
+ * Example:
199
+ *
200
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
201
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(unboxed_func));
202
+ */
203
+ template<class FuncPtr, bool AllowLegacyTypes = false>
204
+ static KernelFunction makeFromUnboxedFunction(FuncPtr);
205
+
206
+ /**
207
+ * Create a KernelFunction from an unboxed function.
208
+ * KernelFunction::makeFromUnboxedFunction is usually a better choice than
209
+ * this if you know the function pointer at compile time, see doc comment
210
+ * there for an explanation.
211
+ *
212
+ * Example:
213
+ *
214
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
215
+ * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func);
216
+ */
217
+ template<bool AllowLegacyTypes = false, class FuncType>
218
+ static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func);
219
+
220
+ static KernelFunction makeFallthrough();
221
+ static KernelFunction makeAmbiguousAutogradOther();
222
+ static KernelFunction makeNamedNotSupported();
223
+
224
+ /**
225
+ * Create a KernelFunction from an unboxed lambda.
226
+ *
227
+ * Example:
228
+ *
229
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
230
+ * > [] (Tensor a, bool b) -> Tensor {...});
231
+ */
232
+ template<bool AllowLegacyTypes = false, class Lambda>
233
+ static std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
234
+ template<bool AllowLegacyTypes = false, class Lambda>
235
+ static std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
236
+
237
+ std::string dumpState() const;
238
+ // For testing internal invariants only
239
+ bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
240
+
241
+ private:
242
+
243
+ explicit KernelFunction(
244
+ std::unique_ptr<OperatorKernel> functor,
245
+ InternalBoxedKernelFunction* boxed_kernel_func,
246
+ void* unboxed_kernel_func,
247
+ void* sym_unboxed_kernel_func);
248
+ explicit KernelFunction(
249
+ BoxedKernel boxed_fn,
250
+ void* unboxed_kernel_func,
251
+ void* sym_unboxed_kernel_func);
252
+
253
+ BoxedKernel boxed_kernel_func_;
254
+ void* unboxed_kernel_func_;
255
+ void* sym_unboxed_kernel_func_;
256
+ };
257
+
258
+ }
259
+
260
+ #include <ATen/core/boxing/KernelFunction_impl.h>
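A hedged sketch of how a KernelFunction is typically built from an unboxed lambda and then called by the dispatcher. The lambda, make_scale_kernel, opHandle and keys below are illustrative placeholders; only the API calls come from the header above.

#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/Tensor.h>

// Stateless unboxed lambda -> KernelFunction (the boxing wrapper is generated on demand).
c10::KernelFunction make_scale_kernel() {
  return c10::KernelFunction::makeFromUnboxedLambda(
      [](const at::Tensor& self, double factor) -> at::Tensor {
        return self.mul(factor);
      });
}

// The dispatcher can then use either calling convention (opHandle and keys are
// dispatcher-provided, so they are only shown as comments here):
//
//   at::Tensor out = kf.call<at::Tensor, const at::Tensor&, double>(
//       opHandle, keys, input, 2.0);        // unboxed fast path
//
//   kf.callBoxed(opHandle, keys, &stack);   // boxed path; result stays on the stack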
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h ADDED
@@ -0,0 +1,229 @@
1
+ #include <ATen/core/boxing/impl/boxing.h>
2
+ #include <ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h>
3
+ #include <ATen/core/boxing/impl/WrapFunctionIntoFunctor.h>
4
+ #include <ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h>
5
+
6
+ #include <c10/util/C++17.h>
7
+ #include <type_traits>
8
+
9
+ namespace c10 {
10
+
11
+ inline KernelFunction::KernelFunction()
12
+ : boxed_kernel_func_()
13
+ , unboxed_kernel_func_(nullptr)
14
+ , sym_unboxed_kernel_func_(nullptr)
15
+ {}
16
+
17
+ inline KernelFunction::KernelFunction(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
18
+ : boxed_kernel_func_(std::move(functor), boxed_kernel_func)
19
+ , unboxed_kernel_func_(unboxed_kernel_func)
20
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
21
+ {}
22
+
23
+ inline KernelFunction::KernelFunction(BoxedKernel boxed_fn, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
24
+ : boxed_kernel_func_(std::move(boxed_fn))
25
+ , unboxed_kernel_func_(unboxed_kernel_func)
26
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
27
+ {}
28
+
29
+ inline bool KernelFunction::isValidUnboxed() const {
30
+ return unboxed_kernel_func_ != nullptr;
31
+ }
32
+
33
+ inline bool KernelFunction::isValidSymUnboxed() const {
34
+ return sym_unboxed_kernel_func_ != nullptr;
35
+ }
36
+
37
+ inline bool KernelFunction::isValid() const {
38
+ return boxed_kernel_func_.isValid();
39
+ }
40
+
41
+ inline bool KernelFunction::isFallthrough() const {
42
+ return boxed_kernel_func_.isFallthrough();
43
+ }
44
+
45
+ inline void KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
46
+ boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack);
47
+ }
48
+
49
+ template<class Return, class... Args>
50
+ inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) {
51
+ using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...);
52
+ ActualSignature* func = reinterpret_cast<ActualSignature*>(unboxed_kernel_func);
53
+ return (*func)(functor, dispatchKeySet, std::forward<Args>(args)...);
54
+ }
55
+
56
+ // This template requires you to explicitly specify the argument you want to
57
+ // forward; it doesn't work if you try to deduce it
58
+ // NB: keep this in sync with cloneWithRealTypes in function_schema.cpp
59
+
60
+ template <typename T>
61
+ inline typename remove_symint<T>::type unpackSymInt(T x) { return x; }
62
+
63
+ template <>
64
+ inline typename remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
65
+ return x.guard_int(__FILE__, __LINE__);
66
+ }
67
+
68
+ template <>
69
+ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIntArrayRef x) {
70
+ return C10_AS_INTARRAYREF_SLOW(x);
71
+ }
72
+
73
+ template <>
74
+ inline typename remove_symint<c10::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
75
+ return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt;
76
+ }
77
+
78
+ template <>
79
+ inline typename remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(at::OptionalSymIntArrayRef x) {
80
+ return x.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*x)) : c10::nullopt;
81
+ }
82
+
83
+ template<class Return, class... Args>
84
+ C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const {
85
+ // note: Args above is intentionally not Args&&. We don't want perfect
86
+ // forwarding, which would require Args to be deduced, but instead we
87
+ // want callers to explicitly specify the Args.
88
+
89
+ if constexpr (std::disjunction_v<has_symint<Args>...>) {
90
+ if (sym_unboxed_kernel_func_ != nullptr) {
91
+ auto *functor = boxed_kernel_func_.getFunctor();
92
+ return callUnboxedKernelFunction<Return, Args...>(
93
+ sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
94
+ }
95
+
96
+ if (unboxed_kernel_func_ != nullptr) {
97
+ auto *functor = boxed_kernel_func_.getFunctor();
98
+ return callUnboxedKernelFunction<Return, typename remove_symint<Args>::type...>(
99
+ unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt<Args>(args)...);
100
+ }
101
+ } else {
102
+ if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) {
103
+ auto *functor = boxed_kernel_func_.getFunctor();
104
+ return callUnboxedKernelFunction<Return, Args...>(
105
+ unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
106
+ }
107
+ }
108
+
109
+ return impl::BoxedKernelWrapper<Return(Args...)>::call(
110
+ boxed_kernel_func_,
111
+ opHandle,
112
+ dispatchKeySet,
113
+ std::forward<Args>(args)...
114
+ );
115
+ }
116
+
117
+ inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) {
118
+ return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer
119
+ }
120
+
121
+ template<KernelFunction::BoxedKernelFunction* func>
122
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
123
+ return KernelFunction::makeFromBoxedKernel(
124
+ BoxedKernel::makeFromFunction<func>());
125
+ }
126
+
127
+ template<KernelFunction::BoxedKernelFunction_withDispatchKeys* func>
128
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
129
+ return KernelFunction::makeFromBoxedKernel(
130
+ BoxedKernel::makeFromFunction<func>());
131
+ }
132
+
133
+ inline KernelFunction KernelFunction::makeFallthrough() {
134
+ return KernelFunction::makeFromBoxedKernel(
135
+ BoxedKernel::makeFallthrough());
136
+ }
137
+
138
+ inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() {
139
+ return KernelFunction::makeFromBoxedKernel(
140
+ BoxedKernel::makeAmbiguousAutogradOther());
141
+ }
142
+
143
+ inline KernelFunction KernelFunction::makeNamedNotSupported() {
144
+ return KernelFunction::makeFromBoxedKernel(
145
+ BoxedKernel::makeNamedNotSupported());
146
+ }
147
+
148
+ template<bool AllowLegacyTypes, class KernelFunctor>
149
+ inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor) {
150
+ #ifndef NDEBUG
151
+ // This assertion is costly for build time so it's debug-gated.
152
+ static_assert(guts::is_functor<KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor> but the argument is not a functor.");
153
+ #endif
154
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
155
+
156
+ auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed<KernelFunctor>::call;
157
+ void* void_unboxed_fn = reinterpret_cast<void*>(unboxed_fn);
158
+ bool is_symint = fn_has_symint<decltype(unboxed_fn)>::value;
159
+ return KernelFunction(
160
+ std::move(kernelFunctor),
161
+ &impl::make_boxed_from_unboxed_functor<KernelFunctor, AllowLegacyTypes>::call,
162
+ is_symint ? nullptr : void_unboxed_fn,
163
+ is_symint ? void_unboxed_fn : nullptr
164
+ );
165
+ }
166
+
167
+ template<class KernelFunctor>
168
+ inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
169
+ return KernelFunction::makeFromBoxedKernel(
170
+ BoxedKernel::makeFromFunctor(std::move(kernelFunctor)));
171
+ }
172
+
173
+ template<class FuncPtr, bool AllowLegacyTypes>
174
+ inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) {
175
+ static_assert(is_compile_time_function_pointer<FuncPtr>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN.");
176
+ static_assert(!std::is_same<typename FuncPtr::FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
177
+ static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr");
178
+
179
+ #if !defined(C10_MOBILE)
180
+ (void)func_ptr; // Suppress unused variable warning
181
+ return makeFromUnboxedFunctor<AllowLegacyTypes, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>(
182
+ guts::make_unique_base<OperatorKernel, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>()
183
+ );
184
+ #else
185
+ // On mobile, we'd rather optimize for binary size than for performance,
186
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
187
+ // instead.
188
+ return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr());
189
+ #endif
190
+ }
191
+
192
+ template<bool AllowLegacyTypes, class FuncType>
193
+ inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) {
194
+ static_assert(guts::is_function_type<FuncType>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type.");
195
+ static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
196
+ TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
197
+
198
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(
199
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(func)
200
+ );
201
+ }
202
+
203
+ template<bool AllowLegacyTypes, class Lambda>
204
+ inline std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
205
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
206
+
207
+ #if !defined(C10_MOBILE)
208
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
209
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
210
+ );
211
+ #else
212
+ // On mobile, we'd rather optimize for binary size than for performance,
213
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
214
+ // instead.
215
+ using FuncType = typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type;
216
+ return makeFromUnboxedRuntimeFunction<AllowLegacyTypes, FuncType>(lambda);
217
+ #endif
218
+ }
219
+
220
+ template<bool AllowLegacyTypes, class Lambda>
221
+ inline std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
222
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
223
+
224
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
225
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
226
+ );
227
+ }
228
+
229
+ }
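As a sketch of the compile-time registration path implemented above (my_mul and make_my_mul_kernel are hypothetical names; TORCH_FN is the macro the static_assert in makeFromUnboxedFunction requires):

#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/CompileTimeFunctionPointer.h>

// Hypothetical unboxed kernel with a plain (non-SymInt) signature.
at::Tensor my_mul(const at::Tensor& a, const at::Tensor& b) {
  return a.mul(b);
}

// TORCH_FN captures the function pointer at compile time, so the unboxing
// wrapper can inline it; a plain runtime pointer would instead go through
// makeFromUnboxedRuntimeFunction.
c10::KernelFunction make_my_mul_kernel() {
  return c10::KernelFunction::makeFromUnboxedFunction(TORCH_FN(my_mul));
}

// Because the signature contains no SymInt types, fn_has_symint is false and
// makeFromUnboxedFunctor stores the wrapper in unboxed_kernel_func_ rather
// than sym_unboxed_kernel_func_.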
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+ #include <c10/util/intrusive_ptr.h>
3
+
4
+ namespace c10 {
5
+
6
+ /**
7
+ * Inherit from OperatorKernel to implement a c10 kernel.
8
+ *
9
+ * Example:
10
+ * > namespace {
11
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
12
+ * > public:
13
+ * > Tensor operator()(Tensor a, Tensor b) {...}
14
+ * > };
15
+ * > }
16
+ *
17
+ * The kernel class is allowed to have members but these are equivalent
18
+ * to global variables. The kernel implementation is responsible for
19
+ * preventing race conditions on them.
20
+ *
21
+ * See below for how to register this kernel with PyTorch.
22
+ */
23
+ struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target {
24
+ ~OperatorKernel() override = default;
25
+ };
26
+
27
+ } // namespace c10
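A small sketch of the functor contract described above (CountingRelu is a made-up example): members persist across calls like globals, so the kernel has to make them race-safe itself.

#include <ATen/core/boxing/OperatorKernel.h>
#include <ATen/core/Tensor.h>
#include <atomic>
#include <cstdint>

namespace {
// Hypothetical kernel with state; std::atomic keeps the counter race-free,
// as the comment above demands of any member.
class CountingRelu final : public c10::OperatorKernel {
 public:
  at::Tensor operator()(const at::Tensor& input) {
    calls_.fetch_add(1, std::memory_order_relaxed);
    return input.relu();
  }
 private:
  std::atomic<int64_t> calls_{0};
};
} // namespace

// Such a functor would be handed to the dispatcher via, e.g.,
//   KernelFunction::makeFromUnboxedFunctor<false, CountingRelu>(
//       std::make_unique<CountingRelu>());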
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/CompileTimeFunctionPointer.h>
4
+
5
+ namespace c10 {
6
+ namespace impl {
7
+ namespace detail {
8
+ template<class FuncPtr, class ReturnType, class ParameterList> class WrapFunctionIntoFunctor_ {};
9
+ template<class FuncPtr, class ReturnType, class... Parameters>
10
+ class WrapFunctionIntoFunctor_<FuncPtr, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
11
+ public:
12
+ C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) {
13
+ return (*FuncPtr::func_ptr())(std::forward<Parameters>(args)...);
14
+ }
15
+ };
16
+ }
17
+
18
+ // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
19
+ // Since it is a compile time function pointer, many compilers can inline it
20
+ // into the wrapper and you don't get any performance overhead for wrapping.
21
+ template<class FuncPtr>
22
+ struct WrapFunctionIntoFunctor final {
23
+ static_assert(c10::is_compile_time_function_pointer<FuncPtr>::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN.");
24
+ using type = detail::WrapFunctionIntoFunctor_<
25
+ FuncPtr,
26
+ typename guts::function_traits<typename FuncPtr::FuncType>::return_type,
27
+ typename guts::function_traits<typename FuncPtr::FuncType>::parameter_types
28
+ >;
29
+ };
30
+ }
31
+
32
+ }
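Roughly how this wrapper is meant to be instantiated (my_relu and MyReluFunctor are illustrative names, not part of the header; decltype is used because TORCH_FN yields a value of a compile-time function-pointer type):

#include <ATen/core/boxing/KernelFunction.h>   // pulls in the wrapper and TORCH_FN
#include <ATen/core/Tensor.h>
#include <type_traits>

at::Tensor my_relu(const at::Tensor& x) { return x.relu(); }

// The wrapper turns the compile-time function pointer into an OperatorKernel
// functor whose operator() forwards to my_relu and can typically be inlined.
using MyReluFunctor =
    c10::impl::WrapFunctionIntoFunctor<decltype(TORCH_FN(my_relu))>::type;

static_assert(std::is_base_of<c10::OperatorKernel, MyReluFunctor>::value,
              "the wrapped functor is a c10 kernel");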
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/TypeTraits.h>
4
+
5
+ namespace c10 {
6
+
7
+ namespace impl {
8
+ namespace detail {
9
+ template<class FuncType, class ReturnType, class ParameterList> class WrapFunctionIntoRuntimeFunctor_ {};
10
+ template<class FuncType, class ReturnType, class... Parameters>
11
+ class WrapFunctionIntoRuntimeFunctor_<FuncType, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
12
+ public:
13
+ template<class FuncType_>
14
+ explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func)
15
+ : kernel_func_(std::forward<FuncType_>(kernel_func)) {}
16
+
17
+ decltype(auto) operator()(Parameters... args) {
18
+ return kernel_func_(std::forward<Parameters>(args)...);
19
+ }
20
+
21
+ private:
22
+ FuncType kernel_func_;
23
+ };
24
+ }
25
+
26
+ // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that
27
+ // inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
28
+ // This can, for example, be used for lambdas, functors or even function pointers.
29
+ // In the case of function pointers, since it is a runtime function pointer,
30
+ // there is an overhead for calling it whenever the kernel is invoked.
31
+ template<class FuncType>
32
+ using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_<
33
+ FuncType,
34
+ typename guts::infer_function_traits_t<FuncType>::return_type,
35
+ typename guts::infer_function_traits_t<FuncType>::parameter_types
36
+ >;
37
+ }
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h ADDED
@@ -0,0 +1,387 @@
1
+ #pragma once
2
+
3
+ // This file contains boxing (not unboxing) logic,
4
+ // i.e. how to make a vector<IValue> from a set of concrete arguments.
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <ATen/core/stack.h>
8
+ #include <c10/core/TensorOptions.h>
9
+
10
+ #include <ATen/core/boxing/BoxedKernel.h>
11
+
12
+ #include <c10/util/Metaprogramming.h>
13
+ #include <type_traits>
14
+
15
+ namespace c10 {
16
+ namespace impl {
17
+
18
+ //
19
+ // utils
20
+ //
21
+
22
+ // is_mutable_tensor_ref
23
+ template <class T> struct is_mutable_tensor_ref : std::false_type {};
24
+ template <> struct is_mutable_tensor_ref<at::Tensor&> : std::true_type {};
25
+
26
+ // is_tuple_of_mutable_tensor_refs
27
+ //
28
+ template <class T, class Enable = void>
29
+ struct is_tuple_of_mutable_tensor_refs : std::false_type {};
30
+
31
+ template <class T>
32
+ struct is_tuple_of_mutable_tensor_refs<T, std::enable_if_t<guts::is_instantiation_of<std::tuple, T>::value, void>>
33
+ : guts::typelist::all<is_mutable_tensor_ref, guts::typelist::from_tuple_t<T>>
34
+ {};
35
+
36
+ // has_ivalue_to<T> tests the presence/absence of instance method IValue::to<T>()
37
+ //
38
+ template <class T, class Enable = void>
39
+ struct has_ivalue_to : std::false_type {};
40
+
41
+ template <class T>
42
+ struct has_ivalue_to<T, std::void_t<decltype(std::declval<IValue>().to<T>())>>
43
+ : std::true_type
44
+ {};
45
+
46
+ //
47
+ // boxing predicates
48
+ //
49
+
50
+ // A boxable arg type is one that IValue has a constructor for.
51
+ template <typename T>
52
+ using can_box =
53
+ std::disjunction<
54
+ std::is_constructible<IValue, std::decay_t<T>>,
55
+ // TensorOptions are not directly constructible into IValue,
56
+ // but torch::jit::push knows how to handle them
57
+ std::is_same<TensorOptions, std::decay_t<T>>
58
+ >;
59
+
60
+ template <typename... Ts>
61
+ using can_box_all = std::conjunction<can_box<Ts>...>;
62
+
63
+ // an unboxable result is one that can be extracted from an IValue
64
+ template <typename T>
65
+ using can_unbox =
66
+ std::conjunction<
67
+ std::disjunction<
68
+ has_ivalue_to<T>,
69
+ // void returns are ok
70
+ std::is_same<void, T>
71
+ >,
72
+ std::negation<std::is_lvalue_reference<T>>
73
+ >;
74
+
75
+ //
76
+ // boxArgs - utility for pushing unboxed args onto IValue stack
77
+ //
78
+ template <class... Args>
79
+ torch::jit::Stack boxArgs(Args... args) {
80
+ // TODO Reuse stack vector instead of allocating?
81
+ torch::jit::Stack stack;
82
+ stack.reserve(sizeof...(Args));
83
+ torch::jit::push(stack, std::forward<Args>(args)...);
84
+ return stack;
85
+ }
86
+
87
+ template <class T>
88
+ static inline constexpr size_t boxed_size_one() {
89
+ static_assert(!std::is_same<std::decay_t<T>, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference");
90
+ return 1;
91
+ }
92
+
93
+ // torch::jit::push pushes 4 values for a TensorOptions; this needs to
94
+ // be kept in sync.
95
+ template <>
96
+ inline constexpr size_t boxed_size_one<c10::TensorOptions>() {
97
+ return 4;
98
+ }
99
+
100
+ // NOTE: this could probably be simplified with C++17 fold expressions.
101
+ template <typename...>
102
+ struct BoxedSize : std::integral_constant<size_t, 0> {};
103
+ template <class T, class... Args>
104
+ struct BoxedSize<T, Args...> : std::integral_constant<size_t, boxed_size_one<T>() + BoxedSize<Args...>::value> {};
105
+
106
+ template <class... Args>
107
+ static inline constexpr size_t boxed_size() {
108
+ return BoxedSize<Args...>::value;
109
+ }
110
+
111
+ using IValueAlignedStorage = std::aligned_storage_t<sizeof(IValue), alignof(IValue)>;
112
+
113
+ template <typename T>
114
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) {
115
+ new (&dest[lastIdx]) IValue(arg);
116
+ lastIdx++;
117
+ }
118
+
119
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) {
120
+ new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype()));
121
+ new (&dest[lastIdx++]) IValue(options.layout());
122
+ new (&dest[lastIdx++]) IValue(options.device());
123
+ new (&dest[lastIdx++]) IValue(options.pinned_memory());
124
+ }
125
+
126
+ inline void boxArgsToStack(IValueAlignedStorage*, int&) {}
127
+
128
+ template<typename T, typename... Args>
129
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) {
130
+ boxToStack(dest, arg, lastIdx);
131
+ boxArgsToStack(dest, lastIdx, args...);
132
+ }
133
+
134
+ //
135
+ // PopResult is a helper class whose specializations handle popping single and
136
+ // multiple return values, respectively.
137
+ //
138
+ template <class Result>
139
+ struct PopResult final {
140
+ static Result call(Stack& stack) {
141
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
142
+ stack.size() == 1,
143
+ "Boxed kernel was expected to return one value on the stack, ",
144
+ "but instead pushed ", stack.size(), " values."
145
+ );
146
+ return std::move(stack[0]).to<Result>();
147
+ }
148
+ };
149
+
150
+ template <class... Types>
151
+ struct PopResult<std::tuple<Types...>> final {
152
+ using Result = std::tuple<Types...>;
153
+
154
+ static Result call(Stack& stack) {
155
+ // for tuple return types, boxed kernel has pushed multiple values onto the stack
156
+ constexpr int RetCount = sizeof...(Types);
157
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
158
+ stack.size() == RetCount,
159
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
160
+ "but instead pushed ", stack.size(), " values."
161
+ );
162
+ return pop_to_tuple_impl(stack, std::make_index_sequence<RetCount>());
163
+ }
164
+ private:
165
+ // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise.
166
+ // I'm sure there's an incantation that slips it past the parser but eh
167
+ template <size_t... indices>
168
+ static Result pop_to_tuple_impl(Stack& stack, std::index_sequence<indices...>) {
169
+ return std::make_tuple((std::move(stack[indices]).to<Types>())...);
170
+ }
171
+ };
172
+
173
+ //
174
+ // BoxedKernelWrapper
175
+ //
176
+ // For a given function type FT, BoxedKernelWrapper<FT> implements
177
+ // a `call` method that
178
+ // - takes a boxed kernel and unboxed arguments as specified by FT,
179
+ // - calls `boxArgs` to box the arguments
180
+ // - calls the boxed kernel
181
+ // - unboxes and returns the result
182
+ //
183
+ // The partial specializations below handle various cases: in
184
+ // particular, not all types appearing in op signatures are supported,
185
+ // and ops returning references have nonstandard wrapper implementations.
186
+ //
187
+
188
+ // 1. The base specialization of BoxedKernelWrapper should never be instantiated.
189
+ // A "no call method defined on BoxedKernelWrapper" compile error means that
190
+ // an op signature has failed to trigger any of the partial specializations
191
+ // that follow this one.
192
+ //
193
+ template <class FuncType, class Enable = void>
194
+ struct BoxedKernelWrapper {
195
+ // The reason we're not just doing straight up static_assert(false, ...) here:
196
+ // Basically, the way to make sure a static_assert only fires if a template
197
+ // is actually instantiated (rather than every time the file is parsed) is to use
198
+ // template parameters in the expression, e.g. FuncType here. However, since
199
+ // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same
200
+ // effect.
201
+ static_assert(sizeof(FuncType) != sizeof(FuncType),
202
+ "Function signature contains one or more unsupported parameter and/or return types. "
203
+ "Look for a nearby error like "
204
+ "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" "
205
+ "- (your function type) is the unsupported signature.");
206
+ };
207
+
208
+ //
209
+ // 2. Supported signatures, other than those involving non-const Tensor refs -
210
+ // i.e., "functional" ops.
211
+ //
212
+
213
+ template <class Result, class... Args>
214
+ struct BoxedKernelWrapper<
215
+ Result(Args...),
216
+ std::enable_if_t<
217
+ can_box_all<Args...>::value && can_unbox<Result>::value && !is_tuple_of_mutable_tensor_refs<Result>::value,
218
+ void
219
+ >
220
+ > {
221
+ static Result call(
222
+ const BoxedKernel& boxed_kernel_func,
223
+ const OperatorHandle& opHandle,
224
+ DispatchKeySet dispatchKeySet,
225
+ Args... args
226
+ ) {
227
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
228
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
229
+
230
+ if constexpr (!std::is_same_v<void, Result>) {
231
+ // op has pushed one or more values onto the stack.
232
+ return PopResult<Result>::call(stack);
233
+ } else {
234
+ // op returns void, boxed kernel has pushed nothing onto stack.
235
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
236
+ stack.empty(),
237
+ "Boxed kernel was expected to return no values on the stack, ",
238
+ "but instead returned ", stack.size(), " values."
239
+ );
240
+ }
241
+ }
242
+ };
243
+
244
+ //
245
+ // 3. in-place ops take a single non-const Tensor reference
246
+ // as their first argument, and return it.
247
+ //
248
+ // Note: all signatures matching this pattern are assumed to be for such ops.
249
+ // Because of this, the generated BoxedKernelWrapper specializations simply
250
+ // return the in-place argument.
251
+ //
252
+
253
+ template <class... OtherArgs>
254
+ struct BoxedKernelWrapper<
255
+ at::Tensor&(at::Tensor&, OtherArgs...),
256
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
257
+ > {
258
+ static at::Tensor& call(
259
+ const BoxedKernel& boxed_kernel_func,
260
+ const OperatorHandle& opHandle,
261
+ DispatchKeySet dispatchKeySet,
262
+ at::Tensor& outArg, OtherArgs... otherArgs
263
+ ) {
264
+ torch::jit::Stack stack = boxArgs<at::Tensor&, OtherArgs...>(outArg, std::forward<OtherArgs>(otherArgs)...);
265
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
266
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
267
+ stack.size() == 1,
268
+ "Boxed kernel was expected to return a single value on the stack, ",
269
+ "but instead returned ", stack.size(), " values."
270
+ );
271
+
272
+ return outArg;
273
+ }
274
+ };
275
+
276
+ //
277
+ // 3.5. In-process migration to make in-place ops take and return
278
+ // const references instead.
279
+ template <class... OtherArgs>
280
+ struct BoxedKernelWrapper<
281
+ const at::Tensor&(const at::Tensor&, OtherArgs...),
282
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
283
+ > {
284
+ static const at::Tensor& call(
285
+ const BoxedKernel& boxed_kernel_func,
286
+ const OperatorHandle& opHandle,
287
+ DispatchKeySet dispatchKeySet,
288
+ const at::Tensor& outArg, OtherArgs... otherArgs
289
+ ) {
290
+ torch::jit::Stack stack = boxArgs(outArg, otherArgs...);
291
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
292
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
293
+ stack.size() == 1,
294
+ "Boxed kernel was expected to return a single value on the stack, ",
295
+ "but instead returned ", stack.size(), " values."
296
+ );
297
+
298
+ return outArg;
299
+ }
300
+ };
301
+
302
+ //
303
+ // 4. out of place ops that take a single non-const Tensor reference as their
304
+ // final argument, and also return it.
305
+ //
306
+ // Note: all signatures matching this pattern are assumed to be for such ops.
307
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
308
+ // return out arguments.
309
+ //
310
+ template <class FirstArg, class... RestArgs>
311
+ struct BoxedKernelWrapper<
312
+ at::Tensor&(FirstArg, RestArgs...),
313
+ std::enable_if_t<
314
+ can_box_all<FirstArg, RestArgs...>::value
315
+ // this skips over in-place kernels with a non-const Tensor
316
+ // arg at the front, so those can unambiguously trigger the preceding specialization.
317
+ && !is_mutable_tensor_ref<FirstArg>::value,
318
+ void
319
+ >
320
+ > {
321
+ static at::Tensor& call(
322
+ const BoxedKernel& boxed_kernel_func,
323
+ const OperatorHandle& opHandle,
324
+ DispatchKeySet dispatchKeySet,
325
+ FirstArg firstArg, RestArgs... restArgs
326
+ ) {
327
+ torch::jit::Stack stack = boxArgs<FirstArg, RestArgs...>(std::forward<FirstArg>(firstArg), std::forward<RestArgs>(restArgs)...);
328
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
329
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
330
+ stack.size() == 1,
331
+ "Boxed kernel was expected to return a single value on the stack, ",
332
+ "but instead returned ", stack.size(), " values."
333
+ );
334
+
335
+ // reusing restArgs after it has been forwarded here is ok because we know
336
+ // that the last element is of type `Tensor&`.
337
+ return std::get<sizeof...(RestArgs) - 1>(std::tuple<RestArgs...>{restArgs...});
338
+ }
339
+ };
340
+
341
+ //
342
+ // 5. out of place ops that take multiple non-const Tensor references as their
343
+ // final arguments, and return them in a std::tuple.
344
+ //
345
+ // Note: all signatures matching this pattern are assumed to be for such ops.
346
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
347
+ // return the out arguments.
348
+ //
349
+ template <class Result, class... Args>
350
+ struct BoxedKernelWrapper<
351
+ Result(Args...),
352
+ std::enable_if_t<
353
+ can_box_all<Args...>::value && is_tuple_of_mutable_tensor_refs<Result>::value,
354
+ void
355
+ >
356
+ > {
357
+ static Result call(
358
+ const BoxedKernel& boxed_kernel_func,
359
+ const OperatorHandle& opHandle,
360
+ DispatchKeySet dispatchKeySet,
361
+ Args... args
362
+ ) {
363
+ using ArgTuple = std::tuple<Args...>;
364
+ constexpr int RetCount = std::tuple_size<Result>();
365
+
366
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
367
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
368
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
369
+ stack.size() == RetCount,
370
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
371
+ "but instead returned ", stack.size(), " values."
372
+ );
373
+
374
+ // reusing args after it has been forwarded here is ok because we know
375
+ // that the last RetCount elements are of type `Tensor&`.
376
+ auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
377
+ static_assert(
378
+ std::is_same<Result, decltype(result)>::value,
379
+ "The parameter list of an op returning a tuple of Tensor references "
380
+ "must end with an equal number of Tensor reference parameters."
381
+ );
382
+ return result;
383
+ }
384
+ };
385
+
386
+ } // impl
387
+ } // c10
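A small sketch of the boxing round trip these helpers implement (box_example and unbox_result are illustrative names; the real caller is BoxedKernelWrapper::call above):

#include <ATen/core/boxing/impl/boxing.h>
#include <ATen/core/Tensor.h>
#include <ATen/core/stack.h>
#include <cstdint>

// Box two unboxed arguments into an IValue stack, the same way
// BoxedKernelWrapper::call does before invoking a boxed kernel.
torch::jit::Stack box_example(const at::Tensor& t, int64_t n) {
  torch::jit::Stack stack = c10::impl::boxArgs<const at::Tensor&, int64_t>(t, n);
  // stack[0] now holds the tensor and stack[1] the integer, both as IValues.
  return stack;
}

// After the boxed kernel has replaced its inputs with a single result,
// PopResult unboxes that result back into a C++ value.
at::Tensor unbox_result(torch::jit::Stack& stack) {
  return c10::impl::PopResult<at::Tensor>::call(stack);
}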
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h ADDED
@@ -0,0 +1,600 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/util/TypeList.h>
7
+ #include <ATen/core/IListRef.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+ #include <c10/util/Metaprogramming.h>
10
+
11
+ #include <utility>
12
+
13
+ namespace c10 {
14
+
15
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
16
+ class OperatorHandle;
17
+
18
+ /*
19
+ * [Note: Argument forwarding in the dispatcher]
20
+ *
21
+ * The dispatcher uses a somewhat unusual way to forward arguments through several layers of
22
+ * wrapper functions. This can be confusing because an experienced C++ programmer would look at this
23
+ * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.".
24
+ * It is not a bug. The common way in C++ to forward arguments is to use universal references:
25
+ *
26
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
27
+ *
28
+ * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument.
29
+ * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a
30
+ * different reference type than was used in the kernel function. The correct reference type
31
+ * is dictated by the kernel signature and must be identical since we cast function pointers
32
+ * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines
33
+ * the reference type to use by looking at the explicitly supplied operator signature, not by looking at
34
+ * the argument we're calling it with.
35
+ *
36
+ * What does std::forward do, exactly?
37
+ * ------------------------------------
38
+ * std::forward<T>(t) is a way to cast t to the reference type supplied in T.
39
+ * Let's assume decay_t<T> == U and T is either U or some reference of U.
40
+ * - std::forward<T&>(t) will return U&, no matter what kind of reference t is.
41
+ * - std::forward<T&&>(t) will return U&&, no matter what kind of reference t is.
42
+ * - std::forward<T>(t) will return U&& (not U!), no matter what kind of reference t is.
43
+ *
44
+ * For universal references, that means that in the following function
45
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
46
+ *
47
+ * - when called with arg being an rvalue reference or non-reference value, T gets inferred to be
48
+ * a non-reference U, and std::forward<T>(t) will return U&&, correctly moving the argument.
49
+ * - when called with arg being an lvalue reference, T gets inferred to be U& because that's the only
50
+ * way to match the signature (in C++, a type that is (T&)&& will collapse to T&).
51
+ * That means std::forward<T>(t) will return U& and the value will not be moved but passed on as
52
+ * an lvalue reference.
53
+ *
54
+ * How do we use that?
55
+ * ------------------------------------
56
+ * But std::forward can also be used outside of the common "universal forwarding" pattern to change
57
+ * reference types. So instead of following the common C++ pattern, we notice what
58
+ * std::forward<T>() actually does, and that is it takes a value and changes its reference to the
59
+ * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this
60
+ * to forward based on an explicitly specified reference type instead of the inferred argument type.
61
+ *
62
+ * This is why many of the dispatcher functions look like
63
+ * > template<class T> func(T t) { func2<T>(std::forward<T>(t)); }
64
+ * instead of the common
65
+ * > template<class T> func(T&& t) { func2(std::forward<T>(t)); }
66
+ *
67
+ * and are expected to be called by explicitly specifying the template parameters in a way that matches
68
+ * the expected operator signature at each call site.
69
+ */
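The explicit-template forwarding pattern this note describes boils down to a few lines of plain C++. The following standalone sketch (wrapper, consume and demo are made-up names, unrelated to the dispatcher) shows that the reference category is chosen by the explicitly supplied T, not by the argument:

#include <string>
#include <utility>

void consume(std::string&& s)      { std::string owned = std::move(s); (void)owned; }  // move path
void consume(const std::string& s) { std::string copy = s; (void)copy; }               // copy path

// Deliberately "T t", not "T&& t": T is specified by the caller, exactly as the
// dispatcher wrappers specify it from the operator signature.
template <class T>
void wrapper(T t) {
  consume(std::forward<T>(t));  // casts t to whatever reference type T names
}

void demo() {
  std::string s = "hello";
  wrapper<const std::string&>(s);            // T = const std::string&  -> copy path
  wrapper<std::string>(std::string("tmp"));  // T = std::string         -> move path
}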
70
+
71
+ namespace impl {
72
+ // supported_primitive_arg_types defines which primitive types we allow in
73
+ // kernel functions as arguments or returns.
74
+ // Additionally, we support lists, dicts and optionals containing these types.
75
+ using supported_primitive_arg_types = guts::typelist::typelist<
76
+ int64_t,
77
+ double,
78
+ bool,
79
+ c10::string_view,
80
+ at::Tensor,
81
+ at::Scalar,
82
+ c10::QScheme,
83
+ c10::ScalarType,
84
+ c10::Device,
85
+ c10::DeviceIndex,
86
+ c10::Layout,
87
+ c10::MemoryFormat,
88
+ at::Dimname
89
+ >;
90
+
91
+ // We have an unboxed functor in hand that takes C++ arguments, and
92
+ // we're building a boxed functor wrapper for it that takes IValues.
93
+ // So "outside" is boxed and "inside" is unboxed.
94
+ //
95
+ // So a valid input type is one that our boxed functor wrapper can
96
+ // unbox from an IValue into a C++ value.
97
+ //
98
+ // Whereas a valid output type is one that our wrapper can receive
99
+ // as a C++ value from the unboxed functor, and box into an IValue.
100
+
101
+ //
102
+ // assert_is_valid_input_type
103
+ // checks that T can be unboxed from an IValue into a C++ value.
104
+ //
105
+
106
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
107
+ struct assert_is_valid_input_type {
108
+ assert_is_valid_input_type() {
109
+ if constexpr (guts::typelist::contains<supported_primitive_arg_types, T>::value) {
110
+ /* everything is ok, this is a primitive type */
111
+ } else {
112
+ /* otherwise this must be an instance of a valid custom class, since it can only
113
+ have been created via IValue(x), which ensures this. */
114
+ }
115
+ }
116
+ };
117
+
118
+ template<class T, bool AllowDeprecatedTypes>
119
+ struct assert_is_valid_input_type<c10::optional<T>, AllowDeprecatedTypes>
120
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
121
+
122
+ template <bool AllowDeprecatedTypes, class... Args>
123
+ struct TypeCheckHelper;
124
+
125
+ template <bool AllowDeprecatedTypes>
126
+ struct TypeCheckHelper<AllowDeprecatedTypes> {};
127
+
128
+ template <bool AllowDeprecatedTypes, class Head, class... Rest>
129
+ struct TypeCheckHelper<AllowDeprecatedTypes, Head, Rest...>
130
+ : TypeCheckHelper<AllowDeprecatedTypes, Rest...> {
131
+ assert_is_valid_input_type<Head, AllowDeprecatedTypes> check;
132
+ };
133
+
134
+ template<class... Contained, bool AllowDeprecatedTypes>
135
+ struct assert_is_valid_input_type<std::tuple<Contained...>, AllowDeprecatedTypes>
136
+ : TypeCheckHelper<AllowDeprecatedTypes, Contained...> {};
137
+
138
+ template<class Key, class Value, bool AllowDeprecatedTypes>
139
+ struct assert_is_valid_input_type<Dict<Key, Value>, AllowDeprecatedTypes>
140
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
141
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
142
+ "You tried to register a kernel with an unsupported input type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
143
+ };
144
+
145
+ template<class Key, class Value, bool AllowDeprecatedTypes>
146
+ struct assert_is_valid_input_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
147
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
148
+ static_assert(AllowDeprecatedTypes,
149
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
150
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
151
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
152
+ };
153
+
154
+ template<class T, bool AllowDeprecatedTypes>
155
+ struct assert_is_valid_input_type<List<T>, AllowDeprecatedTypes>
156
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
157
+ static_assert(!std::is_same<T, at::Scalar>::value,
158
+ "You tried to register a kernel with an unsupported input type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
159
+ };
160
+
161
+ template<class T, bool AllowDeprecatedTypes>
162
+ struct assert_is_valid_input_type<c10::ArrayRef<T>, AllowDeprecatedTypes>
163
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
164
+ static_assert(!std::is_same<T, at::Scalar>::value,
165
+ "You tried to register a kernel with an unsupported input type: ArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
166
+ };
167
+
168
+ template<class T, bool AllowDeprecatedTypes>
169
+ struct assert_is_valid_input_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
170
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
171
+ static_assert(!std::is_same<T, at::Scalar>::value,
172
+ "You tried to register a kernel with an unsupported input type: OptionalArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
173
+ };
174
+
175
+ template<class T, size_t N, bool AllowDeprecatedTypes>
176
+ struct assert_is_valid_input_type<std::array<T, N>, AllowDeprecatedTypes>
177
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
178
+ static_assert(!std::is_same<T, at::Scalar>::value,
179
+ "You tried to register a kernel with an unsupported input type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
180
+ };
181
+
182
+ template<class T, bool AllowDeprecatedTypes>
183
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
184
+ // There is no reason to support float when we have double. Keep the API lean.
185
+ static_assert(guts::false_t<T>::value,
186
+ "You tried to register a kernel with an unsupported input type: float. Please use double instead.");
187
+ };
188
+ template<class T, bool AllowDeprecatedTypes>
189
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
190
+ static_assert(guts::false_t<T>::value,
191
+ "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead.");
192
+ };
193
+ template<class T, bool AllowDeprecatedTypes>
194
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
195
+ static_assert(guts::false_t<T>::value,
196
+ "You tried to register a kernel with an unsupported input type: vector<bool>. Please use List<bool> instead.");
197
+ };
198
+ template<class T, bool AllowDeprecatedTypes>
199
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
200
+ static_assert(guts::false_t<T>::value,
201
+ "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead.");
202
+ };
203
+ template<class T, bool AllowDeprecatedTypes>
204
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const c10::SymInt&, T>::value>> {
205
+ static_assert(guts::false_t<T>::value,
206
+ "You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead.");
207
+ };
208
+
209
+ // TODO: it probably would be good to tighten this up quite a bit more with
210
+ // an explicit list for everything
211
+
212
+ //
213
+ // assert_is_valid_output_type
214
+ //
215
+
216
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
217
+ struct assert_is_valid_output_type {
218
+ assert_is_valid_output_type() {
219
+ if constexpr(guts::typelist::contains<supported_primitive_arg_types, T>::value) {
220
+ /* everything is ok, this is a primitive type */
221
+ } else {
222
+ /* otherwise T is verified to be a registered custom class in the IValue
223
+ constructor, so no benefit in double-checking here */
224
+ }
225
+ }
226
+ };
227
+
228
+ template<class T, bool AllowDeprecatedTypes>
229
+ struct assert_is_valid_output_type<c10::optional<T>, AllowDeprecatedTypes>
230
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
231
+
232
+ template<class T, bool AllowDeprecatedTypes>
233
+ struct assert_is_valid_output_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
234
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
235
+
236
+ template<class Key, class Value, bool AllowDeprecatedTypes>
237
+ struct assert_is_valid_output_type<Dict<Key, Value>, AllowDeprecatedTypes>
238
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
239
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
240
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
241
+ static_assert(!std::is_same<Value, at::Scalar>::value,
242
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
243
+ };
244
+
245
+ template<class Key, class Value, bool AllowDeprecatedTypes>
246
+ struct assert_is_valid_output_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
247
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
248
+ static_assert(AllowDeprecatedTypes,
249
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
250
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
251
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
252
+ static_assert(!std::is_same<Value, at::Scalar>::value,
253
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
254
+ };
255
+
256
+ template<class T, bool AllowDeprecatedTypes>
257
+ struct assert_is_valid_output_type<List<T>, AllowDeprecatedTypes>
258
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
259
+ static_assert(!std::is_same<T, at::Scalar>::value,
260
+ "You tried to register a kernel with an unsupported output type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
261
+ };
262
+
263
+ template<class T, bool AllowDeprecatedTypes>
264
+ struct assert_is_valid_output_type<std::vector<T>, AllowDeprecatedTypes>
265
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
266
+ static_assert(!std::is_same<T, at::Scalar>::value,
267
+ "You tried to register a kernel with an unsupported output type: std::vector<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
268
+ // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector<T>. Please use List<T> instead.");
269
+ };
270
+
271
+ template<class T, size_t N, bool AllowDeprecatedTypes>
272
+ struct assert_is_valid_output_type<std::array<T, N>, AllowDeprecatedTypes>
273
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
274
+ static_assert(!std::is_same<T, at::Scalar>::value,
275
+ "You tried to register a kernel with an unsupported output type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
276
+ };
277
+
278
+ // The following specializations of assert_is_valid_output_type are technically not
279
+ // necessary since we would hit the base case and show an error message
280
+ // there if they didn't exist, but we can show a better error message
281
+ // in some common error scenarios.
282
+ template<class T, bool AllowDeprecatedTypes>
283
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
284
+ // There is no reason to support float when we have double. Keep the API lean.
285
+ static_assert(guts::false_t<T>::value,
286
+ "You tried to register a kernel with an unsupported output type: float. Please use double instead.");
287
+ };
288
+ template<class T, bool AllowDeprecatedTypes>
289
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
290
+ static_assert(guts::false_t<T>::value,
291
+ "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead.");
292
+ };
293
+ template<class T, bool AllowDeprecatedTypes>
294
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
295
+ static_assert(guts::false_t<T>::value,
296
+ "You tried to register a kernel with an unsupported output type: vector<bool>. Please use List<bool> instead.");
297
+ };
298
+ template<class T, bool AllowDeprecatedTypes>
299
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
300
+ static_assert(guts::false_t<T>::value,
301
+ "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead.");
302
+ };
303
+
304
+ // ivalue_to_arg
305
+
306
+ template<class T>
307
+ struct decay_if_not_tensor final {
308
+ using type = std::decay_t<T>;
309
+ };
310
+
311
+ template<>
312
+ struct decay_if_not_tensor<at::Tensor&> final {
313
+ using type = at::Tensor&;
314
+ };
315
+
316
+ template<>
317
+ struct decay_if_not_tensor<const at::Tensor&> final {
318
+ using type = const at::Tensor&;
319
+ };
320
+
321
+ template<class T, bool AllowDeprecatedTypes>
322
+ struct ivalue_to_arg final {
323
+ static decltype(auto) call(IValue& v) {
324
+ assert_is_valid_input_type<T, AllowDeprecatedTypes>();
325
+ return std::move(v).to<T>();
326
+ }
327
+ };
328
+
329
+ // The following two specializations take advantage of specialized
330
+ // `toTensor()` overloads on IValue to avoid copying.
331
+ template<bool AllowDeprecatedTypes>
332
+ struct ivalue_to_arg<at::Tensor&, AllowDeprecatedTypes> final {
333
+ // We cannot use the default implementation if they asked for an
334
+ // `at::Tensor&` because it moves from the IValue, so it can't get
335
+ // an lvalue reference.
336
+ static at::Tensor& call(IValue& v) {
337
+ // Tensor& is valid, don't bother asserting
338
+ return v.toTensor();
339
+ }
340
+ };
341
+
342
+ template<bool AllowDeprecatedTypes>
343
+ struct ivalue_to_arg<const at::Tensor&, AllowDeprecatedTypes> final {
344
+ // We should not use the default implementation if they asked for
345
+ // a `const at::Tensor&` because it moves from the IValue and they
346
+ // didn't ask for that.
347
+ static const at::Tensor& call(IValue& v) {
348
+ // const Tensor& is valid, don't bother asserting
349
+ return v.toTensor();
350
+ }
351
+ };
352
+
353
+ template<bool AllowDeprecatedTypes>
354
+ struct ivalue_to_arg<at::ITensorListRef, AllowDeprecatedTypes> final {
355
+ static List<at::Tensor> call(IValue& v) {
356
+ return v.toTensorList();
357
+ }
358
+ };
359
+
360
+ template<class T, bool AllowDeprecatedTypes>
361
+ struct ivalue_to_arg<ArrayRef<T>, AllowDeprecatedTypes> final {
362
+ // If an argument is ArrayRef<T>, convert the IValue to a std::vector<T> and pass that
363
+ // to the operator. std::vector<T> is implicitly convertible to ArrayRef<T>.
364
+ static std::vector<T> call(IValue& v) {
365
+ return ivalue_to_arg<std::vector<T>, AllowDeprecatedTypes>::call(v);
366
+ }
367
+ };
368
+ template<bool AllowDeprecatedTypes>
369
+ struct ivalue_to_arg<c10::SymIntArrayRef, AllowDeprecatedTypes> final {
370
+ static std::vector<c10::SymInt> call(IValue& v) {
371
+ if (v.isIntList()) {
372
+ std::vector<c10::SymInt> r;
373
+ auto src = v.toIntList();
374
+ std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
375
+ return r;
376
+ } else {
377
+ return ivalue_to_arg<std::vector<c10::SymInt>, AllowDeprecatedTypes>::call(v);
378
+ }
379
+ }
380
+ };
381
+ template<bool AllowDeprecatedTypes>
382
+ struct ivalue_to_arg<c10::OptionalArray<c10::SymInt>, AllowDeprecatedTypes> final {
383
+ static OptionalArray<c10::SymInt> call(IValue& v) {
384
+ if (v.isIntList()) {
385
+ std::vector<c10::SymInt> r;
386
+ auto src = v.toIntList();
387
+ std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
388
+ return OptionalArray<c10::SymInt>(std::move(r));
389
+ } else {
390
+ return std::move(v).to<OptionalArray<c10::SymInt>>();
391
+ }
392
+ }
393
+ };
394
+ template<class T, bool AllowDeprecatedTypes>
395
+ struct ivalue_to_arg<optional<ArrayRef<T>>, AllowDeprecatedTypes> final {
396
+ // If an argument is optional<ArrayRef<T>>, convert the IValue to an optional<std::vector<T>> and pass that
397
+ // to the operator. OptionalArray<T> is basically an optional<std::vector<T>> but implicitly convertible
398
+ // to optional<ArrayRef<T>>.
399
+ static OptionalArray<T> call(IValue& v) {
400
+ return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
401
+ }
402
+ };
403
+
404
+ template<class T, bool AllowDeprecatedTypes>
405
+ struct ivalue_to_arg<OptionalArrayRef<T>, AllowDeprecatedTypes> final {
406
+ // If an argument is OptionalArrayRef<T>, convert the IValue to an
407
+ // optional<std::vector<T>> and pass that to the operator. OptionalArray<T>
408
+ // is basically an optional<std::vector<T>> but implicitly convertible to
409
+ // OptionalArrayRef<T>
410
+ static OptionalArray<T> call(IValue& v) {
411
+ return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
412
+ }
413
+ };
414
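+ // A boxed wrapper uses these helpers roughly as follows (sketch; `v` stands
+ // for an IValue taken from the stack, and the types shown are illustrative):
+ //
+ //   c10::IValue v(int64_t(42));
+ //   int64_t arg = ivalue_to_arg<int64_t, /*AllowDeprecatedTypes=*/false>::call(v);
+ //
+ // For reference types like `const at::Tensor&`, the specializations above are
+ // picked instead so the Tensor is not moved out of the IValue.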
+
415
+ // return_to_ivalue
416
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
417
+ struct return_to_ivalue final {};
418
+
419
+ template<class T, bool AllowDeprecatedTypes>
420
+ struct return_to_ivalue<T, AllowDeprecatedTypes, std::enable_if_t<!std::is_same<at::Tensor&, T>::value>> final {
421
+ static IValue call(T&& v) {
422
+ assert_is_valid_output_type<T, AllowDeprecatedTypes>();
423
+ return c10::ivalue::from(std::move(v));
424
+ }
425
+ static IValue copy(const T& v) {
426
+ assert_is_valid_output_type<T, AllowDeprecatedTypes>();
427
+ return IValue(v);
428
+ }
429
+ };
430
+
431
+ // Special case to allow kernels to return `Tensor&`.
432
+ // TODO Delete this once kernels don't do that anymore
433
+ template<bool AllowDeprecatedTypes>
434
+ struct return_to_ivalue<at::Tensor&, AllowDeprecatedTypes, void> final {
435
+ static IValue call(at::Tensor& v) {
436
+ return c10::ivalue::from(v);
437
+ }
438
+ static IValue copy(at::Tensor& v) {
439
+ return IValue(v);
440
+ }
441
+ };
442
+
443
+ // wrap_kernel_functor_unboxed_
444
+
445
+ template<class KernelFunctor, class OpSignature>
446
+ struct wrap_kernel_functor_unboxed_ final {};
447
+
448
+ // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet
449
+ // This includes kernels with 0 arguments.
450
+ template<class KernelFunctor, class ReturnType, class... ParameterTypes>
451
+ struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(ParameterTypes...)> final {
452
+ static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
453
+ "Return type mismatch");
454
+ static_assert(std::is_same<guts::typelist::typelist<ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
455
+ "Parameter types mismatch");
456
+
457
+ // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
458
+ static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) {
459
+ KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
460
+ // Note [Plumbing Keys Through The Dispatcher 2]
461
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
462
+ // This functor explicitly takes in a dispatchKeySet and drops it on the floor; it does not forward it to the registered kernel.
463
+ //
464
+ // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type
465
+ // DispatchKeySet.
466
+ // Pretty much all manually written kernels, however, do not follow this convention; this functor serves to separate the calling convention
467
+ // of the dispatcher from the calling convention of manually written kernels.
468
+ return (*functor_)(std::forward<ParameterTypes>(args)...);
469
+ }
470
+ };
471
+
472
+ // This specialization is for kernels with a first argument of type DispatchKeySet
473
+ template<class KernelFunctor, class ReturnType, class... ParameterTypes>
474
+ struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(DispatchKeySet, ParameterTypes...)> final {
475
+ static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
476
+ "Return type mismatch");
477
+ static_assert(std::is_same<guts::typelist::typelist<DispatchKeySet, ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
478
+ "Parameter types mismatch");
479
+
480
+ // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
481
+ static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) {
482
+ KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
483
+ // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel.
484
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
485
+ return (*functor_)(dispatchKeySet, std::forward<ParameterTypes>(args)...);
486
+ }
487
+ };
488
+
489
+ template<class KernelFunctor>
490
+ using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_<KernelFunctor, typename guts::infer_function_traits_t<KernelFunctor>::func_type>;
491
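+ // Sketch of how the two specializations above behave for two hypothetical functors:
+ //
+ //   struct PlainKernel final : c10::OperatorKernel {               // hypothetical
+ //     at::Tensor operator()(const at::Tensor& a);                  // no DispatchKeySet
+ //   };
+ //   struct KeyAwareKernel final : c10::OperatorKernel {            // hypothetical
+ //     at::Tensor operator()(c10::DispatchKeySet ks, const at::Tensor& a);
+ //   };
+ //
+ // wrap_kernel_functor_unboxed<PlainKernel>::call(functor, ks, a) drops `ks`,
+ // while wrap_kernel_functor_unboxed<KeyAwareKernel>::call(functor, ks, a)
+ // forwards it to the kernel as its first argument.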
+
492
+ // call_functor_with_args_from_stack
493
+
494
+ template<class Functor, bool AllowDeprecatedTypes, size_t... ivalue_arg_indices, typename... ArgTypes>
495
+ std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
496
+ call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence<ivalue_arg_indices...>, guts::typelist::typelist<ArgTypes...>*) {
497
+ (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning.
498
+
499
+ // We're explicitly filtering out DispatchKeySet from the argument list.
500
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
501
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
502
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
503
+ return wrap_kernel_functor_unboxed<Functor>::call(functor, dispatchKeySet,
504
+ ivalue_to_arg<typename decay_if_not_tensor<ArgTypes>::type, AllowDeprecatedTypes>::call(
505
+ torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices))
506
+ )...);
507
+ }
508
+
509
+ template<class Functor, bool AllowDeprecatedTypes>
510
+ std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
511
+ call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) {
512
+ // We're explicitly filtering out DispatchKeySet from the argument list.
513
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
514
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
515
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
516
+ using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<Functor>::parameter_types;
517
+ constexpr size_t num_ivalue_args = guts::typelist::size<ArgTypes>::value;
518
+ return call_functor_with_args_from_stack_<Functor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack, std::make_index_sequence<num_ivalue_args>(), static_cast<ArgTypes*>(nullptr));
519
+ }
520
+
521
+ // push_outputs
522
+
523
+ template<class OutputType, bool AllowDeprecatedTypes>
524
+ struct push_outputs final {
525
+ // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here
526
+ // to avoid one extra call to the move constructor in this case. This is still not a
527
+ // universal reference though because OutputType is an explicitly specified class
528
+ // template parameter.
529
+ static void call(OutputType&& output, Stack* stack) {
530
+ torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::call(std::forward<OutputType>(output)));
531
+ }
532
+ static void copy(const OutputType& output, Stack* stack) {
533
+ torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::copy(output));
534
+ }
535
+ };
536
+ template<class... OutputTypes, bool AllowDeprecatedTypes>
537
+ struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
538
+ static void call(std::tuple<OutputTypes...>&& output, Stack* stack) {
539
+ call_(std::move(output), stack, std::make_index_sequence<sizeof...(OutputTypes)>());
540
+ }
541
+ static void copy(const std::tuple<OutputTypes...>& output, Stack* stack) {
542
+ copy_(output, stack, std::make_index_sequence<sizeof...(OutputTypes)>());
543
+ }
544
+
545
+ private:
546
+ template<size_t... indices>
547
+ static void call_(std::tuple<OutputTypes...>&& output, Stack* stack, std::index_sequence<indices...>) {
548
+ torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::call(std::forward<OutputTypes>(std::get<indices>(output)))...);
549
+ }
550
+ template<size_t... indices>
551
+ static void copy_(const std::tuple<OutputTypes...>& output, Stack* stack, std::index_sequence<indices...>) {
552
+ torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::copy(std::get<indices>(output))...);
553
+ }
554
+ };
555
+ template<bool AllowDeprecatedTypes>
556
+ struct push_outputs<void, AllowDeprecatedTypes> final {
557
+ static void call(int /*dummy*/, Stack* /*stack*/) {
558
+ }
559
+ static void copy(int /*dummy*/, Stack* /*stack*/) {
560
+ }
561
+ };
562
+
563
+ // make_boxed_from_unboxed_functor
564
+
565
+ template<class KernelFunctor, bool AllowDeprecatedTypes>
566
+ struct make_boxed_from_unboxed_functor final {
567
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value,
568
+ "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
569
+
570
+ static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) {
571
+ using ReturnType = typename guts::infer_function_traits_t<KernelFunctor>::return_type;
572
+ // We're explicitly filtering out DispatchKeySet from the argument list.
573
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
574
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
575
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
576
+ using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::parameter_types;
577
+ constexpr bool has_outputs = !std::is_same<void, ReturnType>::value;
578
+ constexpr size_t num_inputs = guts::typelist::size<ArgTypes>::value;
579
+ if constexpr (has_outputs) {
580
+ // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value
581
+ // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`.
582
+ // [Note: VC++ and 'std': ambiguous symbol]
583
+ using ReturnType_ = ::std::decay_t<ReturnType>;
584
+ ReturnType_ output = call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
585
+ torch::jit::drop(*stack, num_inputs);
586
+ // See [Note: VC++ and 'std': ambiguous symbol]
587
+ push_outputs<ReturnType_, AllowDeprecatedTypes>::call(::std::move(output), stack);
588
+ } else {
589
+ call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
590
+ torch::jit::drop(*stack, num_inputs);
591
+ }
592
+ }
593
+ };
594
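+ // End-to-end sketch (hypothetical functor; the OperatorHandle argument is
+ // ignored by this wrapper, so it is elided below):
+ //
+ //   struct AddOne final : c10::OperatorKernel {
+ //     int64_t operator()(int64_t x) { return x + 1; }
+ //   };
+ //
+ //   AddOne functor;
+ //   torch::jit::Stack stack{c10::IValue(int64_t(41))};
+ //   make_boxed_from_unboxed_functor<AddOne, /*AllowDeprecatedTypes=*/false>
+ //       ::call(&functor, /*opHandle=*/..., c10::DispatchKeySet(), &stack);
+ //   // stack now holds a single IValue containing int64_t(42).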
+ } // namespace impl
595
+
596
+ } // namespace c10
597
+
598
+ namespace torch {
599
+ using OperatorKernel = c10::OperatorKernel;
600
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <gtest/gtest.h>
4
+ #include <gmock/gmock.h>
5
+
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/core/dispatch/Dispatcher.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <c10/core/CPUAllocator.h>
10
+ #include <c10/util/irange.h>
11
+
12
+ template<class... Inputs>
13
+ inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) {
14
+ return {std::forward<Inputs>(inputs)...};
15
+ }
16
+
17
+ inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) {
18
+ auto* allocator = c10::GetCPUAllocator();
19
+ int64_t nelements = 1;
20
+ auto dtype = caffe2::TypeMeta::Make<float>();
21
+ int64_t size_bytes = nelements * dtype.itemsize();
22
+ auto storage_impl = c10::make_intrusive<c10::StorageImpl>(
23
+ c10::StorageImpl::use_byte_size_t(),
24
+ size_bytes,
25
+ allocator->allocate(size_bytes),
26
+ allocator,
27
+ /*resizable=*/true);
28
+ at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype);
29
+ // TODO: We add this to simulate the ideal case where we only have Autograd backend keys
30
+ // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl
31
+ // constructor by default.
32
+ if (!requires_grad) {
33
+ t.unsafeGetTensorImpl()->remove_autograd_key();
34
+ }
35
+ return t;
36
+ }
37
+
38
+ inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) {
39
+ return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad);
40
+ }
41
+
42
+ template<class... Args>
43
+ inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) {
44
+ auto stack = makeStack(std::forward<Args>(args)...);
45
+ op.callBoxed(&stack);
46
+ return stack;
47
+ }
48
+
49
+ template<class Result, class... Args>
50
+ inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
51
+ return op.typed<Result(Args...)>().call(std::forward<Args>(args)...);
52
+ }
53
+
54
+ template<class Result, class... Args>
55
+ inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
56
+ return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...);
57
+ }
58
+
59
+ template<class Result, class... Args>
60
+ inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) {
61
+ return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...);
62
+ }
63
+
64
+ inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
65
+ auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
66
+ EXPECT_ANY_THROW(
67
+ callOp(*op, dummyTensor(dispatch_key), 5);
68
+ );
69
+ }
70
+
71
+ inline void expectDoesntFindOperator(const char* op_name) {
72
+ auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
73
+ EXPECT_FALSE(op.has_value());
74
+ }
75
+
76
+ template<class Exception, class Functor>
77
+ inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
78
+ try {
79
+ std::forward<Functor>(functor)();
80
+ } catch (const Exception& e) {
81
+ EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains));
82
+ return;
83
+ }
84
+ ADD_FAILURE() << "Expected to throw exception containing \""
85
+ << expectMessageContains << "\" but didn't throw";
86
+ }
87
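+ // Example (sketch):
+ //   expectThrows<c10::Error>([] { TORCH_CHECK(false, "boom"); }, "boom");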
+
88
+ template<class T, size_t N>
89
+ void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) {
90
+ EXPECT_EQ(expected.size(), actual.size());
91
+ for (const auto i : c10::irange(expected.size())) {
92
+ EXPECT_EQ(expected[i], actual[i]);
93
+ }
94
+ }
95
+
96
+ template<class T>
97
+ void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) {
98
+ EXPECT_EQ(expected.size(), actual.size());
99
+ for (const auto i : c10::irange(expected.size())) {
100
+ EXPECT_EQ(expected[i], actual[i]);
101
+ }
102
+ }
103
+
104
+ template<class T>
105
+ void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) {
106
+ EXPECT_EQ(expected.size(), actual.size());
107
+ for (const auto i : c10::irange(expected.size())) {
108
+ EXPECT_EQ(expected[i], actual.get(i));
109
+ }
110
+ }
111
+
112
+ template<class T>
113
+ void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) {
114
+ EXPECT_EQ(expected.size(), actual.size());
115
+ for (const auto i : c10::irange(expected.size())) {
116
+ EXPECT_EQ(expected[i], actual[i]);
117
+ }
118
+ }
119
+
120
+ // NB: This is not really sound, but all of the type sets constructed here
121
+ // are singletons so it's fine
122
+ static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) {
123
+ return legacyExtractDispatchKey(t.key_set());
124
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h ADDED
@@ -0,0 +1,441 @@
1
+ #pragma once
2
+
3
+ #include <memory>
4
+
5
+ #include <ATen/core/ivalue.h>
6
+ #include <ATen/core/jit_type_base.h>
7
+ #include <c10/util/Optional.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+ struct CompilationUnit;
12
+ struct Function;
13
+ } // namespace jit
14
+ } // namespace torch
15
+
16
+ namespace c10 {
17
+
18
+ struct FunctionSchema;
19
+
20
+ // This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither.
21
+ // This state is mutually exclusive. Buffers and Parameters can only appear on modules.
22
+ enum class AttributeKind {
23
+ BUFFER,
24
+ PARAMETER,
25
+ REGULAR_ATTRIBUTE
26
+ };
27
+
28
+ // This structure represents the bookkeeping for a class attribute: its name, kind (see: AttributeKind), and type (see: TypePtr).
29
+ // Note: This structure does not represent the value of the attribute.
30
+ struct TORCH_API ClassAttribute {
31
+ public:
32
+ ClassAttribute(AttributeKind kind,
33
+ TypePtr attributeType,
34
+ std::string attributeName) :
35
+ kind_(kind),
36
+ attributeType_(std::move(attributeType)),
37
+ attributeName_(std::move(attributeName)) {}
38
+
39
+ AttributeKind getKind() const {
40
+ return kind_;
41
+ }
42
+
43
+ const TypePtr& getType() const {
44
+ return attributeType_;
45
+ }
46
+
47
+ const std::string& getName() const {
48
+ return attributeName_;
49
+ }
50
+
51
+ private:
52
+ AttributeKind kind_;
53
+ TypePtr attributeType_;
54
+ std::string attributeName_;
55
+ };
56
+
57
+ /**
58
+ * User Defined Types
59
+ */
60
+
61
+ struct ClassType;
62
+ using ClassTypePtr = std::shared_ptr<ClassType>;
63
+ using ::torch::jit::CompilationUnit;
64
+
65
+ // This represents a class in TorchScript.
66
+ struct TORCH_API ClassType : public NamedType {
67
+ // This represents an attribute of a class; a name associated with an attribute, and a
68
+ // getter and (optional) setter for that attribute.
69
+ struct Property {
70
+ std::string name;
71
+ torch::jit::Function* getter;
72
+ torch::jit::Function* setter;
73
+ };
74
+
75
+ // Create a class type with name `name` and its methods stored in `cu`.
76
+ static ClassTypePtr create(
77
+ c10::optional<QualifiedName> qualifiedName,
78
+ std::weak_ptr<CompilationUnit> cu,
79
+ bool is_module = false,
80
+ std::string doc_string = "",
81
+ std::vector<std::string> unresolved_class_attributes = {});
82
+
83
+ bool equals(const Type& rhs) const override {
84
+ if (this == &rhs) {
85
+ return true;
86
+ }
87
+ if (auto user_rhs = rhs.castRaw<ClassType>()) {
88
+ const auto& lhs_name = name().value();
89
+ const auto& rhs_name = user_rhs->name().value();
90
+
91
+ return lhs_name == rhs_name &&
92
+ this->compilation_unit() == user_rhs->compilation_unit();
93
+ }
94
+ return false;
95
+ }
96
+
97
+ std::string str() const override {
98
+ return annotation_str();
99
+ }
100
+
101
+ std::string repr_str() const override {
102
+ std::stringstream ss;
103
+ ss << str()
104
+ << " (of Python compilation unit at: " << compilation_unit().get() << ")";
105
+ return ss.str();
106
+ }
107
+
108
+ const std::vector<torch::jit::Function*>& methods() const;
109
+
110
+ TypePtr findAttribute(const std::string& name) const {
111
+ size_t pos = 0;
112
+ for (const auto& attr : attributes_) {
113
+ if (name == attr.getName()) {
114
+ break;
115
+ }
116
+ ++pos;
117
+ }
118
+
119
+ if (pos >= attributes_.size()) {
120
+ return nullptr;
121
+ }
122
+ return attributes_[pos].getType();
123
+ }
124
+
125
+ const TypePtr& getAttribute(const std::string& name) const {
126
+ auto slot = findAttributeSlot(name);
127
+ TORCH_CHECK(
128
+ slot,
129
+ repr_str(),
130
+ " does not have an attribute with name '",
131
+ name,
132
+ "'");
133
+ return attributes_[*slot].getType();
134
+ }
135
+
136
+ size_t numAttributes() const {
137
+ return attributes_.size();
138
+ }
139
+
140
+ const TypePtr& getAttribute(size_t slot) const {
141
+ AT_ASSERT(slot < attributes_.size());
142
+ return attributes_.at(slot).getType();
143
+ }
144
+
145
+ const std::string getAttributeName(size_t slot) const {
146
+ AT_ASSERT(slot < attributes_.size());
147
+ return attributes_[slot].getName();
148
+ }
149
+
150
+ void checkNotExist(const std::string& name, const std::string& what) const;
151
+
152
+ // Attributes are stored in a specific slot at runtime for effiency.
153
+ // When emitting instructions we specify the slot so that attribute access is
154
+ // a constant lookup
155
+ c10::optional<size_t> findAttributeSlot(const std::string& name) const {
156
+ size_t slot = 0;
157
+ for (const auto& attr : attributes_) {
158
+ if (name == attr.getName()) {
159
+ return slot;
160
+ }
161
+ slot++;
162
+ }
163
+ return c10::nullopt;
164
+ }
165
+ size_t getAttributeSlot(const std::string& name) const {
166
+ if (auto r = findAttributeSlot(name)) {
167
+ return *r;
168
+ }
169
+ TORCH_CHECK(
170
+ false,
171
+ repr_str(),
172
+ " does not have an attribute with name '",
173
+ name,
174
+ "'");
175
+ }
176
+
177
+ bool hasAttribute(const std::string& name) const {
178
+ return std::find_if(
179
+ attributes_.cbegin(),
180
+ attributes_.cend(),
181
+ [&](const ClassAttribute& attr) { return attr.getName() == name; }) !=
182
+ attributes_.cend();
183
+ }
184
+
185
+ bool isUnresolvedClassAttribute(const std::string& name) const;
186
+
187
+ at::ArrayRef<TypePtr> containedTypes() const override {
188
+ return attributeTypes_;
189
+ }
190
+
191
+ size_t addAttribute(
192
+ const std::string& name,
193
+ TypePtr type,
194
+ bool is_parameter = false,
195
+ bool is_buffer = false);
196
+
197
+ // [Internal Only] Remove attribute from the ClassType,
198
+ // caller is responsible to make sure the modification is safe:
199
+ // it is unsafe to having existing allocations
200
+ // of this object around anymore, and any code that works on
201
+ // the attribute is now invalid. Only newly created code is
202
+ // valid again.
203
+ void unsafeRemoveAttribute(const std::string& name);
204
+
205
+ // [Internal Only] Change the type of an attribute of the ClassType,
206
+ // The caller is responsible to make sure the modification is safe:
207
+ // it is unsafe to maintain uses of the old type of the attribute,
208
+ // and any code that works on the attribute is now invalid.
209
+ // Only newly created code is valid again.
210
+ void unsafeChangeAttributeType(const std::string& name, const TypePtr& new_ty);
211
+
212
+ // Add attribute \p NAME if it doesn't exist or verify that it has a
213
+ // compatible type otherwise.
214
+ size_t addOrCheckAttribute(
215
+ const std::string& name,
216
+ TypePtr ty,
217
+ bool is_parameter = false,
218
+ bool is_buffer = false) {
219
+ auto slot_idx = findAttributeSlot(name);
220
+ if (!slot_idx) {
221
+ return addAttribute(name, std::move(ty), is_parameter, is_buffer);
222
+ }
223
+
224
+ TORCH_CHECK(
225
+ is_parameter == this->is_parameter(*slot_idx),
226
+ "Parameter field mismatch for the field '",
227
+ name,
228
+ "'");
229
+ const TypePtr& atype = getAttribute(*slot_idx);
230
+ TORCH_CHECK(
231
+ ty->isSubtypeOf(*atype),
232
+ ty->repr_str(),
233
+ " is not compatible with the type ",
234
+ atype->repr_str(),
235
+ " for the field '",
236
+ name,
237
+ "'");
238
+ return *slot_idx;
239
+ }
240
+
241
+ // Get the property with the given \p name, if it exists on the class.
242
+ c10::optional<ClassType::Property> getProperty(const std::string& name);
243
+ // Add a property named \p name with \p getter and \p setter as its getter and setter.
244
+ void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter);
245
+ // Get a list of all properties.
246
+ const std::vector<Property>& properties() const {
247
+ return properties_;
248
+ }
249
+
250
+ bool hasConstant(const std::string& name) const {
251
+ return std::find_if(
252
+ constantNames_.cbegin(),
253
+ constantNames_.cend(),
254
+ [&](const std::string& constant) { return constant == name; }) !=
255
+ constantNames_.cend();
256
+ }
257
+
258
+ size_t addConstant(const std::string& name, const IValue& value);
259
+
260
+ c10::optional<size_t> findConstantSlot(const std::string& name) const;
261
+
262
+ size_t getConstantSlot(const std::string& name) const {
263
+ if (auto r = findConstantSlot(name)) {
264
+ return *r;
265
+ }
266
+ TORCH_CHECK(
267
+ false,
268
+ repr_str(),
269
+ " does not have constant field with the name '",
270
+ name,
271
+ "'");
272
+ }
273
+
274
+ const std::string& getConstantName(size_t slot) const;
275
+
276
+ const std::string& doc_string() const {
277
+ return doc_string_;
278
+ }
279
+
280
+ IValue getConstant(const std::string& name) const;
281
+
282
+ IValue getConstant(size_t slot) const;
283
+
284
+ c10::optional<IValue> findConstant(const std::string& name) const;
285
+
286
+ size_t numConstants() const;
287
+
288
+ at::ArrayRef<std::string> constantNames() const {
289
+ return constantNames_;
290
+ }
291
+
292
+ at::ArrayRef<IValue> constantValues() const;
293
+
294
+ // [Internal Only] Remove constant from the ClassType
295
+ // caller is responsible to make sure the modification is safe:
296
+ // it is unsafe to having existing allocations
297
+ // of this object around anymore, and any code that works on
298
+ // the attribute is now invalid. Only newly created code is
299
+ // valid again.
300
+ void unsafeRemoveConstant(const std::string& name);
301
+
302
+ TypePtr createWithContained(std::vector<TypePtr> contained_types) const override {
303
+ auto ptr = ClassType::create(name(), compilation_unit_, is_module());
304
+ AT_ASSERT(numAttributes() == contained_types.size());
305
+ for(size_t i = 0; i < attributes_.size(); ++i) {
306
+ AT_ASSERT(attributes_[i].getType()->isSubtypeOf(*contained_types[i]));
307
+ ptr->addAttribute(attributes_[i].getName(), std::move(contained_types[i]));
308
+ }
309
+ // Copy methods over
310
+ for (const auto& method : methods()) {
311
+ ptr->addMethod(method);
312
+ }
313
+ return ptr;
314
+ }
315
+
316
+ bool is_module() const override {
317
+ return isModule_;
318
+ }
319
+
320
+ const std::vector<ClassAttribute>& getAttributes() const {
321
+ return attributes_;
322
+ }
323
+
324
+ bool is_parameter(size_t slot) const {
325
+ TORCH_INTERNAL_ASSERT(
326
+ is_module(), "asking for parameterSlots of non-Module");
327
+ return attributes_.at(slot).getKind() == AttributeKind::PARAMETER;
328
+ }
329
+
330
+ bool is_buffer(size_t slot) const {
331
+ TORCH_INTERNAL_ASSERT(
332
+ is_module(), "asking for bufferWrittenSlots of non-Module");
333
+ return attributes_.at(slot).getKind() == AttributeKind::BUFFER;
334
+ }
335
+
336
+ void addForwardPreHook(torch::jit::Function* pre_hook_ptr);
337
+ void addForwardHook(torch::jit::Function* hook_ptr);
338
+ torch::jit::Function* findForwardPreHook(const std::string& name) const;
339
+ torch::jit::Function* findForwardHook(const std::string& name) const;
340
+ const std::vector<torch::jit::Function*>& getForwardHooks() const;
341
+ const std::vector<torch::jit::Function*>& getForwardPreHooks() const;
342
+
343
+ void checkForwardPreHookSchema(
344
+ int pre_hook_idx,
345
+ const FunctionSchema& pre_hook_schema) const;
346
+ void checkForwardHookSchema(
347
+ int hook_idx,
348
+ const FunctionSchema& hook_schema) const;
349
+
350
+ void addMethod(torch::jit::Function* method);
351
+ torch::jit::Function* findMethod(const std::string& name) const;
352
+ torch::jit::Function& getMethod(const std::string& name) const;
353
+ torch::jit::Function* findHook(const std::string& name) const;
354
+ torch::jit::Function& getHook(const std::string& name) const;
355
+ bool hasMethod(const std::string& name) const;
356
+
357
+ torch::jit::Function* findStaticMethod(const std::string& name) const;
358
+ void addStaticMethod(torch::jit::Function* method);
359
+
360
+ // [Internal Only] Remove method from the ClassType
361
+ // caller is responsible to make sure the modification is safe:
362
+ // it is unsafe to having existing allocations
363
+ // of this object around anymore, and any code that works on
364
+ // the attribute is now invalid. Only newly created code is
365
+ // valid again.
366
+ // Note this method is intended for freezing only.
367
+ void unsafeRemoveMethod(const std::string& name);
368
+
369
+ std::shared_ptr<CompilationUnit> compilation_unit();
370
+
371
+ std::shared_ptr<const CompilationUnit> compilation_unit() const;
372
+
373
+ // generate a refined version of this class.
374
+ // It has the same name but the slot Types are subtypes of
375
+ // the original slots. It is only valid to refine a class type in a context
376
+ // where it is know that there are not assignments to the objects slots
377
+ // that would invalidate the refinement.
378
+ // These variants are not registered in the global class table.
379
+ ClassTypePtr refine(at::ArrayRef<TypePtr> refined_slots) const;
380
+
381
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
382
+
383
+ static const TypeKind Kind = TypeKind::ClassType;
384
+
385
+ private:
386
+ ClassType(
387
+ c10::optional<QualifiedName> name,
388
+ std::weak_ptr<CompilationUnit> cu,
389
+ bool is_module = false,
390
+ std::string doc_string = "",
391
+ std::vector<std::string> unresolved_class_attributes = {});
392
+
393
+ std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
394
+ const auto& n = name().value();
395
+ return n.qualifiedName();
396
+ }
397
+
398
+ void addAttribute(ClassAttribute classAttribute);
399
+ std::string getForwardPreHookErrorMessage(int pre_hook_idx) const;
400
+ std::string getForwardHookErrorMessage(int hook_idx) const;
401
+
402
+ // Mapping of attribute names -> their type.
403
+ // NOTE: this does not contain methods, which are stored in the module
404
+ // TODO: once modules support arbitrary ivalue attributes, we don't need this
405
+ // anymore.
406
+ // TODO: This is better represented as an OrderedDict, but alas it is not yet
407
+ // available from c10
408
+
409
+ // Mapping of constant names -> their value.
410
+ std::vector<std::string> constantNames_;
411
+ std::vector<IValue> constantValues_;
412
+ // Holds method attributes
413
+ std::weak_ptr<CompilationUnit> compilation_unit_;
414
+
415
+ // Holds all atrributes, attribute details are found on ClassAttribute
416
+ std::vector<ClassAttribute> attributes_;
417
+ // Construct mirroring attributes_, only around due to the fact that `containedTypes()` method returns an ArrayRef.
418
+ // Never fill this without using the appropriate provideNewClassAttribute method
419
+ std::vector<TypePtr> attributeTypes_;
420
+
421
+ // List of methods associated with this class.
422
+ std::vector<torch::jit::Function*> methods_;
423
+ std::vector<torch::jit::Function*> staticmethods_;
424
+
425
+ // List of hooks to be run before/after forward.
426
+ std::vector<torch::jit::Function*> forward_hooks_;
427
+ std::vector<torch::jit::Function*> forward_pre_hooks_;
428
+
429
+ // List of properties exposed by this class.
430
+ std::vector<Property> properties_;
431
+
432
+ bool isModule_ = false;
433
+
434
+ // Doc string of class.
435
+ std::string doc_string_ = "";
436
+
437
+ // For error reporting accesses to class level attributes.
438
+ std::vector<std::string> unresolved_class_attributes_;
439
+ };
440
+
441
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Metaprogramming.h>
7
+ #include <c10/util/Type.h>
8
+
9
+ namespace c10 {
10
+ namespace impl {
11
+
12
+ // A CppSignature object holds RTTI information about a C++ function signature at runtime
13
+ // and can compare them or get a debug-printable name.
14
+ class TORCH_API CppSignature final {
15
+ public:
16
+ CppSignature(const CppSignature&) = default;
17
+ CppSignature(CppSignature&&) noexcept = default;
18
+ CppSignature& operator=(const CppSignature&) = default;
19
+ CppSignature& operator=(CppSignature&&) noexcept = default;
20
+
21
+ template<class FuncType>
22
+ static CppSignature make() {
23
+ // Normalize functors, lambdas, function pointers, etc. into the plain function type
24
+ // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
25
+ // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered
26
+ // with different calling conventions.
27
+ // See Note [Plumbing Keys Through The Dispatcher]
28
+ using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type;
29
+
30
+ return CppSignature(std::type_index(typeid(decayed_function_type)));
31
+ }
32
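+ // Sketch: two signatures that differ only in a leading DispatchKeySet argument
+ // should normalize to the same CppSignature:
+ //
+ //   using SigA = at::Tensor(const at::Tensor&);
+ //   using SigB = at::Tensor(c10::DispatchKeySet, const at::Tensor&);
+ //   bool same = CppSignature::make<SigA>() == CppSignature::make<SigB>();  // expected: true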
+
33
+ std::string name() const {
34
+ return c10::demangle(signature_.name());
35
+ }
36
+
37
+ friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) {
38
+ if (lhs.signature_ == rhs.signature_) {
39
+ return true;
40
+ }
41
+ // Without RTLD_GLOBAL, the type_index comparison could yield false because
42
+ // they point to different instances of the RTTI data, but the types would
43
+ // still be the same. Let's check for that case too.
44
+ // Note that there still is a case where this might not work, i.e. when
45
+ // linking libraries of different compilers together, they might have
46
+ // different ways to serialize a type name. That, together with a missing
47
+ // RTLD_GLOBAL, would still fail this.
48
+ if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) {
49
+ return true;
50
+ }
51
+
52
+ return false;
53
+ }
54
+
55
+ private:
56
+ explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {}
57
+ std::type_index signature_;
58
+ };
59
+
60
+ inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) {
61
+ return !(lhs == rhs );
62
+ }
63
+
64
+ }
65
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h ADDED
@@ -0,0 +1,242 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <ATen/core/function_schema.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <c10/util/Bitset.h>
7
+ #include <c10/core/DispatchKeySet.h>
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/core/Variadic.h>
10
+ #include <ATen/core/stack.h>
11
+
12
+ namespace c10 {
13
+
14
+ namespace impl {
15
+
16
+ // Take a DispatchKeySet for a Tensor and determine what the actual dispatch
17
+ // DispatchKey should be, taking into account TLS, and skipping backends which
18
+ // fall through.
19
+ //
20
+ // Unlike Tensor::key_set(), the value of this on a tensor can change depending
21
+ // on TLS.
22
+ //
23
+ // NB: If there is no valid dispatch key, this will return Undefined
24
+ static inline DispatchKeySet computeDispatchKeySet(
25
+ DispatchKeySet ks,
26
+ // The key mask lets us eliminate (by zero entries) keys which should not
27
+ // be considered for dispatch. There are two cases when we use this:
28
+ //
29
+ // - If an operator's dispatch table contains a fallthrough entry, we
30
+ // should bypass it entirely when finding the key
31
+ // - If a user invokes with redispatch, the mask lets us
32
+ // zero out the key the user asked us to stop.
33
+ //
34
+ // These excluded backends are NOT tracked in the TLS, but must be applied
35
+ // AFTER TLS (since the backend may have been introduced for consideration
36
+ // by the included TLS), which is why you have to pass them in to this
37
+ // function (as opposed to just applying it to the input 'ks').
38
+ DispatchKeySet key_mask
39
+ ) {
40
+ c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
41
+ // TODO: It's a bit irritating that we have to do logical ORs here, it would
42
+ // be nice to only do one. Can always_included be folded into the TLS? Well,
43
+ // it's a bit troublesome, because fastpath TLS access requires the type of
44
+ // the TLS in question to be zero-initialized, so you don't actually win
45
+ // anything in that case.
46
+ return (((ks | local.included_) - local.excluded_) & key_mask);
47
+ }
48
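+ // Rough example: if `ks` is {CPU, AutogradCPU}, the Autograd keys are in the
+ // TLS exclude set (e.g. while running under at::AutoDispatchBelowAutograd),
+ // and `key_mask` is FULL, then the result is just {CPU}.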
+
49
+ }
50
+
51
+ namespace detail {
52
+ // A small gadget to extract the DispatchKeySet from types which are known
53
+ // to have it. Used to extract dispatch keys from unboxed calls.
54
+ struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
55
+ DispatchKeySet ts;
56
+ void operator()(const at::Tensor& x) {
57
+ ts = ts | x.key_set();
58
+ }
59
+ void operator()(const c10::optional<at::Tensor>& x) {
60
+ if (x.has_value()) {
61
+ ts = ts | x->key_set();
62
+ }
63
+ }
64
+ void operator()(at::ArrayRef<at::Tensor> xs) {
65
+ for (const auto& x : xs) {
66
+ ts = ts | x.key_set();
67
+ }
68
+ }
69
+ // Tensor?[] translates to this case.
70
+ void operator()(const c10::List<c10::optional<at::Tensor>>& xs) {
71
+ for (c10::optional<at::Tensor> x : xs) {
72
+ if (x.has_value()) {
73
+ ts = ts | x.value().key_set();
74
+ }
75
+ }
76
+ }
77
+ // Structured Tensor[] translates to this case
78
+ void operator()(const at::ITensorListRef& xs) {
79
+ for (const auto& x : xs) {
80
+ ts = ts | x.key_set();
81
+ }
82
+ }
83
+ [[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) {
84
+ // Just checking that the handling of Tensor?[] didn't change.
85
+ TORCH_INTERNAL_ASSERT(false);
86
+ }
87
+ void operator()(const at::Generator& gen) {
88
+ if (gen.defined()) {
89
+ ts = ts | gen.key_set();
90
+ }
91
+ }
92
+ void operator()(const c10::optional<at::Generator>& gen) {
93
+ if (gen.has_value() && gen->defined()) {
94
+ ts = ts | gen->key_set();
95
+ }
96
+ }
97
+ template <typename T>
98
+ void operator()(const T&) {
99
+ // do nothing
100
+ }
101
+ };
102
+
103
+ // NB: take by const reference (Don't do universal forwarding here! You
104
+ // don't want to move into this function!)
105
+ template <typename... Args>
106
+ DispatchKeySet multi_dispatch_key_set(const Args&... args) {
107
+ return MultiDispatchKeySet().apply(args...).ts;
108
+ }
109
+ }
110
+
111
+ /**
112
+ * An instance of DispatchKeyExtractor knows how to get a dispatch key given
113
+ * a list of arguments for an operator call.
114
+ *
115
+ * The instance is specific for a certain operator as:
116
+ * - In boxed dispatch, different operators have different ways to extract
117
+ * the dispatch key (e.g. different numbers of arguments), and we precompute
118
+ * the stack locations we should look at; and
119
+ * - In all dispatch, some backends should be excluded from dispatch because
120
+ * they have been registered as fallthrough. The set of excluded backends
121
+ * varies from operator to operator, as some operators may have overridden the
122
+ * fallthrough with custom behavior.
123
+ *
124
+ * Note - this should maintain identical impl to the py dispatcher key extraction logic
125
+ * at pytorch/torch/dispatcher.py
126
+ */
127
+ struct TORCH_API DispatchKeyExtractor final {
128
+ public:
129
+ static DispatchKeyExtractor make(const FunctionSchema& schema) {
130
+ return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
131
+ }
132
+
133
+ static DispatchKeyExtractor makeUninitialized() {
134
+ return DispatchKeyExtractor(c10::utils::bitset());
135
+ }
136
+
137
+ void registerSchema(const FunctionSchema& schema) {
138
+ TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
139
+ dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
140
+ }
141
+ void deregisterSchema() {
142
+ dispatch_arg_indices_reverse_ = c10::utils::bitset();
143
+ }
144
+
145
+ DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
146
+ DispatchKeySet ks;
147
+ dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
148
+ const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
149
+ if (C10_LIKELY(ivalue.isTensor())) {
150
+ // NB: Take care not to introduce a refcount bump (there's
151
+ // no safe toTensorRef method, alas)
152
+ ks = ks | ivalue.unsafeToTensorImpl()->key_set();
153
+ } else if (C10_UNLIKELY(ivalue.isTensorList())) {
154
+ for (const at::Tensor& tensor : ivalue.toTensorList()) {
155
+ ks = ks | tensor.key_set();
156
+ }
157
+ }
158
+ // Tensor?[] translates to a c10::List<IValue> so we need to peek inside
159
+ else if (C10_UNLIKELY(ivalue.isList())) {
160
+ for (const auto& elt : ivalue.toListRef()) {
161
+ if (elt.isTensor()) {
162
+ ks = ks | elt.toTensor().key_set();
163
+ }
164
+ }
165
+ }
166
+ });
167
+ // Keys that are fallthrough should be skipped
168
+ if (requiresBitsetPerBackend_) {
169
+ auto backend_idx = ks.getBackendIndex();
170
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
171
+ } else {
172
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
173
+ }
174
+ }
175
+
176
+ template<class... Args>
177
+ DispatchKeySet getDispatchKeySetUnboxed(const Args&... args) const {
178
+ auto ks = detail::multi_dispatch_key_set(args...);
179
+ // Keys that are fallthrough should be skipped
180
+ if (requiresBitsetPerBackend_) {
181
+ auto backend_idx = ks.getBackendIndex();
182
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
183
+ } else {
184
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
185
+ }
186
+ }
187
+
188
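+ // Usage sketch (`op` is a hypothetical OperatorHandle and `self`/`other` are
+ // at::Tensor arguments):
+ //
+ //   auto extractor = DispatchKeyExtractor::make(op.schema());
+ //   c10::DispatchKeySet ks = extractor.getDispatchKeySetUnboxed(self, other);
+ //   c10::DispatchKey k = ks.highestPriorityTypeId();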
+ void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);
189
+
190
+ std::string dumpState() const;
191
+ void checkInvariants(const FunctionSchema& schema) const;
192
+
193
+ private:
194
+ static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
195
+ TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
196
+ "The function schema has ", schema.arguments().size(),
197
+ " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
198
+ c10::utils::bitset dispatch_arg_indices_reverse;
199
+ for (const auto index : c10::irange(schema.arguments().size())) {
200
+ if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
201
+ schema.arguments()[index].type()->isSubtypeOf(
202
+ *ListType::ofTensors()) ||
203
+ schema.arguments()[index].type()->isSubtypeOf(
204
+ *ListType::ofOptionalTensors()) ||
205
+ schema.arguments()[index].type()->isSubtypeOf(
206
+ *OptionalType::ofTensor())) {
207
+ dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
208
+ }
209
+ }
210
+ return dispatch_arg_indices_reverse;
211
+ }
212
+
213
+ explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
214
+ : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
215
+ , nonFallthroughKeys_(DispatchKeySet::FULL)
216
+ , requiresBitsetPerBackend_(false) {
217
+ for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
218
+ nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
219
+ }
220
+ }
221
+
222
+ // this is a bitset that has ones for each argument index which has to be
223
+ // considered for dispatch. This avoids having to iterate over the stack
224
+ // to find all the tensors. The bits are stored in reverse order, i.e.
225
+ // if dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
226
+ // the top of the stack (i.e. the i-th last argument of the function)
227
+ // is relevant for dispatch.
228
+ // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
229
+ // fallthrough
230
+ c10::utils::bitset dispatch_arg_indices_reverse_;
231
+
232
+ // Set of functionality keys for which the operator does NOT have fallthrough kernel.
233
+ DispatchKeySet nonFallthroughKeys_;
234
+ // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND.
235
+ // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
236
+ std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_;
237
+ // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path),
238
+ // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_
239
+ bool requiresBitsetPerBackend_;
240
+ };
241
+
242
+ }