applied-ai-018 committed on
Commit 6ca79e0 · verified · 1 Parent(s): ff642f6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h +176 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h +99 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h +260 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h +229 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h +27 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +32 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h +600 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h +124 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h +160 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h +596 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh +47 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh +508 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h +375 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h +9 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h +95 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h +115 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h +23 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h +138 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h +92 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh +57 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h +76 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h +318 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh +15 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h +20 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h +37 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh +121 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h +44 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh +121 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h +11 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h +5 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh +4 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh +78 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h +10 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h +23 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh +413 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h +87 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h +54 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh +36 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh +124 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h +37 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h +11 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh +119 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh +43 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh +116 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh +28 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h +40 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h +249 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h +14 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h +174 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h +379 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h ADDED
@@ -0,0 +1,176 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+
7
+ namespace c10 {
8
+
9
+ struct IValue;
10
+ using Stack = std::vector<IValue>;
11
+
12
+ class OperatorHandle;
13
+ class KernelFunction;
14
+
15
+ // This kernel implements the behavior of falling through to the next available
16
+ // registered dispatch key. The implementation of this function is FAST; there is
17
+ // no overhead to fall through to the next key. See the cpp file for some more
18
+ // implementation notes; notably, this does NOT actually go through the
19
+ // boxing/unboxing codepath.
20
+ TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
21
+
22
+ // Note [Ambiguity in AutogradOther kernel]
23
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24
+ // This error-reporting kernel is registered to the AutogradOther entry in the
25
+ // dispatch table when there is both a CompositeImplicitAutograd kernel and a
26
+ // backend kernel for ANY backend that maps to AutogradOther. To see why
27
+ // this is necessary in the AutogradOther case, it's helpful to first see
28
+ // why everything works out fine for a backend that has a reserved Autograd
29
+ // entry (see rule 2.2 in [Note] DispatchTable computation):
30
+ //
31
+ // CPU AutogradCPU
32
+ // reg? registers with...
33
+ // -------------------------------------------------
34
+ // y Autograd registration takes precedence
35
+ // over CompositeImplicitAutograd.
36
+ // This is good, because the CPU specific backend
37
+ // implementation is more specialized and typically better;
38
+ // if we used the composite, we would bypass it.
39
+ // (NB: the Autograd key is guaranteed to exist because
40
+ // the autograd codegen requires it!)
41
+ //
42
+ // n CompositeImplicitAutograd takes precedence.
43
+ // This is also good, because the Autograd
44
+ // registration (if it exists) would try to redispatch
45
+ // to the (non-existent) CPU implementation; by
46
+ // using the composite, we ensure the operator
47
+ // actually works.
48
+ //
49
+ // As you can see, when we have a specific Autograd key (AutogradCPU), we can
50
+ // decide whether or not to use the CompositeImplicitAutograd kernel or the
51
+ // Autograd kernel based on whether or not the backend kernel exists.
52
+ //
53
+ // However, for AutogradOther (which is the catchall autograd kernel for
54
+ // everything that doesn't have a specific Autograd key), we can't do this
55
+ // trick because there isn't any unique backend to peek at to disambiguate;
56
+ // if there are some backends that have implementations they prefer Autograd,
57
+ // but unimplemented backends would prefer CompositeImplicitAutograd. Rather
58
+ // than arbitrarily pick one or the other, we just register a kernel that raises
59
+ // an error and let the user decide how to proceed.
60
+ TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
61
+
62
+ // Note [named_not_supported_kernel]
63
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64
+ // This kernel implements reporting an error message saying that named tensor is
65
+ // not supported. This kernel doesn't rely on the Stack, and so it is special
66
+ // cased in the dispatcher to be triggered before we attempt boxing (so we can
67
+ // give a good error message in cases when boxing is not supported). When
68
+ // boxing is universally supported this can be removed.
69
+ [[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
70
+
71
+ /**
72
+ * BoxedKernel is similar to a std::function storing a boxed kernel.
73
+ */
74
+ class TORCH_API BoxedKernel final {
75
+ public:
76
+ // This is how boxed kernels are actually stored
77
+ //
78
+ // Note [Plumbing Keys Through The Dispatcher]
79
+ // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
80
+ // upon every dispatch call into order to compute which kernel to dispatch to.
81
+ //
82
+ // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
83
+ // to have a first argument of type DispatchKeySet.
84
+ //
85
+ // What are the invariants of the DispatchKeySet when it gets passed to a kernel?
86
+ // - All keys to the left of the current dispatch key have been masked out.
87
+ // (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
88
+ // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments
89
+ // are still in the set.
90
+ //
91
+ // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
92
+ // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
93
+ // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
94
+ // upon redispatching.
95
+ //
96
+ // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature
97
+ // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
98
+ //
99
+ // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
100
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
101
+ using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
102
+ // This is the public API for how boxed kernels are defined
103
+ using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
104
+ using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
105
+
106
+ BoxedKernel();
107
+
108
+ // Fast path for dispatch to allow not touching the boxed kernel in
109
+ // the common case where unboxed is available.
110
+ bool isValid() const;
111
+ bool isFallthrough() const;
112
+
113
+ /**
114
+ * Call the function with boxed arguments.
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Create a KernelFunction from a boxed function.
120
+ *
121
+ * Example:
122
+ *
123
+ * > void boxed_func(const OperatorHandle& op, Stack* stack) {...}
124
+ * > BoxedKernel func = BoxedKernel::makeFromFunction<&boxed_func>();
125
+ */
126
+ template<BoxedKernelFunction* func>
127
+ static BoxedKernel makeFromFunction();
128
+
129
+ /**
130
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
131
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
132
+ */
133
+ template<BoxedKernelFunction_withDispatchKeys* func>
134
+ static BoxedKernel makeFromFunction();
135
+
136
+ /**
137
+ * Create a KernelFunction from a boxed functor.
138
+ *
139
+ * Example:
140
+ *
141
+ * > class MyFunctor final : public c10::OperatorKernel {
142
+ * > public:
143
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
144
+ * > };
145
+ * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>());
146
+ */
147
+ template<class KernelFunctor>
148
+ static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
149
+
150
+
151
+ static BoxedKernel makeFallthrough();
152
+ static BoxedKernel makeAmbiguousAutogradOther();
153
+ static BoxedKernel makeNamedNotSupported();
154
+
155
+ private:
156
+
157
+ friend class KernelFunction;
158
+
159
+ template<BoxedKernelFunction* func>
160
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
161
+
162
+ template<BoxedKernelFunction_withDispatchKeys* func>
163
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
164
+
165
+ explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func);
166
+
167
+ OperatorKernel* getFunctor() const;
168
+ InternalBoxedKernelFunction* getFnPtr() const;
169
+
170
+ c10::intrusive_ptr<OperatorKernel> functor_;
171
+ InternalBoxedKernelFunction* boxed_kernel_func_;
172
+ };
173
+
174
+ } // namespace c10
175
+
176
+ #include <ATen/core/boxing/BoxedKernel_impl.h>
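To make the two public boxed signatures above concrete, here is a brief usage sketch. It is not part of the header: it assumes compilation inside PyTorch where these c10 headers are available, and logging_boxed_kernel / make_logging_kernel are invented names.

#include <ATen/core/boxing/BoxedKernel.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <iostream>

namespace {

// Matches BoxedKernel::BoxedKernelFunction: the kernel sees only the operator
// handle and a stack of IValues. A real kernel would pop its inputs from the
// stack and push its outputs; this one merely inspects the call.
void logging_boxed_kernel(const c10::OperatorHandle& op, c10::Stack* stack) {
  std::cout << "boxed call to " << op.schema().name() << " with "
            << stack->size() << " IValues on the stack\n";
}

} // namespace

// The function pointer is a compile-time template argument, as required by
// makeFromFunction<BoxedKernelFunction*>. The _withDispatchKeys variant would
// additionally receive the masked DispatchKeySet described in the note above.
c10::BoxedKernel make_logging_kernel() {
  return c10::BoxedKernel::makeFromFunction<&logging_boxed_kernel>();
}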
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h ADDED
@@ -0,0 +1,99 @@
1
+ #pragma once
2
+
3
+ namespace c10 {
4
+
5
+ inline BoxedKernel::BoxedKernel()
6
+ : functor_()
7
+ , boxed_kernel_func_(nullptr)
8
+ {}
9
+
10
+ inline BoxedKernel::BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func)
11
+ : functor_(std::move(functor))
12
+ , boxed_kernel_func_(boxed_kernel_func)
13
+ {}
14
+
15
+ template<BoxedKernel::BoxedKernelFunction* func>
16
+ inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) {
17
+ // Note that we're dropping the DispatchKeySet argument.
18
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
19
+ func(opHandle, stack);
20
+ }
21
+
22
+ template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
23
+ inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) {
24
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
25
+ func(opHandle, ks, stack);
26
+ }
27
+
28
+ inline bool BoxedKernel::isValid() const {
29
+ return boxed_kernel_func_ != nullptr;
30
+ }
31
+
32
+ inline bool BoxedKernel::isFallthrough() const {
33
+ return boxed_kernel_func_ == &fallthrough_kernel;
34
+ }
35
+
36
+ inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
37
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
38
+ boxed_kernel_func_ != nullptr,
39
+ "Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel."
40
+ );
41
+ (*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack);
42
+ }
43
+
44
+ template<BoxedKernel::BoxedKernelFunction* func>
45
+ inline BoxedKernel BoxedKernel::makeFromFunction() {
46
+ return BoxedKernel(
47
+ nullptr, // no functor_ object
48
+ &make_boxed_function<func>
49
+ );
50
+ }
51
+
52
+ template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
53
+ inline BoxedKernel BoxedKernel::makeFromFunction() {
54
+ return BoxedKernel(
55
+ nullptr, // no functor_ object
56
+ &make_boxed_function<func>
57
+ );
58
+ }
59
+
60
+ inline BoxedKernel BoxedKernel::makeFallthrough() {
61
+ return BoxedKernel(
62
+ nullptr, // no functor_ object
63
+ &fallthrough_kernel
64
+ );
65
+ }
66
+
67
+ inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() {
68
+ return BoxedKernel(
69
+ nullptr, // no functor_ object
70
+ &ambiguous_autogradother_kernel
71
+ );
72
+ }
73
+
74
+ inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
75
+ return BoxedKernel(
76
+ nullptr, // no functor_ object
77
+ &named_not_supported_kernel
78
+ );
79
+ }
80
+
81
+ template<class KernelFunctor>
82
+ inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
83
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
84
+ return BoxedKernel(
85
+ std::move(kernelFunctor),
86
+ [](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {
87
+ (*static_cast<KernelFunctor*>(kernel))(op, ks, stack);
88
+ }
89
+ );
90
+ }
91
+
92
+ inline OperatorKernel* BoxedKernel::getFunctor() const {
93
+ return functor_.get();
94
+ }
95
+ inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const {
96
+ return boxed_kernel_func_;
97
+ }
98
+
99
+ } // namespace c10
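A matching sketch for the functor path implemented just above (CountingKernel is a made-up name): the functor derives from c10::OperatorKernel and exposes the (OperatorHandle, DispatchKeySet, Stack*) call operator that the lambda inside makeFromFunctor casts back to.

#include <ATen/core/boxing/BoxedKernel.h>
#include <atomic>
#include <cstdint>
#include <memory>

namespace {

// A stateful boxed functor. Per OperatorKernel.h, members behave like global
// variables, so they must be safe to touch from concurrent calls.
class CountingKernel final : public c10::OperatorKernel {
 public:
  void operator()(const c10::OperatorHandle& /*op*/, c10::DispatchKeySet /*ks*/,
                  c10::Stack* /*stack*/) {
    calls_.fetch_add(1, std::memory_order_relaxed);
    // A real kernel would also pop its inputs from and push outputs to *stack.
  }

 private:
  std::atomic<int64_t> calls_{0};
};

} // namespace

c10::BoxedKernel make_counting_kernel() {
  return c10::BoxedKernel::makeFromFunctor(std::make_unique<CountingKernel>());
}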
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h ADDED
@@ -0,0 +1,260 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ATen_fwd.h>
4
+ #include <ATen/core/boxing/BoxedKernel.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/core/DispatchKeySet.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+ #include <c10/util/TypeList.h>
9
+ #include <type_traits>
10
+
11
+ namespace c10 {
12
+
13
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
14
+
15
+ class OperatorHandle;
16
+ struct OperatorKernel;
17
+ class KernelFunction;
18
+
19
+ template <typename T>
20
+ using has_symint =
21
+ std::disjunction<
22
+ std::is_same<c10::SymInt, T>,
23
+ std::is_same<c10::SymIntArrayRef, T>,
24
+ std::is_same<at::OptionalSymIntArrayRef, T>,
25
+ std::is_same<c10::optional<c10::SymInt>, T>
26
+ >;
27
+
28
+ template <typename T>
29
+ struct remove_symint {
30
+ using type = T;
31
+ };
32
+
33
+ template <>
34
+ struct remove_symint<c10::SymInt> {
35
+ using type = int64_t;
36
+ };
37
+
38
+ template <>
39
+ struct remove_symint<at::OptionalSymIntArrayRef> {
40
+ using type = OptionalIntArrayRef;
41
+ };
42
+
43
+ template <>
44
+ struct remove_symint<c10::SymIntArrayRef> {
45
+ using type = c10::IntArrayRef;
46
+ };
47
+
48
+ template <>
49
+ struct remove_symint<c10::optional<c10::SymInt>> {
50
+ using type = c10::optional<int64_t>;
51
+ };
52
+
53
+
54
+ template <bool symint, typename T>
55
+ struct maybe_keep_symint final {};
56
+
57
+ template <typename T>
58
+ struct maybe_keep_symint<true, T> { using type = T; };
59
+
60
+ template <typename T>
61
+ struct maybe_keep_symint<false, T> { using type = typename remove_symint<T>::type; };
62
+
63
+ template <typename T>
64
+ using fn_has_symint = typename guts::typelist::true_for_any_type<
65
+ has_symint,
66
+ typename guts::infer_function_traits<T>::type::parameter_types
67
+ >;
68
+
69
+ template <typename T>
70
+ struct fn_remove_symint;
71
+
72
+ template <typename Ret, typename... Args>
73
+ struct fn_remove_symint<Ret(Args...)> {
74
+ using type = Ret(typename remove_symint<Args>::type...);
75
+ };
76
+
77
+ /**
78
+ * KernelFunction is similar to std::function but stores a kernel function.
79
+ * You can create a KernelFunction from a boxed or unboxed function/functor/lambda
80
+ * and call it in a boxed or unboxed way. If the way it was created doesn't
81
+ * match the way it was called, it will do boxing or unboxing as necessary.
82
+ */
83
+ class TORCH_API KernelFunction final {
84
+ public:
85
+ using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction;
86
+ using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction;
87
+ using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys;
88
+
89
+ KernelFunction();
90
+
91
+ // Fast path for dispatch to allow not touching the boxed kernel in
92
+ // the common case where unboxed is available.
93
+ bool isValidUnboxed() const;
94
+ bool isValidSymUnboxed() const;
95
+ bool isValid() const;
96
+ bool isFallthrough() const;
97
+
98
+ /**
99
+ * Call the function in a boxed way.
100
+ * If the kernel function was created with an unboxed function,
101
+ * this will call an unboxing wrapper which then calls into that
102
+ * unboxed function.
103
+ *
104
+ * Example:
105
+ *
106
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
107
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
108
+ * > Tensor result = func.callBoxed(stack);
109
+ *
110
+ * Or, with an unboxed implementation:
111
+ *
112
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
113
+ * > [] (Tensor a, bool b) -> Tensor {...});
114
+ * > Tensor result = func.callBoxed(stack);
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Call the function in an unboxed way.
120
+ * If the kernel function was created with a boxed function,
121
+ * this will box all inputs and then call into that boxed function.
122
+ *
123
+ * Note that this doesn't work for all types yet.
124
+ *
125
+ * Example:
126
+ *
127
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
128
+ * > [] (Tensor a, bool b) -> Tensor {...});
129
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
130
+ *
131
+ * Or, with a boxed implementation:
132
+ *
133
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
134
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
135
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
136
+ */
137
+ template<class Return, class... Args>
138
+ Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const;
139
+
140
+ /**
141
+ * Create a KernelFunction from a BoxedKernel.
142
+ */
143
+ static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn);
144
+
145
+ /**
146
+ * Create a KernelFunction from a boxed function.
147
+ *
148
+ * Example:
149
+ *
150
+ * > void boxed_func(const OperatorHandle& op, Stack* stack) {...}
151
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
152
+ */
153
+ template<BoxedKernelFunction* func>
154
+ static KernelFunction makeFromBoxedFunction();
155
+
156
+ /**
157
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
158
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
159
+ */
160
+ template<BoxedKernelFunction_withDispatchKeys* func>
161
+ static KernelFunction makeFromBoxedFunction();
162
+
163
+ /**
164
+ * Create a KernelFunction from an unboxed functor.
165
+ *
166
+ * Example:
167
+ *
168
+ * > class MyFunctor final : public c10::OperatorKernel {
169
+ * > public:
170
+ * > Tensor operator()(Tensor a, Tensor b) {...}
171
+ * > };
172
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor<MyFunctor>(std::make_unique<MyFunctor>());
173
+ */
174
+ template<bool AllowLegacyTypes = false, class KernelFunctor>
175
+ static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
176
+
177
+ /**
178
+ * Create a KernelFunction from a boxed functor.
179
+ *
180
+ * Example:
181
+ *
182
+ * > class MyFunctor final : public c10::OperatorKernel {
183
+ * > public:
184
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
185
+ * > };
186
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique<MyFunctor>());
187
+ */
188
+ template<class KernelFunctor>
189
+ static KernelFunction makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
190
+
191
+ /**
192
+ * Create a KernelFunction from an unboxed function.
193
+ * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction
194
+ * because knowing the function pointer as a template argument (i.e. at
195
+ * compile time) allows the compiler to inline the function into its
196
+ * unboxing wrapper and yields better performance when calling the function.
197
+ *
198
+ * Example:
199
+ *
200
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
201
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(unboxed_func));
202
+ */
203
+ template<class FuncPtr, bool AllowLegacyTypes = false>
204
+ static KernelFunction makeFromUnboxedFunction(FuncPtr);
205
+
206
+ /**
207
+ * Create a KernelFunction from an unboxed function.
208
+ * KernelFunction::makeFromUnboxedFunction is usually a better choice than
209
+ * this if you know the function pointer at compile time, see doc comment
210
+ * there for an explanation.
211
+ *
212
+ * Example:
213
+ *
214
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
215
+ * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func);
216
+ */
217
+ template<bool AllowLegacyTypes = false, class FuncType>
218
+ static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func);
219
+
220
+ static KernelFunction makeFallthrough();
221
+ static KernelFunction makeAmbiguousAutogradOther();
222
+ static KernelFunction makeNamedNotSupported();
223
+
224
+ /**
225
+ * Create a KernelFunction from an unboxed lambda.
226
+ *
227
+ * Example:
228
+ *
229
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
230
+ * > [] (Tensor a, bool b) -> Tensor {...});
231
+ */
232
+ template<bool AllowLegacyTypes = false, class Lambda>
233
+ static std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
234
+ template<bool AllowLegacyTypes = false, class Lambda>
235
+ static std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
236
+
237
+ std::string dumpState() const;
238
+ // For testing internal invariants only
239
+ bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
240
+
241
+ private:
242
+
243
+ explicit KernelFunction(
244
+ std::unique_ptr<OperatorKernel> functor,
245
+ InternalBoxedKernelFunction* boxed_kernel_func,
246
+ void* unboxed_kernel_func,
247
+ void* sym_unboxed_kernel_func);
248
+ explicit KernelFunction(
249
+ BoxedKernel boxed_fn,
250
+ void* unboxed_kernel_func,
251
+ void* sym_unboxed_kernel_func);
252
+
253
+ BoxedKernel boxed_kernel_func_;
254
+ void* unboxed_kernel_func_;
255
+ void* sym_unboxed_kernel_func_;
256
+ };
257
+
258
+ }
259
+
260
+ #include <ATen/core/boxing/KernelFunction_impl.h>
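As a hedged usage sketch tying this header together (not from the source; scaled_add is an invented name): a KernelFunction built from a stateless lambda, invoked through the unboxed call<> path. The dispatcher normally supplies the OperatorHandle and a masked DispatchKeySet; a full keyset is passed here only because the lambda ignores it.

#include <ATen/ATen.h>
#include <ATen/core/boxing/KernelFunction.h>

// `op` would come from the dispatcher, e.g. via
// c10::Dispatcher::singleton().findSchemaOrThrow(...).
at::Tensor scaled_add(const c10::OperatorHandle& op,
                      const at::Tensor& a, const at::Tensor& b) {
  auto kf = c10::KernelFunction::makeFromUnboxedLambda(
      [](at::Tensor x, at::Tensor y) -> at::Tensor { return x + y * 2; });

  // The template arguments must spell out the kernel signature exactly; they
  // are not deduced from the call site (see the argument-forwarding note in
  // make_boxed_from_unboxed_functor.h).
  return kf.call<at::Tensor, at::Tensor, at::Tensor>(
      op, c10::DispatchKeySet(c10::DispatchKeySet::FULL), a, b);
}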
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h ADDED
@@ -0,0 +1,229 @@
1
+ #include <ATen/core/boxing/impl/boxing.h>
2
+ #include <ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h>
3
+ #include <ATen/core/boxing/impl/WrapFunctionIntoFunctor.h>
4
+ #include <ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h>
5
+
6
+ #include <c10/util/C++17.h>
7
+ #include <type_traits>
8
+
9
+ namespace c10 {
10
+
11
+ inline KernelFunction::KernelFunction()
12
+ : boxed_kernel_func_()
13
+ , unboxed_kernel_func_(nullptr)
14
+ , sym_unboxed_kernel_func_(nullptr)
15
+ {}
16
+
17
+ inline KernelFunction::KernelFunction(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
18
+ : boxed_kernel_func_(std::move(functor), boxed_kernel_func)
19
+ , unboxed_kernel_func_(unboxed_kernel_func)
20
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
21
+ {}
22
+
23
+ inline KernelFunction::KernelFunction(BoxedKernel boxed_fn, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
24
+ : boxed_kernel_func_(std::move(boxed_fn))
25
+ , unboxed_kernel_func_(unboxed_kernel_func)
26
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
27
+ {}
28
+
29
+ inline bool KernelFunction::isValidUnboxed() const {
30
+ return unboxed_kernel_func_ != nullptr;
31
+ }
32
+
33
+ inline bool KernelFunction::isValidSymUnboxed() const {
34
+ return sym_unboxed_kernel_func_ != nullptr;
35
+ }
36
+
37
+ inline bool KernelFunction::isValid() const {
38
+ return boxed_kernel_func_.isValid();
39
+ }
40
+
41
+ inline bool KernelFunction::isFallthrough() const {
42
+ return boxed_kernel_func_.isFallthrough();
43
+ }
44
+
45
+ inline void KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
46
+ boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack);
47
+ }
48
+
49
+ template<class Return, class... Args>
50
+ inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) {
51
+ using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...);
52
+ ActualSignature* func = reinterpret_cast<ActualSignature*>(unboxed_kernel_func);
53
+ return (*func)(functor, dispatchKeySet, std::forward<Args>(args)...);
54
+ }
55
+
56
+ // This template requires you to explicitly specify the argument you want to
57
+ // forward; it doesn't work if you try to deduce it
58
+ // NB: keep this in sync with cloneWithRealTypes in function_schema.cpp
59
+
60
+ template <typename T>
61
+ inline typename remove_symint<T>::type unpackSymInt(T x) { return x; }
62
+
63
+ template <>
64
+ inline typename remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
65
+ return x.guard_int(__FILE__, __LINE__);
66
+ }
67
+
68
+ template <>
69
+ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIntArrayRef x) {
70
+ return C10_AS_INTARRAYREF_SLOW(x);
71
+ }
72
+
73
+ template <>
74
+ inline typename remove_symint<c10::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
75
+ return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt;
76
+ }
77
+
78
+ template <>
79
+ inline typename remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(at::OptionalSymIntArrayRef x) {
80
+ return x.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*x)) : c10::nullopt;
81
+ }
82
+
83
+ template<class Return, class... Args>
84
+ C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const {
85
+ // note: Args above is intentionally not Args&&. We don't want perfect
86
+ // forwarding, which would require Args to be deduced, but instead we
87
+ // want callers to explicitly specify the Args.
88
+
89
+ if constexpr (std::disjunction_v<has_symint<Args>...>) {
90
+ if (sym_unboxed_kernel_func_ != nullptr) {
91
+ auto *functor = boxed_kernel_func_.getFunctor();
92
+ return callUnboxedKernelFunction<Return, Args...>(
93
+ sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
94
+ }
95
+
96
+ if (unboxed_kernel_func_ != nullptr) {
97
+ auto *functor = boxed_kernel_func_.getFunctor();
98
+ return callUnboxedKernelFunction<Return, typename remove_symint<Args>::type...>(
99
+ unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt<Args>(args)...);
100
+ }
101
+ } else {
102
+ if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) {
103
+ auto *functor = boxed_kernel_func_.getFunctor();
104
+ return callUnboxedKernelFunction<Return, Args...>(
105
+ unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
106
+ }
107
+ }
108
+
109
+ return impl::BoxedKernelWrapper<Return(Args...)>::call(
110
+ boxed_kernel_func_,
111
+ opHandle,
112
+ dispatchKeySet,
113
+ std::forward<Args>(args)...
114
+ );
115
+ }
116
+
117
+ inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) {
118
+ return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer
119
+ }
120
+
121
+ template<KernelFunction::BoxedKernelFunction* func>
122
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
123
+ return KernelFunction::makeFromBoxedKernel(
124
+ BoxedKernel::makeFromFunction<func>());
125
+ }
126
+
127
+ template<KernelFunction::BoxedKernelFunction_withDispatchKeys* func>
128
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
129
+ return KernelFunction::makeFromBoxedKernel(
130
+ BoxedKernel::makeFromFunction<func>());
131
+ }
132
+
133
+ inline KernelFunction KernelFunction::makeFallthrough() {
134
+ return KernelFunction::makeFromBoxedKernel(
135
+ BoxedKernel::makeFallthrough());
136
+ }
137
+
138
+ inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() {
139
+ return KernelFunction::makeFromBoxedKernel(
140
+ BoxedKernel::makeAmbiguousAutogradOther());
141
+ }
142
+
143
+ inline KernelFunction KernelFunction::makeNamedNotSupported() {
144
+ return KernelFunction::makeFromBoxedKernel(
145
+ BoxedKernel::makeNamedNotSupported());
146
+ }
147
+
148
+ template<bool AllowLegacyTypes, class KernelFunctor>
149
+ inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor) {
150
+ #ifndef NDEBUG
151
+ // This assertion is costly for build time so it's debug-gated.
152
+ static_assert(guts::is_functor<KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor> but the argument is not a functor.");
153
+ #endif
154
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
155
+
156
+ auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed<KernelFunctor>::call;
157
+ void* void_unboxed_fn = reinterpret_cast<void*>(unboxed_fn);
158
+ bool is_symint = fn_has_symint<decltype(unboxed_fn)>::value;
159
+ return KernelFunction(
160
+ std::move(kernelFunctor),
161
+ &impl::make_boxed_from_unboxed_functor<KernelFunctor, AllowLegacyTypes>::call,
162
+ is_symint ? nullptr : void_unboxed_fn,
163
+ is_symint ? void_unboxed_fn : nullptr
164
+ );
165
+ }
166
+
167
+ template<class KernelFunctor>
168
+ inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
169
+ return KernelFunction::makeFromBoxedKernel(
170
+ BoxedKernel::makeFromFunctor(std::move(kernelFunctor)));
171
+ }
172
+
173
+ template<class FuncPtr, bool AllowLegacyTypes>
174
+ inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) {
175
+ static_assert(is_compile_time_function_pointer<FuncPtr>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN.");
176
+ static_assert(!std::is_same<typename FuncPtr::FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
177
+ static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr");
178
+
179
+ #if !defined(C10_MOBILE)
180
+ (void)func_ptr; // Suppress unused variable warning
181
+ return makeFromUnboxedFunctor<AllowLegacyTypes, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>(
182
+ guts::make_unique_base<OperatorKernel, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>()
183
+ );
184
+ #else
185
+ // On mobile, we rather want to optimize for binary size than for performance,
186
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
187
+ // instead.
188
+ return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr());
189
+ #endif
190
+ }
191
+
192
+ template<bool AllowLegacyTypes, class FuncType>
193
+ inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) {
194
+ static_assert(guts::is_function_type<FuncType>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type.");
195
+ static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
196
+ TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
197
+
198
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(
199
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(func)
200
+ );
201
+ }
202
+
203
+ template<bool AllowLegacyTypes, class Lambda>
204
+ inline std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
205
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
206
+
207
+ #if !defined(C10_MOBILE)
208
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
209
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
210
+ );
211
+ #else
212
+ // On mobile, we rather want to optimize for binary size than for performance,
213
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
214
+ // instead.
215
+ using FuncType = typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type;
216
+ return makeFromUnboxedRuntimeFunction<AllowLegacyTypes, FuncType>(lambda);
217
+ #endif
218
+ }
219
+
220
+ template<bool AllowLegacyTypes, class Lambda>
221
+ inline std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
222
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
223
+
224
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
225
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
226
+ );
227
+ }
228
+
229
+ }
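A short hedged illustration of the SymInt plumbing above: makeFromUnboxedFunctor files the wrapper under the sym or non-sym slot depending on fn_has_symint, and unpackSymInt is what call<> uses to guard symbolic values down to concrete ones when only the non-sym kernel exists. The helper name below is invented.

#include <ATen/core/boxing/KernelFunction.h>

// unpackSymInt takes its template argument explicitly; for c10::SymInt it
// guards the (possibly symbolic) value to a concrete int64_t, the form that a
// kernel stored in unboxed_kernel_func_ expects.
int64_t concretize(c10::SymInt s) {
  return c10::unpackSymInt<c10::SymInt>(std::move(s));
}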
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace c10 {
+
+ /**
+  * Inherit from OperatorKernel to implement a c10 kernel.
+  *
+  * Example:
+  * > namespace {
+  * >   class my_kernel_cpu final : public c10::OperatorKernel {
+  * >   public:
+  * >     Tensor operator()(Tensor a, Tensor b) {...}
+  * >   };
+  * > }
+  *
+  * The kernel class is allowed to have members but these are equivalent
+  * to global variables. The kernel implementation is responsible for
+  * preventing race conditions on them.
+  *
+  * See below for how to register this kernel with PyTorch.
+  */
+ struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target {
+   ~OperatorKernel() override = default;
+ };
+
+ } // namespace c10
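The comment above points "below" for registration; in this file tree the legacy registration API lives in op_registration.h, which is part of the same commit. A hedged sketch of that path, with a made-up operator name and schema:

#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>

namespace {

class my_kernel_cpu final : public c10::OperatorKernel {
 public:
  at::Tensor operator()(at::Tensor a, at::Tensor b) {
    return a + b;
  }
};

} // namespace

// Registers the functor for the CPU dispatch key; the registration is removed
// again when `registry` is destroyed.
static auto registry = c10::RegisterOperators().op(
    "my_ops::my_add(Tensor a, Tensor b) -> Tensor",
    c10::RegisterOperators::options().kernel<my_kernel_cpu>(c10::DispatchKey::CPU));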
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+
+ #include <c10/core/CompileTimeFunctionPointer.h>
+
+ namespace c10 {
+ namespace impl {
+ namespace detail {
+ template<class FuncPtr, class ReturnType, class ParameterList> class WrapFunctionIntoFunctor_ {};
+ template<class FuncPtr, class ReturnType, class... Parameters>
+ class WrapFunctionIntoFunctor_<FuncPtr, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
+ public:
+   C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) {
+     return (*FuncPtr::func_ptr())(std::forward<Parameters>(args)...);
+   }
+ };
+ }
+
+ // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
+ // Since it is a compile time function pointer, many compilers can inline it
+ // into the wrapper and you don't get any performance overhead for wrapping.
+ template<class FuncPtr>
+ struct WrapFunctionIntoFunctor final {
+   static_assert(c10::is_compile_time_function_pointer<FuncPtr>::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN.");
+   using type = detail::WrapFunctionIntoFunctor_<
+     FuncPtr,
+     typename guts::function_traits<typename FuncPtr::FuncType>::return_type,
+     typename guts::function_traits<typename FuncPtr::FuncType>::parameter_types
+   >;
+ };
+ }
+
+ }
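A hedged usage sketch (my_relu is an invented function): TORCH_FN, from CompileTimeFunctionPointer.h, lifts a function into a compile-time function-pointer type, and WrapFunctionIntoFunctor then yields an OperatorKernel subclass that forwards to it with no stored state.

#include <ATen/ATen.h>
#include <ATen/core/boxing/impl/WrapFunctionIntoFunctor.h>

namespace {
at::Tensor my_relu(const at::Tensor& x) {
  return x.relu();
}
} // namespace

// decltype(TORCH_FN(my_relu)) is the CompileTimeFunctionPointer type; the
// resulting functor is empty, so the call can be fully inlined.
using MyReluKernel =
    c10::impl::WrapFunctionIntoFunctor<decltype(TORCH_FN(my_relu))>::type;

at::Tensor apply_relu(const at::Tensor& x) {
  return MyReluKernel()(x);
}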
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h ADDED
@@ -0,0 +1,600 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/util/TypeList.h>
7
+ #include <ATen/core/IListRef.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+ #include <c10/util/Metaprogramming.h>
10
+
11
+ #include <utility>
12
+
13
+ namespace c10 {
14
+
15
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
16
+ class OperatorHandle;
17
+
18
+ /*
19
+ * [Note: Argument forwarding in the dispatcher]
20
+ *
21
+ * The dispatcher uses a somewhat unusual way to forward arguments through several layers of
22
+ * wrapper functions. This can be confusing because an experienced C++ programmer would look at this
23
+ * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.".
24
+ * It is not a bug. The common way in C++ to forward arguments is to use universal references:
25
+ *
26
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
27
+ *
28
+ * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument.
29
+ * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a
30
+ * different reference type than was used in the kernel function. The correct reference type
31
+ * is dictated by the kernel signature and must be identical since we cast function pointers
32
+ * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines
33
+ * the reference type to use by looking at the explicitly supplied operator signature, not by looking at
34
+ * the argument we're calling it with.
35
+ *
36
+ * What does std::forward do, exactly?
37
+ * ------------------------------------
38
+ * std::forward<T>(t) is a way to cast t to the reference type supplied in T.
39
+ * Let's assume decay_t<T> == U and T is either U or some reference of U.
40
+ * - std::forward<T&>(t) will return U&, no matter what kind of reference t is.
41
+ * - std::forward<T&&>(t) will return U&&, no matter what kind of reference t is.
42
+ * - std::forward<T>(t) will return U&& (not U!), no matter what kind of reference t is.
43
+ *
44
+ * For universal references, that means that in the following function
45
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
46
+ *
47
+ * - when called with arg being an rvalue reference or non-reference value, T gets inferred to be
48
+ * a non-reference U, and std::forward<T>(t) will return U&&, correctly moving the argument.
49
+ * - when called with arg being an lvalue reference, T gets inferred to be U& because that's the only
50
+ * way to match the signature (in C++, a type that is (T&)&& will collapse to T&).
51
+ * That means std::forward<T>(t) will return U& and the value will not be moved but passed on as
52
+ * a lvalue reference.
53
+ *
54
+ * How do we use that?
55
+ * ------------------------------------
56
+ * But std::forward can also be used outside of the common "universal forwarding" pattern to change
57
+ * reference types. So instead of following the common C++ pattern, we notice what
58
+ * std::forward<T>() actually does, and that is it takes a value and changes its reference to the
59
+ * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this
60
+ * to forward based on an explicitly specified reference type instead of the inferred argument type.
61
+ *
62
+ * This is why many of the dispatcher functions look like
63
+ * > template<class T> func(T t) { func2<T>(std::forward<T>(t)); }
64
+ * instead of the common
65
+ * > template<class T> func(T&& t) { func2(std::forward<T>(t)); }
66
+ *
67
+ * and are expected to be called by explicitly specifying the template parameters in a way that matches
68
+ * the expected operator signature at each call site.
69
+ */
70
+
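// A further illustration of the pattern described above (a hedged sketch,
// reusing the hypothetical func/func2 names from the note). With
//
// > template<class T> void func(T t) { func2<T>(std::forward<T>(t)); }
//
// the call site selects the reference category explicitly:
//
// > Tensor x = ...;
// > func<const Tensor&>(x); // func2 receives x as const Tensor&
// > func<Tensor>(x);        // func2 receives a Tensor&& it may move from
//
// i.e. behavior follows the explicitly supplied T (the operator signature),
// not the value category the caller happened to use.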
71
+ namespace impl {
72
+ // supported_primitive_arg_types defines which primitive types we allow in
73
+ // kernel functions as arguments or returns.
74
+ // Additionally, we support lists, dicts and optionals containing these types.
75
+ using supported_primitive_arg_types = guts::typelist::typelist<
76
+ int64_t,
77
+ double,
78
+ bool,
79
+ c10::string_view,
80
+ at::Tensor,
81
+ at::Scalar,
82
+ c10::QScheme,
83
+ c10::ScalarType,
84
+ c10::Device,
85
+ c10::DeviceIndex,
86
+ c10::Layout,
87
+ c10::MemoryFormat,
88
+ at::Dimname
89
+ >;
90
+
91
+ // We have an unboxed functor in hand that takes C++ arguments, and
92
+ // we're building a boxed functor wrapper for it that takes IValues.
93
+ // So "outside" is boxed and "inside" is unboxed.
94
+ //
95
+ // So a valid input type is one that our boxed functor wrapper can
96
+ // unbox from an IValue into a C++ value.
97
+ //
98
+ // Whereas a valid output type is one that our wrapper can receive
99
+ // as a C++ value from the unboxed functor, and box into an IValue.
100
+
101
+ //
102
+ // assert_is_valid_input_type
103
+ // checks that T can be unboxed from an IValue into a C++ value.
104
+ //
105
+
106
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
107
+ struct assert_is_valid_input_type {
108
+ assert_is_valid_input_type() {
109
+ if constexpr (guts::typelist::contains<supported_primitive_arg_types, T>::value) {
110
+ /* everything is ok, this is a primitive type */
111
+ } else {
112
+ /* otherwise this must be an instance of a valid custom class, since it can only
113
+ have been created via IValue(x), which ensures this. */
114
+ }
115
+ }
116
+ };
117
+
118
+ template<class T, bool AllowDeprecatedTypes>
119
+ struct assert_is_valid_input_type<c10::optional<T>, AllowDeprecatedTypes>
120
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
121
+
122
+ template <bool AllowDeprecatedTypes, class... Args>
123
+ struct TypeCheckHelper;
124
+
125
+ template <bool AllowDeprecatedTypes>
126
+ struct TypeCheckHelper<AllowDeprecatedTypes> {};
127
+
128
+ template <bool AllowDeprecatedTypes, class Head, class... Rest>
129
+ struct TypeCheckHelper<AllowDeprecatedTypes, Head, Rest...>
130
+ : TypeCheckHelper<AllowDeprecatedTypes, Rest...> {
131
+ assert_is_valid_input_type<Head, AllowDeprecatedTypes> check;
132
+ };
133
+
134
+ template<class... Contained, bool AllowDeprecatedTypes>
135
+ struct assert_is_valid_input_type<std::tuple<Contained...>, AllowDeprecatedTypes>
136
+ : TypeCheckHelper<AllowDeprecatedTypes, Contained...> {};
137
+
138
+ template<class Key, class Value, bool AllowDeprecatedTypes>
139
+ struct assert_is_valid_input_type<Dict<Key, Value>, AllowDeprecatedTypes>
140
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
141
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
142
+ "You tried to register a kernel with an unsupported input type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
143
+ };
144
+
145
+ template<class Key, class Value, bool AllowDeprecatedTypes>
146
+ struct assert_is_valid_input_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
147
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
148
+ static_assert(AllowDeprecatedTypes,
149
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
150
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
151
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
152
+ };
153
+
154
+ template<class T, bool AllowDeprecatedTypes>
155
+ struct assert_is_valid_input_type<List<T>, AllowDeprecatedTypes>
156
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
157
+ static_assert(!std::is_same<T, at::Scalar>::value,
158
+ "You tried to register a kernel with an unsupported input type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
159
+ };
160
+
161
+ template<class T, bool AllowDeprecatedTypes>
162
+ struct assert_is_valid_input_type<c10::ArrayRef<T>, AllowDeprecatedTypes>
163
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
164
+ static_assert(!std::is_same<T, at::Scalar>::value,
165
+ "You tried to register a kernel with an unsupported input type: ArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
166
+ };
167
+
168
+ template<class T, bool AllowDeprecatedTypes>
169
+ struct assert_is_valid_input_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
170
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
171
+ static_assert(!std::is_same<T, at::Scalar>::value,
172
+ "You tried to register a kernel with an unsupported input type: OptionalArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
173
+ };
174
+
175
+ template<class T, size_t N, bool AllowDeprecatedTypes>
176
+ struct assert_is_valid_input_type<std::array<T, N>, AllowDeprecatedTypes>
177
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
178
+ static_assert(!std::is_same<T, at::Scalar>::value,
179
+ "You tried to register a kernel with an unsupported input type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
180
+ };
181
+
182
+ template<class T, bool AllowDeprecatedTypes>
183
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
184
+ // There is no reason to support float when we have double. Keep the API lean.
185
+ static_assert(guts::false_t<T>::value,
186
+ "You tried to register a kernel with an unsupported input type: float. Please use double instead.");
187
+ };
188
+ template<class T, bool AllowDeprecatedTypes>
189
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
190
+ static_assert(guts::false_t<T>::value,
191
+ "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead.");
192
+ };
193
+ template<class T, bool AllowDeprecatedTypes>
194
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
195
+ static_assert(guts::false_t<T>::value,
196
+ "You tried to register a kernel with an unsupported input type: vector<bool>. Please use List<bool> instead.");
197
+ };
198
+ template<class T, bool AllowDeprecatedTypes>
199
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
200
+ static_assert(guts::false_t<T>::value,
201
+ "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead.");
202
+ };
203
+ template<class T, bool AllowDeprecatedTypes>
204
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const c10::SymInt&, T>::value>> {
205
+ static_assert(guts::false_t<T>::value,
206
+ "You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead.");
207
+ };
208
+
209
+ // TODO: it probably would be good to tighten this up quite a bit more with
210
+ // an explicit list for everything
211
+
212
+ //
213
+ // assert_is_valid_output_type
214
+ //
215
+
216
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
217
+ struct assert_is_valid_output_type {
218
+ assert_is_valid_output_type() {
219
+ if constexpr(guts::typelist::contains<supported_primitive_arg_types, T>::value) {
220
+ /* everything is ok, this is a primitive type */
221
+ } else {
222
+ /* otherwise T is verified to be a registered custom class in the IValue
223
+ constructor, so no benefit in double-checking here */
224
+ }
225
+ }
226
+ };
227
+
228
+ template<class T, bool AllowDeprecatedTypes>
229
+ struct assert_is_valid_output_type<c10::optional<T>, AllowDeprecatedTypes>
230
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
231
+
232
+ template<class T, bool AllowDeprecatedTypes>
233
+ struct assert_is_valid_output_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
234
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
235
+
236
+ template<class Key, class Value, bool AllowDeprecatedTypes>
237
+ struct assert_is_valid_output_type<Dict<Key, Value>, AllowDeprecatedTypes>
238
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
239
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
240
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
241
+ static_assert(!std::is_same<Value, at::Scalar>::value,
242
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
243
+ };
244
+
245
+ template<class Key, class Value, bool AllowDeprecatedTypes>
246
+ struct assert_is_valid_output_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
247
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
248
+ static_assert(AllowDeprecatedTypes,
249
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
250
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
251
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
252
+ static_assert(!std::is_same<Value, at::Scalar>::value,
253
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
254
+ };
255
+
256
+ template<class T, bool AllowDeprecatedTypes>
257
+ struct assert_is_valid_output_type<List<T>, AllowDeprecatedTypes>
258
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
259
+ static_assert(!std::is_same<T, at::Scalar>::value,
260
+ "You tried to register a kernel with an unsupported output type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
261
+ };
262
+
263
+ template<class T, bool AllowDeprecatedTypes>
264
+ struct assert_is_valid_output_type<std::vector<T>, AllowDeprecatedTypes>
265
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
266
+ static_assert(!std::is_same<T, at::Scalar>::value,
267
+ "You tried to register a kernel with an unsupported output type: std::vector<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
268
+ // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector<T>. Please use List<T> instead.");
269
+ };
270
+
271
+ template<class T, size_t N, bool AllowDeprecatedTypes>
272
+ struct assert_is_valid_output_type<std::array<T, N>, AllowDeprecatedTypes>
273
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
274
+ static_assert(!std::is_same<T, at::Scalar>::value,
275
+ "You tried to register a kernel with an unsupported output type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
276
+ };
277
+
278
+ // The following specialisations of assert_is_valid_output_type are technically not
279
+ // necessary since we would hit the base case and show an error message
280
+ // there if they didn't exist, but we can show a better error message
281
+ // in some common error scenarios.
282
+ template<class T, bool AllowDeprecatedTypes>
283
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
284
+ // There is no reason to support float when we have double. Keep the API lean.
285
+ static_assert(guts::false_t<T>::value,
286
+ "You tried to register a kernel with an unsupported output type: float. Please use double instead.");
287
+ };
288
+ template<class T, bool AllowDeprecatedTypes>
289
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
290
+ static_assert(guts::false_t<T>::value,
291
+ "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead.");
292
+ };
293
+ template<class T, bool AllowDeprecatedTypes>
294
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
295
+ static_assert(guts::false_t<T>::value,
296
+ "You tried to register a kernel with an unsupported output type: vector<bool>. Please use List<bool> instead.");
297
+ };
298
+ template<class T, bool AllowDeprecatedTypes>
299
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
300
+ static_assert(guts::false_t<T>::value,
301
+ "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead.");
302
+ };
303
+
304
+ // ivalue_to_arg
305
+
306
+ template<class T>
307
+ struct decay_if_not_tensor final {
308
+ using type = std::decay_t<T>;
309
+ };
310
+
311
+ template<>
312
+ struct decay_if_not_tensor<at::Tensor&> final {
313
+ using type = at::Tensor&;
314
+ };
315
+
316
+ template<>
317
+ struct decay_if_not_tensor<const at::Tensor&> final {
318
+ using type = const at::Tensor&;
319
+ };
320
+
321
+ template<class T, bool AllowDeprecatedTypes>
322
+ struct ivalue_to_arg final {
323
+ static decltype(auto) call(IValue& v) {
324
+ assert_is_valid_input_type<T, AllowDeprecatedTypes>();
325
+ return std::move(v).to<T>();
326
+ }
327
+ };
328
+
329
+ // The following two specializations take advantage of specialized
330
+ // `toTensor()` overloads on IValue to avoid copying.
331
+ template<bool AllowDeprecatedTypes>
332
+ struct ivalue_to_arg<at::Tensor&, AllowDeprecatedTypes> final {
333
+ // We cannot use the default implementation if they asked for an
334
+ // `at::Tensor&` because it moves from the IValue, so it can't get
335
+ // an lvalue reference.
336
+ static at::Tensor& call(IValue& v) {
337
+ // Tensor& is valid, don't bother asserting
338
+ return v.toTensor();
339
+ }
340
+ };
341
+
342
+ template<bool AllowDeprecatedTypes>
343
+ struct ivalue_to_arg<const at::Tensor&, AllowDeprecatedTypes> final {
344
+ // We should not use the default implementation if they asked for
345
+ // a `const at::Tensor&` because it moves from the IValue and they
346
+ // didn't ask for that.
347
+ static const at::Tensor& call(IValue& v) {
348
+ // const Tensor& is valid, don't bother asserting
349
+ return v.toTensor();
350
+ }
351
+ };
352
+
353
+ template<bool AllowDeprecatedTypes>
354
+ struct ivalue_to_arg<at::ITensorListRef, AllowDeprecatedTypes> final {
355
+ static List<at::Tensor> call(IValue& v) {
356
+ return v.toTensorList();
357
+ }
358
+ };
359
+
360
+ template<class T, bool AllowDeprecatedTypes>
361
+ struct ivalue_to_arg<ArrayRef<T>, AllowDeprecatedTypes> final {
362
+ // If an argument is ArrayRef<T>, convert the IValue to a std::vector<T> and pass that
363
+ // to the operator. std::vector<T> is implicitly convertible to ArrayRef<T>.
364
+ static std::vector<T> call(IValue& v) {
365
+ return ivalue_to_arg<std::vector<T>, AllowDeprecatedTypes>::call(v);
366
+ }
367
+ };
368
+ template<bool AllowDeprecatedTypes>
369
+ struct ivalue_to_arg<c10::SymIntArrayRef, AllowDeprecatedTypes> final {
370
+ static std::vector<c10::SymInt> call(IValue& v) {
371
+ if (v.isIntList()) {
372
+ std::vector<c10::SymInt> r;
373
+ auto src = v.toIntList();
374
+ std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
375
+ return r;
376
+ } else {
377
+ return ivalue_to_arg<std::vector<c10::SymInt>, AllowDeprecatedTypes>::call(v);
378
+ }
379
+ }
380
+ };
381
+ template<bool AllowDeprecatedTypes>
382
+ struct ivalue_to_arg<c10::OptionalArray<c10::SymInt>, AllowDeprecatedTypes> final {
383
+ static OptionalArray<c10::SymInt> call(IValue& v) {
384
+ if (v.isIntList()) {
385
+ std::vector<c10::SymInt> r;
386
+ auto src = v.toIntList();
387
+ std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
388
+ return OptionalArray<c10::SymInt>(std::move(r));
389
+ } else {
390
+ return std::move(v).to<OptionalArray<c10::SymInt>>();
391
+ }
392
+ }
393
+ };
394
+ template<class T, bool AllowDeprecatedTypes>
395
+ struct ivalue_to_arg<optional<ArrayRef<T>>, AllowDeprecatedTypes> final {
396
+ // If an argument is optional<ArrayRef<T>>, convert the IValue to an optional<std::vector<T>> and pass that
397
+ // to the operator. OptionalArray<T> is basically an optional<std::vector<T>> but implicitly convertible
398
+ // to optional<ArrayRef<T>>.
399
+ static OptionalArray<T> call(IValue& v) {
400
+ return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
401
+ }
402
+ };
403
+
404
+ template<class T, bool AllowDeprecatedTypes>
405
+ struct ivalue_to_arg<OptionalArrayRef<T>, AllowDeprecatedTypes> final {
406
+ // If an argument is OptionalArrayRef<T>, convert the IValue to an
407
+ // optional<std::vector<T>> and pass that to the operator. OptionalArray<T>
408
+ // is basically an optional<std::vector<T>> but implicitly convertible to
409
+ // OptionalArrayRef<T>
410
+ static OptionalArray<T> call(IValue& v) {
411
+ return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
412
+ }
413
+ };
414
+
415
+ // return_to_ivalue
416
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
417
+ struct return_to_ivalue final {};
418
+
419
+ template<class T, bool AllowDeprecatedTypes>
420
+ struct return_to_ivalue<T, AllowDeprecatedTypes, std::enable_if_t<!std::is_same<at::Tensor&, T>::value>> final {
421
+ static IValue call(T&& v) {
422
+ assert_is_valid_output_type<T, AllowDeprecatedTypes>();
423
+ return c10::ivalue::from(std::move(v));
424
+ }
425
+ static IValue copy(const T& v) {
426
+ assert_is_valid_output_type<T, AllowDeprecatedTypes>();
427
+ return IValue(v);
428
+ }
429
+ };
430
+
431
+ // Special case to allow kernels to return `Tensor&`.
432
+ // TODO Delete this once kernels don't do that anymore
433
+ template<bool AllowDeprecatedTypes>
434
+ struct return_to_ivalue<at::Tensor&, AllowDeprecatedTypes, void> final {
435
+ static IValue call(at::Tensor& v) {
436
+ return c10::ivalue::from(v);
437
+ }
438
+ static IValue copy(at::Tensor& v) {
439
+ return IValue(v);
440
+ }
441
+ };
442
+
443
+ // wrap_kernel_functor_unboxed_
444
+
445
+ template<class KernelFunctor, class OpSignature>
446
+ struct wrap_kernel_functor_unboxed_ final {};
447
+
448
+ // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet
449
+ // This includes kernels with 0 arguments.
450
+ template<class KernelFunctor, class ReturnType, class... ParameterTypes>
451
+ struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(ParameterTypes...)> final {
452
+ static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
453
+ "Return type mismatch");
454
+ static_assert(std::is_same<guts::typelist::typelist<ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
455
+ "Parameter types mismatch");
456
+
457
+ // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
458
+ static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) {
459
+ KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
460
+ // Note [Plumbing Keys Through The Dispatcher 2]
461
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
462
+ // This functor explicitly takes in a dispatchKeySet and drops it on the floor; it does not forward it to the registered kernel.
463
+ //
464
+ // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type
465
+ // DispatchKeySet.
466
+ // Most manually written kernels, however, do not take one; this functor serves to separate the calling convention
467
+ // of the dispatcher from the calling convention of manually written kernels.
468
+ return (*functor_)(std::forward<ParameterTypes>(args)...);
469
+ }
470
+ };
471
+
472
+ // This specialization is for kernels with a first argument of type DispatchKeySet
473
+ template<class KernelFunctor, class ReturnType, class... ParameterTypes>
474
+ struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(DispatchKeySet, ParameterTypes...)> final {
475
+ static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
476
+ "Return type mismatch");
477
+ static_assert(std::is_same<guts::typelist::typelist<DispatchKeySet, ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
478
+ "Parameter types mismatch");
479
+
480
+ // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
481
+ static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) {
482
+ KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
483
+ // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel.
484
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
485
+ return (*functor_)(dispatchKeySet, std::forward<ParameterTypes>(args)...);
486
+ }
487
+ };
488
+
489
+ template<class KernelFunctor>
490
+ using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_<KernelFunctor, typename guts::infer_function_traits_t<KernelFunctor>::func_type>;
491
+
492
+ // call_functor_with_args_from_stack
493
+
494
+ template<class Functor, bool AllowDeprecatedTypes, size_t... ivalue_arg_indices, typename... ArgTypes>
495
+ std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
496
+ call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence<ivalue_arg_indices...>, guts::typelist::typelist<ArgTypes...>*) {
497
+ (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning.
498
+
499
+ // We're explicitly filtering out DispatchKeySet from the argument list.
500
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
501
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
502
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
503
+ return wrap_kernel_functor_unboxed<Functor>::call(functor, dispatchKeySet,
504
+ ivalue_to_arg<typename decay_if_not_tensor<ArgTypes>::type, AllowDeprecatedTypes>::call(
505
+ torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices))
506
+ )...);
507
+ }
508
+
509
+ template<class Functor, bool AllowDeprecatedTypes>
510
+ std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
511
+ call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) {
512
+ // We're explicitly filtering out DispatchKeySet from the argument list.
513
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
514
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
515
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
516
+ using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<Functor>::parameter_types;
517
+ constexpr size_t num_ivalue_args = guts::typelist::size<ArgTypes>::value;
518
+ return call_functor_with_args_from_stack_<Functor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack, std::make_index_sequence<num_ivalue_args>(), static_cast<ArgTypes*>(nullptr));
519
+ }
520
+
521
+ // push_outputs
522
+
523
+ template<class OutputType, bool AllowDeprecatedTypes>
524
+ struct push_outputs final {
525
+ // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here
526
+ // to avoid one extra call to the move constructor in this case. This is still not a
527
+ // universal reference though because OutputType is an explicitly specified class
528
+ // template parameter.
529
+ static void call(OutputType&& output, Stack* stack) {
530
+ torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::call(std::forward<OutputType>(output)));
531
+ }
532
+ static void copy(const OutputType& output, Stack* stack) {
533
+ torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::copy(output));
534
+ }
535
+ };
536
+ template<class... OutputTypes, bool AllowDeprecatedTypes>
537
+ struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
538
+ static void call(std::tuple<OutputTypes...>&& output, Stack* stack) {
539
+ call_(std::move(output), stack, std::make_index_sequence<sizeof...(OutputTypes)>());
540
+ }
541
+ static void copy(const std::tuple<OutputTypes...>& output, Stack* stack) {
542
+ copy_(output, stack, std::make_index_sequence<sizeof...(OutputTypes)>());
543
+ }
544
+
545
+ private:
546
+ template<size_t... indices>
547
+ static void call_(std::tuple<OutputTypes...>&& output, Stack* stack, std::index_sequence<indices...>) {
548
+ torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::call(std::forward<OutputTypes>(std::get<indices>(output)))...);
549
+ }
550
+ template<size_t... indices>
551
+ static void copy_(const std::tuple<OutputTypes...>& output, Stack* stack, std::index_sequence<indices...>) {
552
+ torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::copy(std::get<indices>(output))...);
553
+ }
554
+ };
555
+ template<bool AllowDeprecatedTypes>
556
+ struct push_outputs<void, AllowDeprecatedTypes> final {
557
+ static void call(int /*dummy*/, Stack* /*stack*/) {
558
+ }
559
+ static void copy(int /*dummy*/, Stack* /*stack*/) {
560
+ }
561
+ };
562
+
563
+ // make_boxed_from_unboxed_functor
564
+
565
+ template<class KernelFunctor, bool AllowDeprecatedTypes>
566
+ struct make_boxed_from_unboxed_functor final {
567
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value,
568
+ "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
569
+
570
+ static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) {
571
+ using ReturnType = typename guts::infer_function_traits_t<KernelFunctor>::return_type;
572
+ // We're explicitly filtering out DispatchKeySet from the argument list.
573
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
574
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
575
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
576
+ using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::parameter_types;
577
+ constexpr bool has_outputs = !std::is_same<void, ReturnType>::value;
578
+ constexpr size_t num_inputs = guts::typelist::size<ArgTypes>::value;
579
+ if constexpr (has_outputs) {
580
+ // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value
581
+ // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`.
582
+ // [Note: VC++ and 'std': ambiguous symbol]
583
+ using ReturnType_ = ::std::decay_t<ReturnType>;
584
+ ReturnType_ output = call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
585
+ torch::jit::drop(*stack, num_inputs);
586
+ // See [Note: VC++ and 'std': ambiguous symbol]
587
+ push_outputs<ReturnType_, AllowDeprecatedTypes>::call(::std::move(output), stack);
588
+ } else {
589
+ call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
590
+ torch::jit::drop(*stack, num_inputs);
591
+ }
592
+ }
593
+ };
594
+ } // namespace impl
595
+
596
+ } // namespace c10
597
+
598
+ namespace torch {
599
+ using OperatorKernel = c10::OperatorKernel;
600
+ }
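The templates above (assert_is_valid_input_type/output_type, ivalue_to_arg, return_to_ivalue, wrap_kernel_functor_unboxed and make_boxed_from_unboxed_functor) are what turn an ordinary C++ functor kernel into a boxed kernel operating on an IValue stack. The sketch below is illustrative only and not part of the header: MyAddKernel and boxed_add are hypothetical names, and the function body spells out by hand roughly what make_boxed_from_unboxed_functor<MyAddKernel, false>::call does for a two-argument kernel.

#include <utility>
#include <ATen/ATen.h>
#include <ATen/core/boxing/OperatorKernel.h>
#include <ATen/core/stack.h>

// Hypothetical functor kernel; the name and operation are assumptions for this sketch.
struct MyAddKernel final : public c10::OperatorKernel {
  at::Tensor operator()(const at::Tensor& a, int64_t b) {
    return a + b;
  }
};

// Rough hand-written equivalent of the boxed wrapper generated above.
void boxed_add(MyAddKernel& kernel, torch::jit::Stack* stack) {
  // ivalue_to_arg: peek both inputs off the stack and convert IValue -> C++ type.
  const at::Tensor& a = torch::jit::peek(*stack, 0, 2).toTensor();
  int64_t b = torch::jit::peek(*stack, 1, 2).toInt();
  // Unboxed call into the functor.
  at::Tensor out = kernel(a, b);
  // Drop the consumed inputs, then push the result back (return_to_ivalue / push_outputs).
  torch::jit::drop(*stack, 2);
  torch::jit::push(*stack, std::move(out));
}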
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <gtest/gtest.h>
4
+ #include <gmock/gmock.h>
5
+
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/core/dispatch/Dispatcher.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <c10/core/CPUAllocator.h>
10
+ #include <c10/util/irange.h>
11
+
12
+ template<class... Inputs>
13
+ inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) {
14
+ return {std::forward<Inputs>(inputs)...};
15
+ }
16
+
17
+ inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) {
18
+ auto* allocator = c10::GetCPUAllocator();
19
+ int64_t nelements = 1;
20
+ auto dtype = caffe2::TypeMeta::Make<float>();
21
+ int64_t size_bytes = nelements * dtype.itemsize();
22
+ auto storage_impl = c10::make_intrusive<c10::StorageImpl>(
23
+ c10::StorageImpl::use_byte_size_t(),
24
+ size_bytes,
25
+ allocator->allocate(size_bytes),
26
+ allocator,
27
+ /*resizable=*/true);
28
+ at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype);
29
+ // TODO: We add this to simulate the ideal case where we only have Autograd backend keys
30
+ // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl
31
+ // constructor by default.
32
+ if (!requires_grad) {
33
+ t.unsafeGetTensorImpl()->remove_autograd_key();
34
+ }
35
+ return t;
36
+ }
37
+
38
+ inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) {
39
+ return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad);
40
+ }
41
+
42
+ template<class... Args>
43
+ inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) {
44
+ auto stack = makeStack(std::forward<Args>(args)...);
45
+ op.callBoxed(&stack);
46
+ return stack;
47
+ }
48
+
49
+ template<class Result, class... Args>
50
+ inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
51
+ return op.typed<Result(Args...)>().call(std::forward<Args>(args)...);
52
+ }
53
+
54
+ template<class Result, class... Args>
55
+ inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
56
+ return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...);
57
+ }
58
+
59
+ template<class Result, class... Args>
60
+ inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) {
61
+ return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...);
62
+ }
63
+
64
+ inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
65
+ auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
66
+ EXPECT_ANY_THROW(
67
+ callOp(*op, dummyTensor(dispatch_key), 5);
68
+ );
69
+ }
70
+
71
+ inline void expectDoesntFindOperator(const char* op_name) {
72
+ auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
73
+ EXPECT_FALSE(op.has_value());
74
+ }
75
+
76
+ template<class Exception, class Functor>
77
+ inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
78
+ try {
79
+ std::forward<Functor>(functor)();
80
+ } catch (const Exception& e) {
81
+ EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains));
82
+ return;
83
+ }
84
+ ADD_FAILURE() << "Expected to throw exception containing \""
85
+ << expectMessageContains << "\" but didn't throw";
86
+ }
87
+
88
+ template<class T, size_t N>
89
+ void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) {
90
+ EXPECT_EQ(expected.size(), actual.size());
91
+ for (const auto i : c10::irange(expected.size())) {
92
+ EXPECT_EQ(expected[i], actual[i]);
93
+ }
94
+ }
95
+
96
+ template<class T>
97
+ void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) {
98
+ EXPECT_EQ(expected.size(), actual.size());
99
+ for (const auto i : c10::irange(expected.size())) {
100
+ EXPECT_EQ(expected[i], actual[i]);
101
+ }
102
+ }
103
+
104
+ template<class T>
105
+ void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) {
106
+ EXPECT_EQ(expected.size(), actual.size());
107
+ for (const auto i : c10::irange(expected.size())) {
108
+ EXPECT_EQ(expected[i], actual.get(i));
109
+ }
110
+ }
111
+
112
+ template<class T>
113
+ void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) {
114
+ EXPECT_EQ(expected.size(), actual.size());
115
+ for (const auto i : c10::irange(expected.size())) {
116
+ EXPECT_EQ(expected[i], actual[i]);
117
+ }
118
+ }
119
+
120
+ // NB: This is not really sound, but all of the type sets constructed here
121
+ // are singletons so it's fine
122
+ static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) {
123
+ return legacyExtractDispatchKey(t.key_set());
124
+ }
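A hedged sketch of how these helpers are combined in a gtest. The operator name "_test::dummy" and the assumption that a CPU kernel with signature (Tensor, int) -> Tensor has already been registered for it are illustrative, not taken from this file.

// Illustrative test only; the schema "_test::dummy(Tensor t, int i) -> Tensor"
// is assumed to have been registered elsewhere with a CPU kernel.
TEST(OpRegistrationTest, callsCpuKernelThroughBoxedApi) {
  auto op = c10::Dispatcher::singleton().findSchema({"_test::dummy", ""});
  ASSERT_TRUE(op.has_value());

  // makeStack/callOp: build a stack from the arguments and invoke the boxed kernel.
  auto outputs = callOp(*op, dummyTensor(c10::DispatchKey::CPU), int64_t(5));

  ASSERT_EQ(outputs.size(), 1u);
  EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(outputs[0].toTensor()));
}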
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h ADDED
@@ -0,0 +1,160 @@
1
+ #pragma once
2
+
3
+ /**
4
+ * This file contains functionality to take a C++ function and infer its
5
+ * c10::FunctionSchema.
6
+ */
7
+
8
+ #include <ATen/core/function_schema.h>
9
+ #include <c10/util/Metaprogramming.h>
10
+
11
+ namespace c10 {
12
+ namespace detail {
13
+
14
+ namespace infer_schema {
15
+
16
+ /// The templated inference code creates `ArgumentDef` instead of `Argument`,
17
+ /// because that can be constructed at compile time and has a much smaller
18
+ /// binary size than having calls to `Argument` constructors in the template.
19
+ /// Creating `Argument` objects from `ArgumentDef` can then be done at
20
+ /// runtime in a non-templated way.
21
+ struct ArgumentDef final {
22
+ using GetTypeFn = TypePtr();
23
+ GetTypeFn* getTypeFn;
24
+ GetTypeFn* getFakeTypeFn;
25
+ constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
26
+ explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
27
+ };
28
+
29
+ template<bool V>
30
+ struct bool_t {};
31
+ template<> struct bool_t<true> : std::true_type {};
32
+ template<> struct bool_t<false> : std::false_type {};
33
+
34
+ /// Checks the static C++ types `Types` for correctness to catch common error cases.
35
+ template <class... Types>
36
+ constexpr int checkStaticTypes() {
37
+ // Give nice error messages for some of the common error cases.
38
+ // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
39
+ static_assert(std::conjunction<
40
+ bool_t<!std::is_integral<Types>::value || std::is_same<Types, int8_t>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
41
+ >::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
42
+ static_assert(std::conjunction<
43
+ bool_t<!std::is_same<Types, float>::value>...
44
+ >::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
45
+ return 0;
46
+ }
47
+
48
+ template <typename... Ts, size_t... Is>
49
+ constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
50
+ return (
51
+ // Check types for common errors
52
+ checkStaticTypes<Ts...>(),
53
+
54
+ // Create the return value
55
+ std::array<ArgumentDef, sizeof...(Ts)>{
56
+ ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
57
+ );
58
+ }
59
+
60
+ /// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
61
+ /// as template arguments.
62
+ template<class ParameterTypes> struct createArguments final {};
63
+ template<class... ParameterTypes>
64
+ struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
65
+ static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
66
+ return createArgumentVectorFromTypes<ParameterTypes...>(
67
+ std::make_index_sequence<sizeof...(ParameterTypes)>()
68
+ );
69
+ }
70
+ };
71
+
72
+ /// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
73
+ /// as a tuple (i.e. in the way c10 kernels return values).
74
+ /// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
75
+ /// It can be an empty tuple<>, or void for kernels that don't return anything.
76
+ /// It can be a single type A (i.e. no tuple) for the case where a kernel just
77
+ /// returns one value.
78
+ template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};
79
+
80
+ template<class... ReturnTypes>
81
+ struct createReturns<std::tuple<ReturnTypes...>, void> final {
82
+ static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
83
+ return createArgumentVectorFromTypes<ReturnTypes...>(
84
+ std::make_index_sequence<sizeof...(ReturnTypes)>()
85
+ );
86
+ }
87
+ };
88
+
89
+ template<class ReturnType>
90
+ struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
91
+ static constexpr std::array<ArgumentDef, 1> call() {
92
+ return createReturns<std::tuple<ReturnType>>::call();
93
+ }
94
+ };
95
+
96
+ template<>
97
+ struct createReturns<void, void> final {
98
+ static constexpr std::array<ArgumentDef, 0> call() {
99
+ return createReturns<std::tuple<>>::call();
100
+ }
101
+ };
102
+
103
+ template <typename ReturnType>
104
+ struct createSingleReturn {
105
+ static constexpr std::array<ArgumentDef, 1> call() {
106
+ return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
107
+ }
108
+ };
109
+
110
+ TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
111
+ TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
112
+
113
+ /// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
114
+ /// function. Flattens std::tuple returns into multiple return types
115
+ template <typename FunctionTraits>
116
+ FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() {
117
+ using ReturnType = typename FunctionTraits::return_type;
118
+ using ParameterTypes = typename FunctionTraits::parameter_types;
119
+
120
+ // arguments and returns are computed into a std::array at compile time and embedded into the binary.
121
+ // The only code executed at runtime here is the one that creates a std::vector
122
+ // of the arguments/returns from the std::array.
123
+ constexpr auto arguments = createArguments<ParameterTypes>::call();
124
+ constexpr auto returns = createReturns<ReturnType>::call();
125
+
126
+ return make_function_schema(arguments, returns);
127
+ }
128
+
129
+ /// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
130
+ /// function. Preserves std::tuple returns as a Tuple return type
131
+ template <typename FunctionTraits>
132
+ FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) {
133
+ using ReturnType = typename FunctionTraits::return_type;
134
+ using ParameterTypes = typename FunctionTraits::parameter_types;
135
+
136
+ // arguments and returns are computed into a std::array at compile time and embedded into the binary.
137
+ // The only code executed at runtime here is the one that creates a std::vector
138
+ // of the arguments/returns from the std::array.
139
+ constexpr auto arguments = createArguments<ParameterTypes>::call();
140
+ constexpr auto returns = createSingleReturn<ReturnType>::call();
141
+
142
+ return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
143
+ }
144
+
145
+ }
146
+ }
147
+
148
+ template<class FuncType>
149
+ FunctionSchema inferFunctionSchemaFlattenedReturns() {
150
+ return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns<guts::infer_function_traits_t<FuncType>>();
151
+ }
152
+
153
+ template<class FuncType>
154
+ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) {
155
+ return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
156
+ }
157
+
158
+ TORCH_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
159
+
160
+ }
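A minimal sketch of what the inference entry points above produce for a plain C++ signature. my_kernel and the quoted schema text are illustrative assumptions; argument names in the inferred schema are auto-generated, and an operator name is only attached by the variant that takes one.

#include <ATen/ATen.h>
#include <ATen/core/op_registration/infer_schema.h>

// Hypothetical kernel signature used only to drive schema inference.
at::Tensor my_kernel(const at::Tensor& a, int64_t b);

void infer_schema_example() {
  // Roughly "(Tensor _0, int _1) -> Tensor"; no operator name is attached here.
  c10::FunctionSchema inferred =
      c10::inferFunctionSchemaFlattenedReturns<decltype(my_kernel)>();

  // The named variant attaches operator and overload names and keeps a
  // std::tuple return as a single Tuple return type.
  c10::FunctionSchema named =
      c10::inferFunctionSchemaSingleReturn<decltype(my_kernel)>("myns::my_kernel", "");

  (void)inferred;
  (void)named;
}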
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h ADDED
@@ -0,0 +1,596 @@
1
+ #pragma once
2
+
3
+ /**
4
+ * Include this file if you want to register operators. It includes all
5
+ * functionality needed to do so for you.
6
+ */
7
+
8
+ #include <c10/core/DispatchKey.h>
9
+ #include <c10/core/DispatchKeySet.h>
10
+ #include <c10/core/CompileTimeFunctionPointer.h>
11
+ #include <ATen/core/boxing/KernelFunction.h>
12
+ #include <ATen/core/dispatch/CppSignature.h>
13
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
14
+ #include <ATen/core/op_registration/infer_schema.h>
15
+ #if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD)
16
+ #include <torch/csrc/jit/frontend/function_schema_parser.h>
17
+ #endif
18
+ #include <ATen/core/ATenOpList.h>
19
+
20
+ namespace c10 {
21
+
22
+ namespace detail {
23
+ // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
24
+ // We do this because every argument in a function schema is expected to be convertable
25
+ // to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
26
+ // See Note [Plumbing Keys Through The Dispatcher]
27
+ template<class KernelFunctor>
28
+ std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
29
+ using func_type = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::func_type;
30
+ return std::make_unique<FunctionSchema>(inferFunctionSchemaFlattenedReturns<func_type>());
31
+ }
32
+ }
33
+
34
+ /**
35
+ * An instance of this class handles the registration for one or more operators.
36
+ * Make sure you keep the RegisterOperators instance around since it will
37
+ * deregister the operator it's responsible for in its destructor.
38
+ *
39
+ * Example:
40
+ *
41
+ * > namespace {
42
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
43
+ * > public:
44
+ * > Tensor operator()(Tensor a, Tensor b) {...}
45
+ * > };
46
+ * > }
47
+ * >
48
+ * > static auto registry = c10::RegisterOperators()
49
+ * > .op(c10::RegisterOperators::options()
50
+ * > .schema("my_op")
51
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
52
+ */
53
+ class TORCH_API RegisterOperators final {
54
+ public:
55
+ RegisterOperators() = default;
56
+ ~RegisterOperators() = default;
57
+
58
+ RegisterOperators(const RegisterOperators&) = delete;
59
+ RegisterOperators& operator=(const RegisterOperators&) = delete;
60
+ RegisterOperators(RegisterOperators&&) noexcept = default;
61
+ RegisterOperators& operator=(RegisterOperators&&) noexcept = default;
62
+
63
+ class TORCH_API Options final {
64
+ public:
65
+ Options(const Options&) = delete;
66
+ Options(Options&&) noexcept = delete;
67
+ Options& operator=(const Options&) = delete;
68
+ Options& operator=(Options&&) noexcept = delete;
69
+
70
+ // internal-only for registering stack based kernels
71
+ template<KernelFunction::BoxedKernelFunction* kernel_func>
72
+ Options&& kernel(DispatchKey dispatch_key) && {
73
+ return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
74
+ }
75
+
76
+ // internal-only for registering stack based catch-all kernels
77
+ template<KernelFunction::BoxedKernelFunction* kernel_func>
78
+ Options&& catchAllKernel() && {
79
+ return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
80
+ }
81
+
82
+ // internal only for registering caffe2 ops
83
+ Options&& schema(FunctionSchema&& schema) {
84
+ TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration.");
85
+ schemaOrName_ = FunctionSchema(std::move(schema));
86
+ return std::move(*this);
87
+ }
88
+
89
+ /**
90
+ * Use this to specify the schema for an operator. You can also specify
91
+ * the operator name only to have the function signature part of the
92
+ * schema be inferred from the kernel function.
93
+ *
94
+ * Example:
95
+ *
96
+ * > // Infer function signature from my_kernel_cpu
97
+ * > static auto registry = c10::RegisterOperators()
98
+ * > .op(c10::RegisterOperators::options()
99
+ * > .schema("my_op")
100
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
101
+ * >
102
+ * >
103
+ * > // Explicitly specify full schema
104
+ * > static auto registry = c10::RegisterOperators()
105
+ * > .op(c10::RegisterOperators::options()
106
+ * > .schema("my_op(Tensor a) -> Tensor")
107
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
108
+ */
109
+ Options&& schema(const std::string& schemaOrName) {
110
+ TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration.");
111
+
112
+ #if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD)
113
+ throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
114
+ #else
115
+ schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName);
116
+ #endif
117
+
118
+ return std::move(*this);
119
+ }
120
+
121
+ /**
122
+ * Use this to register an operator whose kernel is implemented as a functor.
123
+ * The kernel is only called for inputs matching the given dispatch key.
124
+ * You can register multiple kernels for different dispatch keys.
125
+ *
126
+ * Example:
127
+ *
128
+ * > namespace {
129
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
130
+ * > public:
131
+ * > Tensor operator()(Tensor a, Tensor b) {...}
132
+ * > };
133
+ * > }
134
+ * >
135
+ * > static auto registry = c10::RegisterOperators()
136
+ * > .op(c10::RegisterOperators::options()
137
+ * > .schema("my_op")
138
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
139
+ *
140
+ * The functor constructor can take arguments to configure the kernel.
141
+ * The arguments are defined in the kernel registration.
142
+ * Example:
143
+ *
144
+ * > namespace {
145
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
146
+ * > public:
147
+ * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
148
+ * > : ... {...}
149
+ * >
150
+ * > Tensor operator()(Tensor a, Tensor b) {...}
151
+ * > };
152
+ * > }
153
+ * >
154
+ * > static auto registry = c10::RegisterOperators()
155
+ * > .op(c10::RegisterOperators::options()
156
+ * > .schema("my_op")
157
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU, "some_configuration", 3, true));
158
+ */
159
+ template<class KernelFunctor, class... ConstructorParameters>
160
+ // enable_if: only enable it if KernelFunctor is actually a functor
161
+ std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
162
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
163
+ static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
164
+
165
+ return std::move(*this).kernel(
166
+ dispatch_key,
167
+ KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
168
+ impl::CppSignature::make<KernelFunctor>(),
169
+ detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
170
+ );
171
+ }
172
+
173
+ /**
174
+ * Use this to register an operator whose kernel is implemented as a functor.
175
+ * The kernel is a catch-all kernel, meaning it's called independent from
176
+ * the input. Dispatch is disabled for this operator.
177
+ *
178
+ * Example:
179
+ *
180
+ * > namespace {
181
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
182
+ * > public:
183
+ * > Tensor operator()(Tensor a, Tensor b) {...}
184
+ * > };
185
+ * > }
186
+ * >
187
+ * > static auto registry = c10::RegisterOperators()
188
+ * > .op(c10::RegisterOperators::options()
189
+ * > .schema("my_op")
190
+ * > .catchAllKernel<my_kernel_cpu>());
191
+ *
192
+ * The functor constructor can take arguments to configure the kernel.
193
+ * The arguments are defined in the kernel registration.
194
+ * Example:
195
+ *
196
+ * > namespace {
197
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
198
+ * > public:
199
+ * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
200
+ * > : ... {...}
201
+ * >
202
+ * > Tensor operator()(Tensor a, Tensor b) {...}
203
+ * > };
204
+ * > }
205
+ * >
206
+ * > static auto registry = c10::RegisterOperators()
207
+ * > .op(c10::RegisterOperators::options()
208
+ * > .schema("my_op")
209
+ * > .catchAllKernel<my_kernel_cpu>("some_configuration", 3, true));
210
+ */
211
+ template<class KernelFunctor, class... ConstructorParameters>
212
+ // enable_if: only enable it if KernelFunctor is actually a functor
213
+ std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
214
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
215
+ static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
216
+
217
+ return std::move(*this).kernel(
218
+ c10::nullopt,
219
+ KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
220
+ impl::CppSignature::make<KernelFunctor>(),
221
+ detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
222
+ );
223
+ }
224
+
225
+ /**
226
+ * Use this to register an operator whose kernel is implemented by a function.
227
+ * The kernel is only called for inputs matching the given dispatch key.
228
+ * You can register multiple kernels for different dispatch keys.
229
+ *
230
+ * Example:
231
+ *
232
+ * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
233
+ * >
234
+ * > static auto registry = c10::RegisterOperators()
235
+ * > .op(c10::RegisterOperators::options()
236
+ * > .schema("my_op")
237
+ * > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(DispatchKey::CPU));
238
+ */
239
+ template<class FuncType, FuncType* kernel_func>
240
+ // enable_if: only enable it if FuncType is actually a function
241
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
242
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
243
+ static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
244
+
245
+ return std::move(*this).kernel(
246
+ dispatch_key,
247
+ KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
248
+ impl::CppSignature::make<FuncType>(),
249
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
250
+ detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
251
+ );
252
+ }
253
+
254
+ /**
255
+ * Use this to register an operator whose kernel is implemented by a function.
256
+ * The kernel is a catch-all kernel, meaning it's called independent from
257
+ * the input. Dispatch is disabled for this operator.
258
+ *
259
+ * Example:
260
+ *
261
+ * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
262
+ * >
263
+ * > static auto registry = c10::RegisterOperators()
264
+ * > .op(c10::RegisterOperators::options()
265
+ * > .schema("my_op")
266
+ * > .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
267
+ */
268
+ template<class FuncType, FuncType* kernel_func>
269
+ // enable_if: only enable it if FuncType is actually a function
270
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
271
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
272
+ static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
273
+
274
+ return std::move(*this).kernel(
275
+ c10::nullopt,
276
+ KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
277
+ impl::CppSignature::make<FuncType>(),
278
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
279
+ detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
280
+ );
281
+ }
282
+
283
+ template<class FuncType>
284
+ // enable_if: only enable it if FuncType is actually a function
285
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
286
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
287
+ TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
288
+
289
+ return std::move(*this).kernel(
290
+ dispatch_key,
291
+ KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
292
+ impl::CppSignature::make<FuncType>(),
293
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
294
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
295
+ );
296
+ }
297
+
298
+ template<class FuncType>
299
+ // enable_if: only enable it if FuncType is actually a function
300
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
301
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
302
+ TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
303
+
304
+ return std::move(*this).kernel(
305
+ c10::nullopt,
306
+ KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
307
+ impl::CppSignature::make<FuncType>(),
308
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
309
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
310
+ );
311
+ }
312
+
313
+ /**
314
+ * Use this to register an operator whose kernel is implemented as a lambda.
315
+ * The kernel is only called for inputs matching the given dispatch key.
316
+ * You can register multiple kernels for different dispatch keys.
317
+ *
318
+ * The lambda must be stateless, i.e. not have a capture. If your kernel
319
+ * needs to store some configuration parameters, write the kernel as a
320
+ * functor instead.
321
+ *
322
+ * Example:
323
+ *
324
+ * > static auto registry = c10::RegisterOperators()
325
+ * > .op(c10::RegisterOperators::options()
326
+ * > .schema("my_op")
327
+ * > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...}));
328
+ */
329
+ template<class Lambda>
330
+ // enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
331
+ std::enable_if_t<
332
+ guts::is_functor<std::decay_t<Lambda>>::value
333
+ && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
334
+ Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && {
335
+ static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
336
+
337
+ // We don't support stateful lambdas (i.e. lambdas with a capture), because their
338
+ // behavior would be nonobvious. A functor kernel with cache gets a new instance of
339
+ // its cache each time the kernel is looked up from the dispatch table.
340
+ // A lambda with a capture would be global and share its capture between all kernel lookups.
341
+ // So, instead of making users having to think about it (including the thread-safety
342
+ // issues this causes), let's just forbid stateful lambdas altogether.
343
+ static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
344
+
345
+ return std::move(*this).kernel(
346
+ dispatch_key,
347
+ KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
348
+ impl::CppSignature::make<Lambda>(),
349
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
350
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
351
+ );
352
+ }
353
+
354
+ /**
355
+ * Use this to register an operator whose kernel is implemented as a lambda.
356
+ * The kernel is a catch-all kernel, meaning it's called independent from
357
+ * the input. Dispatch is disabled for this operator.
358
+ *
359
+ * The lambda must be stateless, i.e. not have a capture. If your kernel
360
+ * needs to store some configuration parameters, write the kernel as a
361
+ * functor instead.
362
+ *
363
+ * Example:
364
+ *
365
+ * > static auto registry = c10::RegisterOperators()
366
+ * > .op(c10::RegisterOperators::options()
367
+ * > .schema("my_op")
368
+ * > .catchAllKernel([] (Tensor a) -> Tensor {...}));
369
+ */
370
+ template<class Lambda>
371
+ // enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
372
+ std::enable_if_t<
373
+ guts::is_functor<std::decay_t<Lambda>>::value
374
+ && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
375
+ Options&&> catchAllKernel(Lambda&& lambda) && {
376
+ static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
377
+
378
+ // We don't support stateful lambdas (i.e. lambdas with a capture), because their
379
+ // behavior would be nonobvious.
380
+ // A lambda with a capture would be global and share its capture between all kernel lookups.
381
+ // This would be a likely source for unexpected race conditions, so we forbid it.
382
+ // If a kernel really needs global state, they can just have regular global state
383
+ // in their .cpp file next to the kernel lambda.
384
+ static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
385
+
386
+ return std::move(*this).kernel(
387
+ c10::nullopt,
388
+ KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
389
+ impl::CppSignature::make<Lambda>(),
390
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
391
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
392
+ );
393
+ }
394
+
395
+ Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && {
396
+ TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration.");
397
+ aliasAnalysisKind_ = aliasAnalysisKind;
398
+ return std::move(*this);
399
+ }
400
+
401
+ private:
402
+ Options&& kernel(c10::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
403
+ KernelRegistrationConfig config;
404
+ config.dispatch_key = dispatch_key;
405
+ config.func = std::move(func);
406
+ config.cpp_signature = cpp_signature;
407
+ config.inferred_function_schema = std::move(inferred_function_schema);
408
+ kernels.push_back(std::move(config));
409
+ return std::move(*this);
410
+ }
411
+
412
+ Options()
413
+ : schemaOrName_(c10::nullopt)
414
+ , kernels()
415
+ , aliasAnalysisKind_(c10::nullopt)
416
+ {}
417
+
418
+ // KernelRegistrationConfig accumulates all information from the config
419
+ // parameters passed to a RegisterOperators::op() call into one object.
420
+ struct KernelRegistrationConfig final {
421
+ KernelRegistrationConfig()
422
+ : dispatch_key(c10::nullopt)
423
+ , func()
424
+ , cpp_signature(c10::nullopt)
425
+ , inferred_function_schema(nullptr)
426
+ {}
427
+
428
+ c10::optional<DispatchKey> dispatch_key;
429
+ KernelFunction func;
430
+ c10::optional<impl::CppSignature> cpp_signature;
431
+ std::unique_ptr<FunctionSchema> inferred_function_schema;
432
+ };
433
+
434
+ c10::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;
435
+
436
+ std::vector<KernelRegistrationConfig> kernels;
437
+ optional<AliasAnalysisKind> aliasAnalysisKind_;
438
+ friend class RegisterOperators;
439
+ friend class Library;
440
+ };
441
+
442
+ /**
443
+ * Call this to get an instance of registration options, which
444
+ * can be passed to a call to RegisterOperators::op() to specify
445
+ * these options for the operator registration.
446
+ * See class doc comment for examples.
447
+ */
448
+ static Options options() {
449
+ return {};
450
+ }
451
+
452
+ /**
453
+ * Call this to register an operator. See class doc comment for examples.
454
+ */
455
+ RegisterOperators&& op(Options&& options) && {
456
+ checkSchemaAndRegisterOp_(std::move(options));
457
+ return std::move(*this);
458
+ }
459
+
460
+ // Regular mutator version of the && version above
461
+ RegisterOperators& op(Options&& options) & {
462
+ checkSchemaAndRegisterOp_(std::move(options));
463
+ return *this;
464
+ }
465
+
466
+ /**
467
+ * This is a shorthand for RegisterOperators::op(Options) where you can
468
+ * specify the operator schema outside of the options parameter.
469
+ * See class doc comment for examples.
470
+ */
471
+ RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && {
472
+ return std::move(*this).op(std::move(options).schema(schemaOrName));
473
+ }
474
+
475
+ // internal only for registering caffe2 ops
476
+ RegisterOperators&& op(FunctionSchema schema, Options&& options) && {
477
+ return std::move(*this).op(std::move(options).schema(std::move(schema)));
478
+ }
479
+
480
+ template<class FuncType>
481
+ explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options())
482
+ : RegisterOperators() {
483
+ std::move(*this).op(schemaOrName, std::forward<FuncType>(func), std::move(options));
484
+ }
485
+
486
+ /**
487
+ * This API registers an operator based on a kernel function pointer.
488
+ *
489
+ * Given a kernel
490
+ *
491
+ * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
492
+ *
493
+ * This API looks like:
494
+ *
495
+ * > static auto registry = c10::RegisterOperators()
496
+ * > .op("my_op", &my_kernel_cpu);
497
+ *
498
+ * If your kernel is small and the overhead of calling it matters,
499
+ * then this API might be the wrong choice since the following API
500
+ * has a slightly lower overhead for calling into the kernel:
501
+ *
502
+ * > static auto registry = c10::RegisterOperators()
503
+ * > .op("my_op", c10::RegisterOperators::options()
504
+ * > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
505
+ *
506
+ * Or, alternatively, write your kernel as a functor:
507
+ *
508
+ * > namespace {
509
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
510
+ * > public:
511
+ * > Tensor operator()(Tensor a, Tensor b) {...}
512
+ * > };
513
+ * > }
514
+ * >
515
+ * > static auto registry = c10::RegisterOperators()
516
+ * > .op("my_op", c10::RegisterOperators::options()
517
+ * > .kernel<my_kernel_cpu>());
518
+ */
519
+ template<class FuncType>
520
+ // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
521
+ std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
522
+ op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
523
+ constexpr bool AllowLegacyTypes = true;
524
+ return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
525
+ c10::nullopt,
526
+ KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
527
+ impl::CppSignature::make<FuncType>(),
528
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
529
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
530
+ ));
531
+ }
532
+
533
+ /**
534
+ * This API registers an operator based on a kernel lambda.
535
+ *
536
+ * This API looks like:
537
+ *
538
+ * > static auto registry = c10::RegisterOperators()
539
+ * > .op("my_op", [] (Tensor a, Tensor b) {...});
540
+ *
541
+ * This is equivalent to:
542
+ *
543
+ * > static auto registry = c10::RegisterOperators()
544
+ * > .op("my_op", c10::RegisterOperators::options()
545
+ * > .catchAllKernel([] (Tensor a, Tensor b) {...}));
546
+ *
547
+ */
548
+ template<class Lambda>
549
+ // enable_if: only enable it if Lambda is actually a stateless lambda
550
+ std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
551
+ op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
552
+ static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
553
+
554
+ constexpr bool AllowLegacyTypes = true;
555
+ return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
556
+ c10::nullopt,
557
+ KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
558
+ impl::CppSignature::make<Lambda>(),
559
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
560
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
561
+ ));
562
+ }
563
+
564
+ template<class Lambda>
565
+ C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
566
+ // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
567
+ std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
568
+ op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
569
+ static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
570
+
571
+ constexpr bool AllowLegacyTypes = true;
572
+ return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
573
+ c10::nullopt,
574
+ KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
575
+ impl::CppSignature::make<Lambda>(),
576
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
577
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
578
+ ));
579
+ }
580
+
581
+ private:
582
+ void checkSchemaAndRegisterOp_(Options&& config);
583
+
584
+ static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options);
585
+ void checkNoDuplicateKernels_(const Options& options);
586
+ void registerOp_(Options&& options);
587
+
588
+ std::vector<RegistrationHandleRAII> registrars_;
589
+ };
590
+
591
+ } // namespace c10
592
+
593
+ namespace torch {
594
+ // Old-style API
595
+ using RegisterOperators = c10::RegisterOperators;
596
+ }
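
As a quick illustration (not part of the diff), the function-pointer and stateless-lambda op() overloads declared in this header could be used roughly as follows; the operator names, schemas, and kernels are hypothetical:

    #include <ATen/core/op_registration/op_registration.h>

    namespace {
    // Hypothetical CPU kernel; any plain function taking/returning Tensors works.
    at::Tensor my_add_cpu(at::Tensor a, at::Tensor b) { return a + b; }
    }  // namespace

    // The registration object must stay alive for as long as the operators should be registered.
    static auto registry = c10::RegisterOperators()
        // Function-pointer overload; the schema string is given explicitly here.
        .op("my_ns::my_add(Tensor a, Tensor b) -> Tensor", &my_add_cpu)
        // Stateless-lambda overload, equivalent to options().catchAllKernel(...) as documented above.
        .op("my_ns::my_mul(Tensor a, Tensor b) -> Tensor",
            [](at::Tensor a, at::Tensor b) { return a * b; });
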
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh ADDED
@@ -0,0 +1,47 @@
1
+ #include <ATen/cuda/CUDAContext.h>
2
+
3
+ #include <cuda_runtime.h>
4
+
5
+ namespace at::cuda {
6
+
7
+ /**
8
+ Computes ceil(a / b)
9
+ */
10
+ template <typename T>
11
+ __host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
12
+ return (a + b - 1) / b;
13
+ }
14
+
15
+ namespace {
16
+
17
+ // Threads per block for our apply kernel
18
+ // FIXME: use occupancy calculator instead
19
+ constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
20
+ constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
21
+
22
+ template <int step = 1>
23
+ inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
24
+ if (curDevice == -1) return false;
25
+ uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
26
+ uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
27
+ uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
28
+ if (numBlocks > maxGridX)
29
+ numBlocks = maxGridX;
30
+ grid = dim3(numBlocks);
31
+ return true;
32
+ }
33
+
34
+ constexpr int getApplyBlocksPerSM() {
35
+ return AT_APPLY_BLOCKS_PER_SM;
36
+ }
37
+
38
+ constexpr int getApplyBlockSize() {
39
+ return AT_APPLY_THREADS_PER_BLOCK;
40
+ }
41
+
42
+ inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
43
+ return dim3(max_threads_per_block);
44
+ }
45
+
46
+ } // anonymous namespace
47
+ } // namespace at::cuda
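
A minimal sketch of how these helpers might be used from a .cu translation unit (illustrative only; fill_kernel and launch_fill are hypothetical):

    #include <ATen/cuda/ApplyGridUtils.cuh>
    #include <c10/cuda/CUDAException.h>
    #include <c10/cuda/CUDAStream.h>

    template <typename T>
    __global__ void fill_kernel(T* out, T value, uint64_t n) {
      // Grid-stride loop, since getApplyGrid caps the grid at the device's maxGridSize[0].
      for (uint64_t i = static_cast<uint64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
           i < n;
           i += static_cast<uint64_t>(gridDim.x) * blockDim.x) {
        out[i] = value;
      }
    }

    void launch_fill(float* out, float value, uint64_t n, c10::DeviceIndex device) {
      const dim3 block = at::cuda::getApplyBlock();          // 512 threads by default
      dim3 grid;
      if (!at::cuda::getApplyGrid<1>(n, grid, device)) {     // step=1: one element per thread pass
        return;  // invalid device index
      }
      fill_kernel<float><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(out, value, n);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
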
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh ADDED
@@ -0,0 +1,508 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <c10/util/Half.h>
5
+ #include <c10/util/BFloat16.h>
6
+
7
+ #include <ATen/NumericUtils.h>
8
+
9
+ #if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))))
10
+ #include <cuda_bf16.h>
11
+ #endif
12
+
13
+ template <typename T>
14
+ struct AtomicFPOp;
15
+
16
+ template <>
17
+ struct AtomicFPOp<at::Half> {
18
+ template <typename func_t>
19
+ inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) {
20
+ unsigned int * address_as_ui =
21
+ (unsigned int *) ((char *)address - ((size_t)address & 2));
22
+ unsigned int old = *address_as_ui;
23
+ unsigned int assumed;
24
+
25
+ at::Half hsum;
26
+ do {
27
+ assumed = old;
28
+ hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
29
+ hsum = func(hsum, val);
30
+ old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
31
+ old = atomicCAS(address_as_ui, assumed, old);
32
+ } while (assumed != old);
33
+ hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
34
+ return hsum;
35
+ }
36
+ };
37
+
38
+ template <>
39
+ struct AtomicFPOp<at::BFloat16> {
40
+ template <typename func_t>
41
+ inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) {
42
+ unsigned int * address_as_ui =
43
+ (unsigned int *) ((char *)address - ((size_t)address & 2));
44
+ unsigned int old = *address_as_ui;
45
+ unsigned int assumed;
46
+
47
+ at::BFloat16 bsum;
48
+ do {
49
+ assumed = old;
50
+ bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
51
+ bsum = func(bsum, val);
52
+ old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x;
53
+ old = atomicCAS(address_as_ui, assumed, old);
54
+ } while (assumed != old);
55
+ bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
56
+ return bsum.x;
57
+ }
58
+ };
59
+
60
+ template <>
61
+ struct AtomicFPOp<double> {
62
+ template <typename func_t>
63
+ inline __device__ double operator() (double * address, double val, const func_t& func) {
64
+ unsigned long long int* address_as_ull = (unsigned long long int*)address;
65
+ unsigned long long int old = *address_as_ull;
66
+ unsigned long long int assumed;
67
+
68
+ do {
69
+ assumed = old;
70
+ old = atomicCAS(address_as_ull, assumed, func(val, assumed));
71
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
72
+ } while (assumed != old);
73
+
74
+ return __longlong_as_double(old);
75
+ }
76
+ };
77
+
78
+ #define ATOMIC_INTEGER_IMPL(NAME) \
79
+ template <typename T, size_t n> \
80
+ struct Atomic##NAME##IntegerImpl; \
81
+ \
82
+ template<typename T> \
83
+ struct Atomic##NAME##IntegerImpl<T, 1> { \
84
+ template <typename func_t> \
85
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
86
+ size_t offset = (size_t)address & 3; \
87
+ uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
88
+ uint32_t old = *address_as_ui; \
89
+ uint32_t shift = offset * 8; \
90
+ uint32_t old_byte; \
91
+ uint32_t newval; \
92
+ uint32_t assumed; \
93
+ \
94
+ do { \
95
+ assumed = old; \
96
+ old_byte = (old >> shift) & 0xff; \
97
+ newval = static_cast<uint8_t>(func(val, static_cast<T>(old_byte))); \
98
+ newval = (old & ~(0x000000ff << shift)) | (newval << shift); \
99
+ old = atomicCAS(address_as_ui, assumed, newval); \
100
+ } while (assumed != old); \
101
+ } \
102
+ }; \
103
+ \
104
+ template<typename T> \
105
+ struct Atomic##NAME##IntegerImpl<T, 2> { \
106
+ template <typename func_t> \
107
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
108
+ size_t offset = (size_t)address & 2; \
109
+ uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
110
+ bool is_32_align = offset; \
111
+ uint32_t old = *address_as_ui; \
112
+ uint32_t old_bytes; \
113
+ uint32_t newval; \
114
+ uint32_t assumed; \
115
+ \
116
+ do { \
117
+ assumed = old; \
118
+ old_bytes = is_32_align ? old >> 16 : old & 0xffff; \
119
+ newval = static_cast<uint16_t>(func(val, static_cast<T>(old_bytes))); \
120
+ newval = is_32_align ? (old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \
121
+ old = atomicCAS(address_as_ui, assumed, newval); \
122
+ } while (assumed != old); \
123
+ } \
124
+ }; \
125
+ \
126
+ template<typename T> \
127
+ struct Atomic##NAME##IntegerImpl<T, 4> { \
128
+ template <typename func_t> \
129
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
130
+ uint32_t * address_as_ui = (uint32_t *) (address); \
131
+ uint32_t old = *address_as_ui; \
132
+ uint32_t newval; \
133
+ uint32_t assumed; \
134
+ \
135
+ do { \
136
+ assumed = old; \
137
+ newval = static_cast<uint32_t>(func(val, static_cast<T>(old))); \
138
+ old = atomicCAS(address_as_ui, assumed, newval); \
139
+ } while (assumed != old); \
140
+ } \
141
+ }; \
142
+ \
143
+ template<typename T> \
144
+ struct Atomic##NAME##IntegerImpl<T, 8> { \
145
+ template <typename func_t> \
146
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
147
+ unsigned long long * address_as_ui = (unsigned long long *) (address); \
148
+ unsigned long long old = *address_as_ui; \
149
+ unsigned long long newval; \
150
+ unsigned long long assumed; \
151
+ \
152
+ do { \
153
+ assumed = old; \
154
+ newval = static_cast<uint64_t>(func(val, static_cast<T>(old))); \
155
+ old = atomicCAS(address_as_ui, assumed, newval); \
156
+ } while (assumed != old); \
157
+ } \
158
+ };
159
+
160
+
161
+ # define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \
162
+ static inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \
163
+ Atomic##NAME##IntegerImpl<DTYPE, sizeof(DTYPE)>()(address, \
164
+ val, \
165
+ [](DTYPE a, DTYPE b) { \
166
+ return OP; \
167
+ }); \
168
+ } \
169
+
170
+ ATOMIC_INTEGER_IMPL(Add)
171
+ GPU_ATOMIC_INTEGER(Add, a || b, bool)
172
+
173
+ // Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64)
174
+ static inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) {
175
+ AtomicAddIntegerImpl<uint8_t, sizeof(uint8_t)>()(address,
176
+ val,
177
+ [](uint8_t a, uint8_t b) {
178
+ return a + b;
179
+ });
180
+ }
181
+
182
+ static inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) {
183
+ AtomicAddIntegerImpl<int8_t, sizeof(int8_t)>()(address,
184
+ val,
185
+ [](int8_t a, int8_t b) {
186
+ return a + b;
187
+ });
188
+ }
189
+
190
+ static inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) {
191
+ AtomicAddIntegerImpl<int16_t, sizeof(int16_t)>()(address,
192
+ val,
193
+ [](int16_t a, int16_t b) {
194
+ return a + b;
195
+ });
196
+ }
197
+
198
+ static inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) {
199
+ return atomicAdd(address, val);
200
+ }
201
+
202
+ static inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) {
203
+ #if defined(USE_ROCM)
204
+ __atomic_fetch_add(address, val, __ATOMIC_RELAXED);
205
+ #else
206
+ static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed");
207
+ atomicAdd(reinterpret_cast<unsigned long long int *>(address), static_cast<unsigned long long int>(val));
208
+ #endif
209
+ }
210
+
211
+ static inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) {
212
+ #if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
213
+ return AtomicFPOp<at::Half>()(address, val,
214
+ [](at::Half hsum, at::Half val) {
215
+ return hsum + val;
216
+ });
217
+ #else
218
+ return atomicAdd(reinterpret_cast<__half*>(address), val);
219
+ #endif
220
+ }
221
+
222
+ static inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) {
223
+ #if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))
224
+ return AtomicFPOp<at::BFloat16>()(address, val,
225
+ [](at::BFloat16 bsum, at::BFloat16 val) {
226
+ return bsum + val;
227
+ });
228
+ #else
229
+ __nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val));
230
+ return *reinterpret_cast<c10::BFloat16*>(&r);
231
+ #endif
232
+ }
233
+
234
+ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
235
+ // from CUDA C Programmic Guide
236
+ static inline __device__ double atomicAdd(double* address, double val)
237
+ #if defined(__clang__) && defined(__CUDA__)
238
+ #pragma GCC diagnostic push
239
+ #pragma GCC diagnostic ignored "-Wgcc-compat"
240
+ __attribute__((enable_if(true, "")))
241
+ #pragma GCC diagnostic pop
242
+ #endif
243
+ {
244
+
245
+ return AtomicFPOp<double>()(address, val,
246
+ [](double val, unsigned long long int assumed) {
247
+ return __double_as_longlong(val + __longlong_as_double(assumed));
248
+ });
249
+ }
250
+ #elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__))
251
+
252
+ /* Note [hip-clang differences to hcc]
253
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
254
+ * The upcoming hip-clang compiler for ROCm differs from hcc in a few details.
255
+ * It exports the __HIP__ macro, we can hence differentiate between hcc and
256
+ * hip-clang. In the below, hcc only received support for atomicAdd with double
257
+ * typing after work week 18312. hip-clang had support from the first version.
258
+ * In general, the code-visible differences between hip-clang and hcc will be
259
+ * minimal.
260
+ */
261
+
262
+ #if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__
263
+ // This needs to be defined for the host side pass
264
+ static inline __device__ double atomicAdd(double *address, double val) { }
265
+ #endif
266
+ #endif
267
+
268
+ static inline __device__ double gpuAtomicAdd(double *address, double val) {
269
+ return atomicAdd(address, val);
270
+ }
271
+
272
+ static inline __device__ float gpuAtomicAdd(float *address, float val) {
273
+ return atomicAdd(address, val);
274
+ }
275
+
276
+ template<typename T>
277
+ static inline __device__ void gpuAtomicAdd(c10::complex<T> *address, c10::complex<T> val) {
278
+ gpuAtomicAdd(&address->real_, val.real_);
279
+ gpuAtomicAdd(&address->imag_, val.imag_);
280
+ }
281
+
282
+ /* Note [gpuAtomicAdd vs atomicAdd]
283
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
284
+ * Some extensions such as torchvision call atomicAdd()
285
+ * directly and require non-library provided data type support. Only for these, we
286
+ * continue to provide atomicAdd overloads.
287
+ */
288
+ static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
289
+ return gpuAtomicAdd(address, val);
290
+ }
291
+
292
+ static inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) {
293
+ return gpuAtomicAdd(address, val);
294
+ }
295
+
296
+ static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) {
297
+ gpuAtomicAdd(address, val);
298
+ }
299
+
300
+ static inline __device__ void atomicAdd(int8_t *address, int8_t val) {
301
+ gpuAtomicAdd(address, val);
302
+ }
303
+
304
+ static inline __device__ void atomicAdd(int16_t *address, int16_t val) {
305
+ gpuAtomicAdd(address, val);
306
+ }
307
+
308
+ static inline __device__ void atomicAdd(int64_t *address, int64_t val) {
309
+ gpuAtomicAdd(address, val);
310
+ }
311
+
312
+ static inline __device__ void atomicAdd(bool *address, bool val) {
313
+ gpuAtomicAdd(address, val);
314
+ }
315
+
316
+ /* Note [explicitly non-returning atomics]
317
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
318
+ * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet().
319
+ * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction.
320
+ * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd,
321
+ * therefore we need a new API 'gpuAtomicAddNoReturn'.
322
+ */
323
+ template<typename T>
324
+ static inline __device__ void gpuAtomicAddNoReturn(c10::complex<T> *address, c10::complex<T> val) { gpuAtomicAdd(address, val); }
325
+ static inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); }
326
+ static inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); }
327
+ static inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); }
328
+ static inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); }
329
+ static inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); }
330
+ static inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); }
331
+ static inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); }
332
+ static inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); }
333
+ static inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); }
334
+
335
+ /* Special case fp32 atomic. */
336
+ #if defined(USE_ROCM)
337
+ static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); }
338
+ #else
339
+ static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); }
340
+ #endif
341
+
342
+ // Atomic multiplication implementation.
343
+
344
+ ATOMIC_INTEGER_IMPL(Mul)
345
+ GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t)
346
+ GPU_ATOMIC_INTEGER(Mul, a * b, int8_t)
347
+ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
348
+ GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
349
+ GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
350
+
351
+ inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
352
+ return AtomicFPOp<at::Half>()(address, val,
353
+ [](at::Half bsum, at::Half val) {
354
+ return bsum * val;
355
+ });
356
+ }
357
+
358
+ inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) {
359
+ return AtomicFPOp<at::BFloat16>()(address, val,
360
+ [](at::BFloat16 bsum, at::BFloat16 val) {
361
+ return bsum * val;
362
+ });
363
+ }
364
+
365
+ inline __device__ double gpuAtomicMul(double * address, double val) {
366
+ return AtomicFPOp<double>()(address, val,
367
+ [](double val, unsigned long long int assumed) {
368
+ return __double_as_longlong(val * __longlong_as_double(assumed));
369
+ });
370
+ }
371
+
372
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
373
+ inline __device__ float gpuAtomicMul (float * address, float val) {
374
+ unsigned int* address_as_ull = (unsigned int*)address;
375
+ unsigned int old = *address_as_ull;
376
+ unsigned int assumed;
377
+
378
+ do {
379
+ assumed = old;
380
+ old = atomicCAS(address_as_ull, assumed,
381
+ __float_as_int(val *
382
+ __int_as_float(assumed)));
383
+
384
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
385
+ } while (assumed != old);
386
+
387
+ return __int_as_float(old);
388
+ }
389
+
390
+ // Atomic maximum implementation.
391
+
392
+ template <typename T>
393
+ __host__ __device__ T safe_max(T a, T b) {
394
+ #if defined(__HIPCC__)
395
+ // TODO: remove this special case for HIP when issue is fixed:
396
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
397
+ T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max<T>(a, b));
398
+ #else
399
+ T max = at::_isnan(b) ? b : std::max<T>(a, b);
400
+ #endif
401
+
402
+ return max;
403
+ }
404
+
405
+ ATOMIC_INTEGER_IMPL(Max)
406
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
407
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
408
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t)
409
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t)
410
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t)
411
+
412
+ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
413
+ return AtomicFPOp<at::Half>()(address, val,
414
+ [](at::Half bsum, at::Half val) {
415
+ return safe_max(bsum, val);
416
+ });
417
+ }
418
+
419
+ inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
420
+ return AtomicFPOp<at::BFloat16>()(address, val,
421
+ [](at::BFloat16 bsum, at::BFloat16 val) {
422
+ return safe_max(bsum, val);
423
+ });
424
+ }
425
+
426
+ inline __device__ double gpuAtomicMax(double * address, double val) {
427
+ return AtomicFPOp<double>()(address, val,
428
+ [](double val, unsigned long long int assumed) {
429
+ return __double_as_longlong(safe_max(val, __longlong_as_double(assumed)));
430
+ });
431
+ }
432
+
433
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
434
+ inline __device__ float gpuAtomicMax(float * address, float val) {
435
+ unsigned int* address_as_ull = (unsigned int*)address;
436
+ unsigned int old = *address_as_ull;
437
+ unsigned int assumed;
438
+
439
+ do {
440
+ assumed = old;
441
+ old = atomicCAS(address_as_ull, assumed,
442
+ __float_as_int(safe_max(val, __int_as_float(assumed))));
443
+
444
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
445
+ } while (assumed != old);
446
+
447
+ return __int_as_float(old);
448
+ }
449
+
450
+ // Atomic minimum implementation.
451
+
452
+ template <typename T>
453
+ __host__ __device__ T safe_min(T a, T b) {
454
+ #if defined(__HIPCC__)
455
+ // TODO: remove this special case for HIP when issue is fixed:
456
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
457
+ T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min<T>(a, b));
458
+ #else
459
+ T min = at::_isnan(b) ? b : std::min<T>(a, b);
460
+ #endif
461
+
462
+ return min;
463
+ }
464
+
465
+ ATOMIC_INTEGER_IMPL(Min)
466
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
467
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
468
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t)
469
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t)
470
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t)
471
+
472
+ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
473
+ return AtomicFPOp<at::Half>()(address, val,
474
+ [](at::Half bsum, at::Half val) {
475
+ return safe_min(bsum, val);
476
+ });
477
+ }
478
+
479
+ inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
480
+ return AtomicFPOp<at::BFloat16>()(address, val,
481
+ [](at::BFloat16 bsum, at::BFloat16 val) {
482
+ return safe_min(bsum, val);
483
+ });
484
+ }
485
+
486
+ inline __device__ double gpuAtomicMin(double * address, double val) {
487
+ return AtomicFPOp<double>()(address, val,
488
+ [](double val, unsigned long long int assumed) {
489
+ return __double_as_longlong(safe_min(val, __longlong_as_double(assumed)));
490
+ });
491
+ }
492
+
493
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
494
+ inline __device__ float gpuAtomicMin(float * address, float val) {
495
+ unsigned int* address_as_ull = (unsigned int*)address;
496
+ unsigned int old = *address_as_ull;
497
+ unsigned int assumed;
498
+
499
+ do {
500
+ assumed = old;
501
+ old = atomicCAS(address_as_ull, assumed,
502
+ __float_as_int(safe_min(val, __int_as_float(assumed))));
503
+
504
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
505
+ } while (assumed != old);
506
+
507
+ return __int_as_float(old);
508
+ }
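
A short illustrative pair of kernels using these helpers (not part of the diff; the binning logic and scatter layout are made up):

    #include <ATen/cuda/Atomic.cuh>

    __global__ void histogram_kernel(const float* data, int64_t n, int32_t* bins, int num_bins) {
      int64_t i = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
      if (i < n) {
        int b = static_cast<int>(data[i] * num_bins);
        if (b < 0) b = 0;
        if (b >= num_bins) b = num_bins - 1;
        // int32_t maps directly onto the hardware atomicAdd; the returned old count is ignored here.
        gpuAtomicAdd(&bins[b], 1);
      }
    }

    __global__ void scatter_add_half(const int64_t* index, const at::Half* src, at::Half* out, int64_t n) {
      int64_t i = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
      if (i < n) {
        // Non-returning variant, preferred when the old value is not needed (see the note above);
        // on ROCm and pre-sm_70 devices this falls back to the CAS loop in AtomicFPOp<at::Half>.
        gpuAtomicAddNoReturn(&out[index[i]], src[i]);
      }
    }
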
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h ADDED
@@ -0,0 +1,375 @@
1
+ #pragma once
2
+ /*
3
+ Provides a subset of CUDA BLAS functions as templates:
4
+
5
+ gemm<Dtype>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c,
6
+ ldc)
7
+
8
+ gemv<Dtype>(transa, m, n, alpha, a, lda, x, incx, beta, y, incy)
9
+
10
+ dot<Dtype>(n, x, incx, y, incy, result)
11
+
12
+ where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot).
13
+ The functions are available in at::cuda::blas namespace.
14
+ */
15
+
16
+ #include <ATen/cuda/CUDAContext.h>
17
+ #include <ATen/OpMathType.h>
18
+
19
+ namespace at::cuda::blas {
20
+
21
+ // RAII guard that sets the CuBLAS pointer mode and restores it to
22
+ // its previous value when the guard is destroyed
23
+ class PointerModeGuard {
24
+ public:
25
+ PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) :
26
+ handle(handle) {
27
+ TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode));
28
+ TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode));
29
+ }
30
+
31
+ ~PointerModeGuard() {
32
+ cublasSetPointerMode(handle, previous_mode);
33
+ }
34
+
35
+ private:
36
+ cublasHandle_t handle;
37
+ cublasPointerMode_t previous_mode;
38
+ };
39
+
40
+ /* LEVEL 3 BLAS FUNCTIONS */
41
+
42
+ #define CUDABLAS_GEMM_ARGTYPES(Dtype) \
43
+ char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
44
+ const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type<Dtype> beta,\
45
+ Dtype *c, int64_t ldc
46
+
47
+ #define CUDABLAS_GEMM_ARGS(Dtype) transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc
48
+
49
+ template <typename Dtype>
50
+ inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
51
+ AT_ERROR("at::cuda::blas::gemm: not implemented for ", typeid(Dtype).name());
52
+ }
53
+
54
+ template <>
55
+ void gemm<double>(CUDABLAS_GEMM_ARGTYPES(double));
56
+ template <>
57
+ void gemm<float>(CUDABLAS_GEMM_ARGTYPES(float));
58
+ template <>
59
+ void gemm<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
60
+ template <>
61
+ void gemm<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
62
+ template <>
63
+ void gemm<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
64
+ template <>
65
+ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
66
+
67
+ template <typename Dtype>
68
+ inline void gemm_internal(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
69
+ AT_ERROR("at::cuda::blas::gemm_internal: not implemented for ", typeid(Dtype).name());
70
+ }
71
+
72
+ template <>
73
+ void gemm_internal<double>(CUDABLAS_GEMM_ARGTYPES(double));
74
+ template <>
75
+ void gemm_internal<float>(CUDABLAS_GEMM_ARGTYPES(float));
76
+ template <>
77
+ void gemm_internal<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
78
+ template <>
79
+ void gemm_internal<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
80
+ template <>
81
+ void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
82
+ template <>
83
+ void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
84
+
85
+ #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
86
+ enum GEMMAndBiasActivationEpilogue {
87
+ None,
88
+ RELU,
89
+ GELU,
90
+ };
91
+
92
+ // NOTE: GELU activation is not supported prior to CUDA 11.4 and will
93
+ // do nothing if passed in that case.
94
+ template <typename Dtype>
95
+ void gemm_and_bias(
96
+ bool transpose_mat1,
97
+ bool transpose_mat2,
98
+ int64_t m,
99
+ int64_t n,
100
+ int64_t k,
101
+ at::opmath_type<Dtype> alpha_val,
102
+ const Dtype* mat1_ptr,
103
+ int64_t mat1_ld,
104
+ const Dtype* mat2_ptr,
105
+ int64_t mat2_ld,
106
+ const Dtype* bias,
107
+ Dtype* result_ptr,
108
+ int64_t result_ld,
109
+ GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None);
110
+
111
+ void int8_gemm(
112
+ bool transpose_mat1,
113
+ bool transpose_mat2,
114
+ int64_t m,
115
+ int64_t n,
116
+ int64_t k,
117
+ const int8_t* mat1_ptr,
118
+ int64_t mat1_ld,
119
+ const int8_t* mat2_ptr,
120
+ int64_t mat2_ld,
121
+ int32_t* result_ptr,
122
+ int64_t result_ld);
123
+
124
+ void scaled_gemm(
125
+ char transa,
126
+ char transb,
127
+ int64_t m,
128
+ int64_t n,
129
+ int64_t k,
130
+ const void* mat1_ptr,
131
+ const void* mat1_scale_ptr,
132
+ int64_t mat1_ld,
133
+ ScalarType mat1_dtype,
134
+ const void* mat2_ptr,
135
+ const void* mat2_scale_ptr,
136
+ int64_t mat2_ld,
137
+ ScalarType mat2_dtype,
138
+ const void* bias_ptr,
139
+ ScalarType bias_dtype,
140
+ void* result_ptr,
141
+ const void* result_scale_ptr,
142
+ int64_t result_ld,
143
+ ScalarType result_dtype,
144
+ void* amax_ptr,
145
+ bool use_fast_accum);
146
+ #endif
147
+
148
+ #define CUDABLAS_BGEMM_ARGTYPES(Dtype) \
149
+ char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
150
+ const Dtype *a, int64_t lda, int64_t stridea, \
151
+ const Dtype *b, int64_t ldb, int64_t strideb, \
152
+ at::opmath_type<Dtype> beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches
153
+
154
+ #define CUDABLAS_BGEMM_ARGS(Dtype) \
155
+ transa, transb, m, n, k, alpha, a, lda, stridea, b, ldb, strideb, beta, c, ldc, stridec, num_batches
156
+
157
+ template <typename Dtype>
158
+ inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
159
+ AT_ERROR("at::cuda::blas::bgemm: not implemented for ", typeid(Dtype).name());
160
+ }
161
+
162
+ template <>
163
+ void bgemm<double>(CUDABLAS_BGEMM_ARGTYPES(double));
164
+ template <>
165
+ void bgemm<float>(CUDABLAS_BGEMM_ARGTYPES(float));
166
+ template <>
167
+ void bgemm<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
168
+ template <>
169
+ void bgemm<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
170
+ template <>
171
+ void bgemm<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
172
+ template <>
173
+ void bgemm<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));
174
+
175
+ template <typename Dtype>
176
+ inline void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
177
+ AT_ERROR("at::cuda::blas::bgemm_internal: not implemented for ", typeid(Dtype).name());
178
+ }
179
+
180
+ template <>
181
+ void bgemm_internal<double>(CUDABLAS_BGEMM_ARGTYPES(double));
182
+ template <>
183
+ void bgemm_internal<float>(CUDABLAS_BGEMM_ARGTYPES(float));
184
+ template <>
185
+ void bgemm_internal<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
186
+ template <>
187
+ void bgemm_internal<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
188
+ template <>
189
+ void bgemm_internal<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
190
+ template <>
191
+ void bgemm_internal<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));
192
+
193
+ #if defined(USE_ROCM) && ROCM_VERSION <= 50500
194
+ // ROCm 5.6 hipblas matches the const Dtype *A API, but prior hipblas does not.
195
+ #define CUDABLAS_TRSM_ARGTYPES(Dtype) \
196
+ hipblasHandle_t handle, hipblasSideMode_t side, hipblasFillMode_t uplo, \
197
+ hipblasOperation_t trans, hipblasDiagType_t diag, int m, int n, \
198
+ const Dtype *alpha, Dtype *A, int lda, Dtype *B, int ldb
199
+ #else
200
+ #define CUDABLAS_TRSM_ARGTYPES(Dtype) \
201
+ cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
202
+ cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \
203
+ const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb
204
+ #endif
205
+
206
+ template <typename Dtype>
207
+ inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) {
208
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::trsm: not implemented for ", typeid(Dtype).name());
209
+ }
210
+
211
+ template <>
212
+ TORCH_CUDA_CU_API void trsm<float>(CUDABLAS_TRSM_ARGTYPES(float));
213
+ template <>
214
+ TORCH_CUDA_CU_API void trsm<double>(CUDABLAS_TRSM_ARGTYPES(double));
215
+ template <>
216
+ TORCH_CUDA_CU_API void trsm<c10::complex<float>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<float>));
217
+ template <>
218
+ TORCH_CUDA_CU_API void trsm<c10::complex<double>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<double>));
219
+
220
+ #define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \
221
+ cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
222
+ cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \
223
+ const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \
224
+ int batchCount
225
+
226
+ template <typename Dtype>
227
+ inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) {
228
+ TORCH_INTERNAL_ASSERT(
229
+ false,
230
+ "at::cuda::blas::trsmBatched: not implemented for ",
231
+ typeid(Dtype).name());
232
+ }
233
+
234
+ template <>
235
+ TORCH_CUDA_CU_API void trsmBatched<float>(CUDABLAS_TRSM_BATCHED_ARGTYPES(float));
236
+ template <>
237
+ TORCH_CUDA_CU_API void trsmBatched<double>(CUDABLAS_TRSM_BATCHED_ARGTYPES(double));
238
+ template <>
239
+ TORCH_CUDA_CU_API void trsmBatched<c10::complex<float>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<float>));
240
+ template <>
241
+ TORCH_CUDA_CU_API void trsmBatched<c10::complex<double>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<double>));
242
+
243
+ /* LEVEL 2 BLAS FUNCTIONS */
244
+
245
+ #define CUDABLAS_GEMV_ARGTYPES(Dtype) \
246
+ char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \
247
+ const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy
248
+
249
+ template <typename Dtype>
250
+ inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) {
251
+ AT_ERROR("at::cuda::blas::gemv: not implemented for ", typeid(Dtype).name());
252
+ }
253
+
254
+ template <>
255
+ void gemv<double>(CUDABLAS_GEMV_ARGTYPES(double));
256
+ template <>
257
+ void gemv<float>(CUDABLAS_GEMV_ARGTYPES(float));
258
+ template <>
259
+ void gemv<c10::complex<double>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<double>));
260
+ template <>
261
+ void gemv<c10::complex<float>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<float>));
262
+ template <>
263
+ void gemv<at::Half>(CUDABLAS_GEMV_ARGTYPES(at::Half));
264
+ template <>
265
+ void gemv<at::BFloat16>(CUDABLAS_GEMV_ARGTYPES(at::BFloat16));
266
+
267
+ /* LEVEL 1 BLAS FUNCTIONS */
268
+
269
+ #define CUDABLAS_DOT_ARGTYPES(Dtype) \
270
+ cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \
271
+ int incy, Dtype *result
272
+
273
+ template <typename Dtype>
274
+ inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
275
+ AT_ERROR("at::cuda::blas::dot: not implemented for ", typeid(Dtype).name());
276
+ }
277
+
278
+ template <>
279
+ void dot<double>(CUDABLAS_DOT_ARGTYPES(double));
280
+ template <>
281
+ void dot<float>(CUDABLAS_DOT_ARGTYPES(float));
282
+ template <>
283
+ void dot<at::Half>(CUDABLAS_DOT_ARGTYPES(at::Half));
284
+ template <>
285
+ void dot<at::BFloat16>(CUDABLAS_DOT_ARGTYPES(at::BFloat16));
286
+ template <>
287
+ void dot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
288
+ template <>
289
+ void dot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
290
+
291
+ template <typename Dtype>
292
+ inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
293
+ AT_ERROR("at::cuda::blas::vdot: not implemented for ", typeid(Dtype).name());
294
+ }
295
+
296
+ template <>
297
+ void vdot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
298
+ template <>
299
+ void vdot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
300
+
301
+ #define CUDABLAS_GETRS_ARGTYPES(Dtype) \
302
+ cublasHandle_t handle, cublasOperation_t trans, \
303
+ int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \
304
+ Dtype** dB_array, int ldb, int* info_array, int batchsize
305
+
306
+ template<class Dtype>
307
+ void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) {
308
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::getrsBatched: not implemented for ",
309
+ typeid(Dtype).name());
310
+ }
311
+ template<>
312
+ TORCH_CUDA_CU_API void getrsBatched<float>(CUDABLAS_GETRS_ARGTYPES(float));
313
+ template<>
314
+ TORCH_CUDA_CU_API void getrsBatched<double>(CUDABLAS_GETRS_ARGTYPES(double));
315
+ template<>
316
+ TORCH_CUDA_CU_API void getrsBatched<c10::complex<float>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<float>));
317
+ template<>
318
+ TORCH_CUDA_CU_API void getrsBatched<c10::complex<double>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<double>));
319
+
320
+ #define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \
321
+ cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \
322
+ Dtype **tau_array, int *info, int batchsize
323
+
324
+ template <class Dtype>
325
+ void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) {
326
+ TORCH_INTERNAL_ASSERT(
327
+ false,
328
+ "at::cuda::blas::geqrfBatched: not implemented for ",
329
+ typeid(Dtype).name());
330
+ }
331
+ template <>
332
+ TORCH_CUDA_CU_API void geqrfBatched<float>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float));
333
+ template <>
334
+ TORCH_CUDA_CU_API void geqrfBatched<double>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double));
335
+ template <>
336
+ TORCH_CUDA_CU_API void geqrfBatched<c10::complex<double>>(
337
+ CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<double>));
338
+ template <>
339
+ TORCH_CUDA_CU_API void geqrfBatched<c10::complex<float>>(
340
+ CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<float>));
341
+
342
+ #define CUDABLAS_GETRF_ARGTYPES(Dtype) \
343
+ int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize
344
+
345
+ template<class Dtype>
346
+ void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) {
347
+ TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented for ", typeid(Dtype).name());
348
+ }
349
+ template<>
350
+ TORCH_CUDA_CU_API void getrfBatched<float>(CUDABLAS_GETRF_ARGTYPES(float));
351
+ template<>
352
+ TORCH_CUDA_CU_API void getrfBatched<double>(CUDABLAS_GETRF_ARGTYPES(double));
353
+ template<>
354
+ TORCH_CUDA_CU_API void getrfBatched<c10::complex<double>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<double>));
355
+ template<>
356
+ TORCH_CUDA_CU_API void getrfBatched<c10::complex<float>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<float>));
357
+
358
+ #define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \
359
+ cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize
360
+
361
+ template <class Dtype>
362
+ void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) {
363
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::gelsBatched: not implemented for ", typeid(Dtype).name());
364
+ }
365
+
366
+ template<>
367
+ TORCH_CUDA_CU_API void gelsBatched<double>(CUDABLAS_GELS_BATCHED_ARGTYPES(double));
368
+ template<>
369
+ TORCH_CUDA_CU_API void gelsBatched<float>(CUDABLAS_GELS_BATCHED_ARGTYPES(float));
370
+ template<>
371
+ TORCH_CUDA_CU_API void gelsBatched<c10::complex<double>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<double>));
372
+ template<>
373
+ TORCH_CUDA_CU_API void gelsBatched<c10::complex<float>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<float>));
374
+
375
+ } // namespace at::cuda::blas
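
An illustrative call to the templated gemm wrapper declared above (sketch only; the pointers are assumed to be valid device buffers in column-major layout):

    #include <ATen/cuda/CUDABlas.h>

    // C = alpha * A * B + beta * C, with A (m x k), B (k x n), C (m x n), all column-major.
    void sgemm_example(const float* A, const float* B, float* C,
                       int64_t m, int64_t n, int64_t k) {
      const float alpha = 1.0f;
      const float beta = 0.0f;
      at::cuda::blas::gemm<float>(
          /*transa=*/'n', /*transb=*/'n', m, n, k,
          alpha, A, /*lda=*/m, B, /*ldb=*/k,
          beta, C, /*ldc=*/m);
    }
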
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContextLight.h>
4
+
5
+ // Preserved for BC, as many files depend on these includes
6
+ #include <ATen/Context.h>
7
+ #include <c10/cuda/CUDAStream.h>
8
+ #include <c10/util/Logging.h>
9
+ #include <ATen/cuda/Exceptions.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h ADDED
@@ -0,0 +1,95 @@
1
+ #pragma once
2
+ // Light-weight version of CUDAContext.h with fewer transitive includes
3
+
4
+ #include <cstdint>
5
+
6
+ #include <cuda_runtime_api.h>
7
+ #include <cusparse.h>
8
+ #include <cublas_v2.h>
9
+
10
+ // cublasLt was introduced in CUDA 10.1, but we only enable it for CUDA 11.1+, which also
11
+ // added bf16 support
12
+ #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
13
+ #include <cublasLt.h>
14
+ #endif
15
+
16
+ #ifdef CUDART_VERSION
17
+ #include <cusolverDn.h>
18
+ #endif
19
+
20
+ #if defined(USE_ROCM) && ROCM_VERSION >= 50300
21
+ #include <hipsolver/hipsolver.h>
22
+ #endif
23
+
24
+ #include <c10/core/Allocator.h>
25
+ #include <c10/cuda/CUDAFunctions.h>
26
+
27
+ namespace c10 {
28
+ struct Allocator;
29
+ }
30
+
31
+ namespace at::cuda {
32
+
33
+ /*
34
+ A common CUDA interface for ATen.
35
+
36
+ This interface is distinct from CUDAHooks, which defines an interface that links
37
+ to both CPU-only and CUDA builds. That interface is intended for runtime
38
+ dispatch and should be used from files that are included in both CPU-only and
39
+ CUDA builds.
40
+
41
+ CUDAContext, on the other hand, should be preferred by files only included in
42
+ CUDA builds. It is intended to expose CUDA functionality in a consistent
43
+ manner.
44
+
45
+ This means there is some overlap between the CUDAContext and CUDAHooks, but
46
+ the choice of which to use is simple: use CUDAContext when in a CUDA-only file,
47
+ use CUDAHooks otherwise.
48
+
49
+ Note that CUDAContext simply defines an interface with no associated class.
50
+ It is expected that the modules whose functions compose this interface will
51
+ manage their own state. There is only a single CUDA context/state.
52
+ */
53
+
54
+ /**
55
+ * DEPRECATED: use device_count() instead
56
+ */
57
+ inline int64_t getNumGPUs() {
58
+ return c10::cuda::device_count();
59
+ }
60
+
61
+ /**
62
+ * CUDA is available if we compiled with CUDA, and there are one or more
63
+ * devices. If we compiled with CUDA but there is a driver problem, etc.,
64
+ * this function will report CUDA is not available (rather than raise an error.)
65
+ */
66
+ inline bool is_available() {
67
+ return c10::cuda::device_count() > 0;
68
+ }
69
+
70
+ TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties();
71
+
72
+ TORCH_CUDA_CPP_API int warp_size();
73
+
74
+ TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(c10::DeviceIndex device);
75
+
76
+ TORCH_CUDA_CPP_API bool canDeviceAccessPeer(
77
+ c10::DeviceIndex device,
78
+ c10::DeviceIndex peer_device);
79
+
80
+ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
81
+
82
+ /* Handles */
83
+ TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
84
+ TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
85
+ #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
86
+ TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
87
+ #endif
88
+
89
+ TORCH_CUDA_CPP_API void clearCublasWorkspaces();
90
+
91
+ #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300
92
+ TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle();
93
+ #endif
94
+
95
+ } // namespace at::cuda
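
A small illustrative use of these accessors (sketch only):

    #include <ATen/cuda/CUDAContextLight.h>
    #include <iostream>

    void print_device_info(c10::DeviceIndex device) {
      if (!at::cuda::is_available()) {
        std::cout << "No CUDA devices visible\n";
        return;
      }
      const cudaDeviceProp* prop = at::cuda::getDeviceProperties(device);
      std::cout << prop->name << ": " << prop->multiProcessorCount
                << " SMs, warp size " << at::cuda::warp_size() << "\n";

      // Library handles are created and cached by ATen; callers must not destroy them.
      cublasHandle_t blas = at::cuda::getCurrentCUDABlasHandle();
      (void)blas;
    }
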
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h ADDED
@@ -0,0 +1,115 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+
5
+ #include <cuda.h>
6
+ #include <library_types.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ template <typename scalar_t>
11
+ cudaDataType getCudaDataType() {
12
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.")
13
+ }
14
+
15
+ template<> inline cudaDataType getCudaDataType<at::Half>() {
16
+ return CUDA_R_16F;
17
+ }
18
+ template<> inline cudaDataType getCudaDataType<float>() {
19
+ return CUDA_R_32F;
20
+ }
21
+ template<> inline cudaDataType getCudaDataType<double>() {
22
+ return CUDA_R_64F;
23
+ }
24
+ template<> inline cudaDataType getCudaDataType<c10::complex<c10::Half>>() {
25
+ return CUDA_C_16F;
26
+ }
27
+ template<> inline cudaDataType getCudaDataType<c10::complex<float>>() {
28
+ return CUDA_C_32F;
29
+ }
30
+ template<> inline cudaDataType getCudaDataType<c10::complex<double>>() {
31
+ return CUDA_C_64F;
32
+ }
33
+
34
+ // HIP doesn't define integral types
35
+ #ifndef USE_ROCM
36
+ template<> inline cudaDataType getCudaDataType<uint8_t>() {
37
+ return CUDA_R_8U;
38
+ }
39
+ template<> inline cudaDataType getCudaDataType<int8_t>() {
40
+ return CUDA_R_8I;
41
+ }
42
+ template<> inline cudaDataType getCudaDataType<int>() {
43
+ return CUDA_R_32I;
44
+ }
45
+ #endif
46
+
47
+ #if !defined(USE_ROCM)
48
+ template<> inline cudaDataType getCudaDataType<int16_t>() {
49
+ return CUDA_R_16I;
50
+ }
51
+ template<> inline cudaDataType getCudaDataType<int64_t>() {
52
+ return CUDA_R_64I;
53
+ }
54
+ template<> inline cudaDataType getCudaDataType<at::BFloat16>() {
55
+ return CUDA_R_16BF;
56
+ }
57
+ #endif
58
+
59
+ inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) {
60
+ switch (scalar_type) {
61
+ // HIP doesn't define integral types
62
+ #ifndef USE_ROCM
63
+ case c10::ScalarType::Byte:
64
+ return CUDA_R_8U;
65
+ case c10::ScalarType::Char:
66
+ return CUDA_R_8I;
67
+ case c10::ScalarType::Int:
68
+ return CUDA_R_32I;
69
+ #endif
70
+ case c10::ScalarType::Half:
71
+ return CUDA_R_16F;
72
+ case c10::ScalarType::Float:
73
+ return CUDA_R_32F;
74
+ case c10::ScalarType::Double:
75
+ return CUDA_R_64F;
76
+ case c10::ScalarType::ComplexHalf:
77
+ return CUDA_C_16F;
78
+ case c10::ScalarType::ComplexFloat:
79
+ return CUDA_C_32F;
80
+ case c10::ScalarType::ComplexDouble:
81
+ return CUDA_C_64F;
82
+ #if !defined(USE_ROCM)
83
+ case c10::ScalarType::Short:
84
+ return CUDA_R_16I;
85
+ case c10::ScalarType::Long:
86
+ return CUDA_R_64I;
87
+ case c10::ScalarType::BFloat16:
88
+ return CUDA_R_16BF;
89
+ #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080
90
+ case c10::ScalarType::Float8_e4m3fn:
91
+ return CUDA_R_8F_E4M3;
92
+ case c10::ScalarType::Float8_e5m2:
93
+ return CUDA_R_8F_E5M2;
94
+ #endif
95
+ #else // USE_ROCM
96
+ case c10::ScalarType::BFloat16:
97
+ return CUDA_R_16BF;
98
+ #if defined(HIP_NEW_TYPE_ENUMS)
99
+ case c10::ScalarType::Float8_e4m3fnuz:
100
+ return HIP_R_8F_E4M3_FNUZ;
101
+ case c10::ScalarType::Float8_e5m2fnuz:
102
+ return HIP_R_8F_E5M2_FNUZ;
103
+ #else
104
+ case c10::ScalarType::Float8_e4m3fnuz:
105
+ return static_cast<hipDataType>(1000);
106
+ case c10::ScalarType::Float8_e5m2fnuz:
107
+ return static_cast<hipDataType>(1001);
108
+ #endif
109
+ #endif
110
+ default:
111
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.")
112
+ }
113
+ }
114
+
115
+ } // namespace at::cuda
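
Illustrative usage (sketch only), e.g. when filling in a cuBLAS/cuSPARSE descriptor from a tensor's dtype:

    #include <ATen/cuda/CUDADataType.h>

    cudaDataType pick_compute_types() {
      // Compile-time mapping for a known C++ type.
      cudaDataType half_type = at::cuda::getCudaDataType<at::Half>();   // CUDA_R_16F
      (void)half_type;
      // Runtime mapping from a c10::ScalarType (e.g. tensor.scalar_type());
      // asserts on types the current platform cannot represent.
      return at::cuda::ScalarTypeToCudaDataType(c10::ScalarType::Float);  // CUDA_R_32F
    }
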
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/Exceptions.h>
4
+
5
+ #include <cuda.h>
6
+ #include <cuda_runtime.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ inline Device getDeviceFromPtr(void* ptr) {
11
+ cudaPointerAttributes attr{};
12
+
13
+ AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr));
14
+
15
+ #if !defined(USE_ROCM)
16
+ TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered,
17
+ "The specified pointer resides on host memory and is not registered with any CUDA device.");
18
+ #endif
19
+
20
+ return {c10::DeviceType::CUDA, static_cast<DeviceIndex>(attr.device)};
21
+ }
22
+
23
+ } // namespace at::cuda
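
Illustrative usage (sketch only), e.g. when wrapping an externally allocated buffer:

    #include <ATen/cuda/CUDADevice.h>

    c10::Device device_of_allocation(size_t bytes) {
      void* ptr = nullptr;
      AT_CUDA_CHECK(cudaMalloc(&ptr, bytes));
      // Asks the CUDA runtime which device owns the pointer; raises if it is unregistered host memory.
      c10::Device dev = at::cuda::getDeviceFromPtr(ptr);
      AT_CUDA_CHECK(cudaFree(ptr));
      return dev;
    }
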
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h ADDED
@@ -0,0 +1,138 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Generator.h>
4
+ #include <ATen/cuda/PhiloxCudaState.h>
5
+ #include <ATen/Context.h>
6
+ #include <limits>
7
+ #include <atomic>
8
+
9
+ namespace at {
10
+ /**
11
+ * Note [CUDA Graph-safe RNG states]
12
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13
+ *
14
+ * Strategy:
15
+ * ~~~~~~~~~
16
+ * (It helps to look at
17
+ * cuda/detail/PhiloxCudaStateRaw.cuh and
18
+ * cuda/detail/UnpackRaw.cuh
19
+ * while you read this.)
20
+ *
21
+ * A CUDA graph containing multiple RNG ops behaves like a
22
+ * single giant kernel from the perspective of ops external
23
+ * to the graph. During graph capture, logic in CUDAGeneratorImpl
24
+ * records the total of all offset increments that occur in the
25
+ * graphed region, and records the final total as the offset for
26
+ * the entire graph.
27
+ *
28
+ * When the graph reruns, the logic that reruns it
29
+ * increments this device's CUDA generator's offset
30
+ * by that total.
31
+ *
32
+ * Meanwhile, within the graph, at capture time, instead of
33
+ * populating PhiloxCudaStates with the uint64_t offset pulled
34
+ * directly from the global state, PhiloxCudaState uses a pointer
35
+ * to a one-element stream-local int64_t device tensor
36
+ * holding an initial offset value, and a uint64_t holding an
37
+ * intra-graph offset. (The intra-graph offset starts from zero
38
+ * when capture begins.) In each consumer kernel,
39
+ * at::cuda::philox::unpack computes the offset to use for this kernel
40
+ * as intra-graph offset + *initial offset.
41
+ *
42
+ * When the graph reruns, the logic that reruns it first
43
+ * fill_s the initial offset tensor with this device's
44
+ * CUDA generator's current offset.
45
+ *
46
+ * The control flow above ensures graphed execution is bitwise
47
+ * identical to eager execution as long as RNG ops are enqueued
48
+ * from a single thread, even if RNG ops and graphs containing
49
+ * RNG ops are enqueued and run simultaneously on multiple streams.
50
+ *
51
+ * Usage:
52
+ * ~~~~~~
53
+ * PhiloxCudaState in this file, and unpack() in
54
+ * cuda/CUDAGraphsUtils.cuh allow non-divergent use of
55
+ * CUDAGeneratorImpl whether graph capture is underway or not.
56
+ *
57
+ * Each PhiloxCudaState instance should be used for one and only one
58
+ * consumer kernel.
59
+ *
60
+ * Example (see e.g. native/cuda/Dropout.cu):
61
+ *
62
+ * #include <ATen/cuda/CUDAGeneratorImpl.h>
63
+ * #include <ATen/cuda/CUDAGraphsUtils.cuh>
64
+ *
65
+ * __global__ void kernel(..., PhiloxCudaState philox_args) {
66
+ * auto seeds = at::cuda::philox::unpack(philox_args);
67
+ * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
68
+ * curandStatePhilox4_32_10_t state;
69
+ * curand_init(std::get<0>(seeds), // seed
70
+ * idx, // per-thread subsequence
71
+ * std::get<1>(seeds), // offset in subsequence
72
+ * &state);
73
+ * ...
74
+ * }
75
+ *
76
+ * host_caller(...) {
77
+ * PhiloxCudaState rng_engine_inputs;
78
+ * {
79
+ * // See Note [Acquire lock when using random generators]
80
+ * std::lock_guard<std::mutex> lock(gen->mutex_);
81
+ *
82
+ * // gen could be HostState or DevState here! No divergent code needed!
83
+ * rng_engine_inputs = gen->philox_cuda_state(offset_increment);
84
+ * }
85
+ * kernel<<<...>>>(..., rng_engine_inputs);
86
+ * }
87
+ *
88
+ */
89
+
90
+ struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl {
91
+ // Constructors
92
+ CUDAGeneratorImpl(DeviceIndex device_index = -1);
93
+ ~CUDAGeneratorImpl() override = default;
94
+
95
+ // CUDAGeneratorImpl methods
96
+ std::shared_ptr<CUDAGeneratorImpl> clone() const;
97
+ void set_current_seed(uint64_t seed) override;
98
+ void set_offset(uint64_t offset) override;
99
+ uint64_t get_offset() const override;
100
+ uint64_t current_seed() const override;
101
+ uint64_t seed() override;
102
+ void set_state(const c10::TensorImpl& new_state) override;
103
+ c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
104
+ void set_philox_offset_per_thread(uint64_t offset);
105
+ uint64_t philox_offset_per_thread() const;
106
+ void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph);
107
+ uint64_t capture_epilogue();
108
+ PhiloxCudaState philox_cuda_state(uint64_t increment);
109
+
110
+ bool reset_rnn_state() {
111
+ return !no_reset_rnn_state_.test_and_set();
112
+ }
113
+
114
+ // Temporarily accommodates call sites that use philox_engine_inputs.
115
+ // Allows incremental refactor of call sites to use philox_cuda_state.
116
+ std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment);
117
+
118
+ static c10::DeviceType device_type();
119
+
120
+ private:
121
+ CUDAGeneratorImpl* clone_impl() const override;
122
+ uint64_t seed_ = default_rng_seed_val;
123
+ uint64_t philox_offset_per_thread_ = 0;
124
+ int64_t* seed_extragraph_{};
125
+ int64_t* offset_extragraph_{};
126
+ uint32_t offset_intragraph_ = 0;
127
+ bool graph_expects_this_gen_ = false;
128
+ std::atomic_flag no_reset_rnn_state_;
129
+ };
130
+
131
+ namespace cuda::detail {
132
+
133
+ TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator(
134
+ DeviceIndex device_index = -1);
135
+ TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1);
136
+
137
+ } // namespace cuda::detail
138
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h ADDED
@@ -0,0 +1,92 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+
8
+ #include <mutex>
9
+
10
+ namespace at {
11
+
12
+ struct CUDAGeneratorImpl;
13
+
14
+ namespace cuda {
15
+
16
+ // Standalone way to get a unique mempool id usable as a pool=... argument
17
+ // to CUDAGraph::capture_begin
18
+ TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle();
19
+
20
+ struct TORCH_CUDA_CPP_API CUDAGraph {
21
+ CUDAGraph();
22
+ ~CUDAGraph();
23
+
24
+ static void inc_pending_event_queries();
25
+ static void dec_pending_event_queries();
26
+ static int num_pending_event_queries();
27
+ void capture_begin(MempoolId_t pool={0, 0}, cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal);
28
+ void capture_end();
29
+ void replay();
30
+ void reset();
31
+ MempoolId_t pool();
32
+ void enable_debug_mode();
33
+ void debug_dump(const std::string& debug_path);
34
+
35
+ protected:
36
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
37
+ cudaGraph_t graph_ = NULL;
38
+ cudaGraphExec_t graph_exec_ = NULL;
39
+ #endif
40
+
41
+ static std::atomic<int> pending_event_queries;
42
+
43
+ // internal states so reset() can do its best cleaning up
44
+ // Set to true in capture_end if cudaStreamEndCapture succeeded
45
+ // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate
46
+ // to create graph_exec_, then graph_ is deleted
47
+ bool has_graph_ = false;
48
+ // Set to true in capture_end if cudaGraphInstantiate succeeded
49
+ bool has_graph_exec_ = false;
50
+
51
+ // uuid of this instance's current capture, used to
52
+ // specify the pool.
53
+ CaptureId_t id_;
54
+
55
+ // the ID assigned by cuda during graph capture,
56
+ // used to identify when a stream is participating in capture
57
+ CaptureId_t capture_id_ = -1;
58
+
59
+ // uuid used to request a particular private mempool from CUDACachingAllocator.
60
+ // By default, this will be set to {id_, 0}.
61
+ //
62
+ // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_
63
+ // will be set to the other graph's mempool_id_, and therefore share a mempool with the
64
+ // other graph.
65
+ //
66
+ // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(),
67
+ // it will share a mempool with any other captures that used "pool=handle".
68
+ //
69
+ // Sharing a mempool across graphs saves memory, and it's safe if you
70
+ // know you'll replay those graphs in the same order you captured them.
71
+ MempoolId_t mempool_id_;
72
+
73
+ // Stream on which capture began
74
+ at::cuda::CUDAStream capture_stream_;
75
+
76
+ // Default generator on device where capture began
77
+ at::CUDAGeneratorImpl* capture_gen_;
78
+
79
+ // Device where capture occurred. Right now, for simplicity, we require all ops
80
+ // in a capture to run on the same device, but this is a limitation of CUDAGraph,
81
+ // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device
82
+ // captures if needed.
83
+ int capture_dev_;
84
+
85
+ // RNG state trackers
86
+ at::Tensor seed_extragraph_;
87
+ at::Tensor offset_extragraph_;
88
+ uint64_t wholegraph_increment_;
89
+ };
90
+
91
+ } // namespace cuda
92
+ } // namespace at
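For orientation, a minimal capture/replay sketch against the CUDAGraph API above; side_stream and work() are placeholders for a non-default CUDA stream and a capture-safe workload supplied by the caller, and the warm-up call reflects the usual advice to run the workload once before capturing.

at::cuda::CUDAGraph graph;
{
  c10::cuda::CUDAStreamGuard guard(side_stream);  // capture may not run on the default stream
  work();                                         // warm-up on the capture stream
  graph.capture_begin();                          // optionally pass a pool from graph_pool_handle()
  work();                                         // enqueued into the graph, not executed yet
  graph.capture_end();
}
graph.replay();                                   // launches the whole captured region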
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAGeneratorImpl.h>
4
+ #include <ATen/cuda/CUDAEvent.h>
5
+ #include <ATen/cuda/PhiloxUtils.cuh>
6
+ #include <ATen/cuda/detail/CUDAHooks.h>
7
+ #include <ATen/detail/CUDAHooksInterface.h>
8
+ #include <c10/core/StreamGuard.h>
9
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
10
+ #include <c10/cuda/CUDAGuard.h>
11
+
12
+ // c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten.
13
+ // This file adds utils used by aten only.
14
+
15
+ namespace at::cuda {
16
+
17
+ using CaptureId_t = c10::cuda::CaptureId_t;
18
+ using CaptureStatus = c10::cuda::CaptureStatus;
19
+
20
+ // Use this version where you don't want to create a CUDA context if none exists.
21
+ inline CaptureStatus currentStreamCaptureStatus() {
22
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
23
+ // don't create a context if we don't have to
24
+ if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) {
25
+ return c10::cuda::currentStreamCaptureStatusMayInitCtx();
26
+ } else {
27
+ return CaptureStatus::None;
28
+ }
29
+ #else
30
+ return CaptureStatus::None;
31
+ #endif
32
+ }
33
+
34
+ inline void assertNotCapturing(std::string attempt) {
35
+ auto status = currentStreamCaptureStatus();
36
+ TORCH_CHECK(status == CaptureStatus::None,
37
+ attempt,
38
+ " during CUDA graph capture. If you need this call to be captured, "
39
+ "please file an issue. "
40
+ "Current cudaStreamCaptureStatus: ",
41
+ status);
42
+ }
43
+
44
+ inline void errorIfCapturingCudnnBenchmark(std::string version_specific) {
45
+ auto status = currentStreamCaptureStatus();
46
+ TORCH_CHECK(status == CaptureStatus::None,
47
+ "Current cudaStreamCaptureStatus: ",
48
+ status,
49
+ "\nCapturing ",
50
+ version_specific,
51
+ "is prohibited. Possible causes of this error:\n"
52
+ "1. No warmup iterations occurred before capture.\n"
53
+ "2. The convolutions you're trying to capture use dynamic shapes, "
54
+ "in which case capturing them is generally prohibited.");
55
+ }
56
+
57
+ } // namespace at::cuda
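As a usage note, ops that would break a capture typically guard themselves with assertNotCapturing; a hypothetical call site:

void my_op_requiring_sync() {
  at::cuda::assertNotCapturing("cudaDeviceSynchronize in my_op_requiring_sync");
  // ... work that must not be recorded into a CUDA graph ...
}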
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h ADDED
@@ -0,0 +1,76 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #if defined(USE_ROCM)
5
+ #include <hipsparse/hipsparse-version.h>
6
+ #define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch)
7
+ #endif
8
+
9
+ // cuSparse Generic API added in CUDA 10.1
10
+ // Windows support added in CUDA 11.0
11
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32)))
12
+ #define AT_USE_CUSPARSE_GENERIC_API() 1
13
+ #else
14
+ #define AT_USE_CUSPARSE_GENERIC_API() 0
15
+ #endif
16
+
17
+ // cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0
18
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
19
+ (CUSPARSE_VERSION < 12000)
20
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1
21
+ #else
22
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0
23
+ #endif
24
+
25
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
26
+ (CUSPARSE_VERSION >= 12000)
27
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1
28
+ #else
29
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0
30
+ #endif
31
+
32
+ #if defined(USE_ROCM)
33
+ // hipSparse const API added in v2.4.0
34
+ #if HIPSPARSE_VERSION >= 200400
35
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1
36
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
37
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
38
+ #else
39
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
40
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 1
41
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
42
+ #endif
43
+ #else // USE_ROCM
44
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
45
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
46
+ #define AT_USE_HIPSPARSE_GENERIC_API() 0
47
+ #endif // USE_ROCM
48
+
49
+ // cuSparse Generic API spsv function was added in CUDA 11.3.0
50
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500)
51
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 1
52
+ #else
53
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 0
54
+ #endif
55
+
56
+ // cuSparse Generic API spsm function was added in CUDA 11.3.1
57
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600)
58
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 1
59
+ #else
60
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 0
61
+ #endif
62
+
63
+ // cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400)
64
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400)
65
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 1
66
+ #else
67
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 0
68
+ #endif
69
+
70
+ // BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0)
71
+ #if defined(CUDART_VERSION) || \
72
+ (defined(USE_ROCM) && ROCM_VERSION >= 40500 )
73
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1
74
+ #else
75
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0
76
+ #endif
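These macros are meant to be consumed at preprocessing time by the sparse kernels; a minimal sketch of how a translation unit might record the availability of one feature (the constant name is illustrative only):

#if AT_USE_CUSPARSE_GENERIC_SPSV()
constexpr bool kHasGenericSpSV = true;   // generic cusparseSpSV path is available
#else
constexpr bool kHasGenericSpSV = false;  // fall back to a legacy triangular-solve path
#endif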
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h ADDED
@@ -0,0 +1,318 @@
1
+ #pragma once
2
+
3
+ /*
4
+ Provides a subset of cuSPARSE functions as templates:
5
+
6
+ csrgeam2<scalar_t>(...)
7
+
8
+ where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
9
+ The functions are available in at::cuda::sparse namespace.
10
+ */
11
+
12
+ #include <ATen/cuda/CUDAContext.h>
13
+ #include <ATen/cuda/CUDASparse.h>
14
+
15
+ namespace at::cuda::sparse {
16
+
17
+ #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
18
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
19
+ const cusparseMatDescr_t descrA, int nnzA, \
20
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
21
+ const int *csrSortedColIndA, const scalar_t *beta, \
22
+ const cusparseMatDescr_t descrB, int nnzB, \
23
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
24
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
25
+ const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \
26
+ const int *csrSortedColIndC, size_t *pBufferSizeInBytes
27
+
28
+ template <typename scalar_t>
29
+ inline void csrgeam2_bufferSizeExt(
30
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) {
31
+ TORCH_INTERNAL_ASSERT(
32
+ false,
33
+ "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ",
34
+ typeid(scalar_t).name());
35
+ }
36
+
37
+ template <>
38
+ void csrgeam2_bufferSizeExt<float>(
39
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float));
40
+ template <>
41
+ void csrgeam2_bufferSizeExt<double>(
42
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double));
43
+ template <>
44
+ void csrgeam2_bufferSizeExt<c10::complex<float>>(
45
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<float>));
46
+ template <>
47
+ void csrgeam2_bufferSizeExt<c10::complex<double>>(
48
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<double>));
49
+
50
+ #define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \
51
+ cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \
52
+ int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \
53
+ const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \
54
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
55
+ int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace
56
+
57
+ template <typename scalar_t>
58
+ inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) {
59
+ TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz(
60
+ handle,
61
+ m,
62
+ n,
63
+ descrA,
64
+ nnzA,
65
+ csrSortedRowPtrA,
66
+ csrSortedColIndA,
67
+ descrB,
68
+ nnzB,
69
+ csrSortedRowPtrB,
70
+ csrSortedColIndB,
71
+ descrC,
72
+ csrSortedRowPtrC,
73
+ nnzTotalDevHostPtr,
74
+ workspace));
75
+ }
76
+
77
+ #define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \
78
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
79
+ const cusparseMatDescr_t descrA, int nnzA, \
80
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
81
+ const int *csrSortedColIndA, const scalar_t *beta, \
82
+ const cusparseMatDescr_t descrB, int nnzB, \
83
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
84
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
85
+ scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \
86
+ void *pBuffer
87
+
88
+ template <typename scalar_t>
89
+ inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) {
90
+ TORCH_INTERNAL_ASSERT(
91
+ false,
92
+ "at::cuda::sparse::csrgeam2: not implemented for ",
93
+ typeid(scalar_t).name());
94
+ }
95
+
96
+ template <>
97
+ void csrgeam2<float>(CUSPARSE_CSRGEAM2_ARGTYPES(float));
98
+ template <>
99
+ void csrgeam2<double>(CUSPARSE_CSRGEAM2_ARGTYPES(double));
100
+ template <>
101
+ void csrgeam2<c10::complex<float>>(
102
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<float>));
103
+ template <>
104
+ void csrgeam2<c10::complex<double>>(
105
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<double>));
106
+
107
+ #define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \
108
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
109
+ cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \
110
+ int kb, int nnzb, const scalar_t *alpha, \
111
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
112
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
113
+ const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc
114
+
115
+ template <typename scalar_t>
116
+ inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) {
117
+ TORCH_INTERNAL_ASSERT(
118
+ false,
119
+ "at::cuda::sparse::bsrmm: not implemented for ",
120
+ typeid(scalar_t).name());
121
+ }
122
+
123
+ template <>
124
+ void bsrmm<float>(CUSPARSE_BSRMM_ARGTYPES(float));
125
+ template <>
126
+ void bsrmm<double>(CUSPARSE_BSRMM_ARGTYPES(double));
127
+ template <>
128
+ void bsrmm<c10::complex<float>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<float>));
129
+ template <>
130
+ void bsrmm<c10::complex<double>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<double>));
131
+
132
+ #define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \
133
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
134
+ cusparseOperation_t transA, int mb, int nb, int nnzb, \
135
+ const scalar_t *alpha, const cusparseMatDescr_t descrA, \
136
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
137
+ int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y
138
+
139
+ template <typename scalar_t>
140
+ inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) {
141
+ TORCH_INTERNAL_ASSERT(
142
+ false,
143
+ "at::cuda::sparse::bsrmv: not implemented for ",
144
+ typeid(scalar_t).name());
145
+ }
146
+
147
+ template <>
148
+ void bsrmv<float>(CUSPARSE_BSRMV_ARGTYPES(float));
149
+ template <>
150
+ void bsrmv<double>(CUSPARSE_BSRMV_ARGTYPES(double));
151
+ template <>
152
+ void bsrmv<c10::complex<float>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<float>));
153
+ template <>
154
+ void bsrmv<c10::complex<double>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<double>));
155
+
156
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
157
+
158
+ #define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \
159
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
160
+ cusparseOperation_t transA, int mb, int nnzb, \
161
+ const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
162
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
163
+ bsrsv2Info_t info, int *pBufferSizeInBytes
164
+
165
+ template <typename scalar_t>
166
+ inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) {
167
+ TORCH_INTERNAL_ASSERT(
168
+ false,
169
+ "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ",
170
+ typeid(scalar_t).name());
171
+ }
172
+
173
+ template <>
174
+ void bsrsv2_bufferSize<float>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float));
175
+ template <>
176
+ void bsrsv2_bufferSize<double>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double));
177
+ template <>
178
+ void bsrsv2_bufferSize<c10::complex<float>>(
179
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<float>));
180
+ template <>
181
+ void bsrsv2_bufferSize<c10::complex<double>>(
182
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<double>));
183
+
184
+ #define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \
185
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
186
+ cusparseOperation_t transA, int mb, int nnzb, \
187
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
188
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
189
+ bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
190
+
191
+ template <typename scalar_t>
192
+ inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) {
193
+ TORCH_INTERNAL_ASSERT(
194
+ false,
195
+ "at::cuda::sparse::bsrsv2_analysis: not implemented for ",
196
+ typeid(scalar_t).name());
197
+ }
198
+
199
+ template <>
200
+ void bsrsv2_analysis<float>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float));
201
+ template <>
202
+ void bsrsv2_analysis<double>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double));
203
+ template <>
204
+ void bsrsv2_analysis<c10::complex<float>>(
205
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<float>));
206
+ template <>
207
+ void bsrsv2_analysis<c10::complex<double>>(
208
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<double>));
209
+
210
+ #define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \
211
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
212
+ cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \
213
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
214
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
215
+ bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \
216
+ cusparseSolvePolicy_t policy, void *pBuffer
217
+
218
+ template <typename scalar_t>
219
+ inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) {
220
+ TORCH_INTERNAL_ASSERT(
221
+ false,
222
+ "at::cuda::sparse::bsrsv2_solve: not implemented for ",
223
+ typeid(scalar_t).name());
224
+ }
225
+
226
+ template <>
227
+ void bsrsv2_solve<float>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float));
228
+ template <>
229
+ void bsrsv2_solve<double>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double));
230
+ template <>
231
+ void bsrsv2_solve<c10::complex<float>>(
232
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<float>));
233
+ template <>
234
+ void bsrsv2_solve<c10::complex<double>>(
235
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<double>));
236
+
237
+ #define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \
238
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
239
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
240
+ int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
241
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
242
+ bsrsm2Info_t info, int *pBufferSizeInBytes
243
+
244
+ template <typename scalar_t>
245
+ inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) {
246
+ TORCH_INTERNAL_ASSERT(
247
+ false,
248
+ "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ",
249
+ typeid(scalar_t).name());
250
+ }
251
+
252
+ template <>
253
+ void bsrsm2_bufferSize<float>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float));
254
+ template <>
255
+ void bsrsm2_bufferSize<double>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double));
256
+ template <>
257
+ void bsrsm2_bufferSize<c10::complex<float>>(
258
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<float>));
259
+ template <>
260
+ void bsrsm2_bufferSize<c10::complex<double>>(
261
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<double>));
262
+
263
+ #define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \
264
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
265
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
266
+ int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
267
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
268
+ bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
269
+
270
+ template <typename scalar_t>
271
+ inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) {
272
+ TORCH_INTERNAL_ASSERT(
273
+ false,
274
+ "at::cuda::sparse::bsrsm2_analysis: not implemented for ",
275
+ typeid(scalar_t).name());
276
+ }
277
+
278
+ template <>
279
+ void bsrsm2_analysis<float>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float));
280
+ template <>
281
+ void bsrsm2_analysis<double>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double));
282
+ template <>
283
+ void bsrsm2_analysis<c10::complex<float>>(
284
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<float>));
285
+ template <>
286
+ void bsrsm2_analysis<c10::complex<double>>(
287
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<double>));
288
+
289
+ #define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \
290
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
291
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
292
+ int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \
293
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
294
+ int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \
295
+ scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer
296
+
297
+ template <typename scalar_t>
298
+ inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) {
299
+ TORCH_INTERNAL_ASSERT(
300
+ false,
301
+ "at::cuda::sparse::bsrsm2_solve: not implemented for ",
302
+ typeid(scalar_t).name());
303
+ }
304
+
305
+ template <>
306
+ void bsrsm2_solve<float>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float));
307
+ template <>
308
+ void bsrsm2_solve<double>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double));
309
+ template <>
310
+ void bsrsm2_solve<c10::complex<float>>(
311
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<float>));
312
+ template <>
313
+ void bsrsm2_solve<c10::complex<double>>(
314
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<double>));
315
+
316
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
317
+
318
+ } // namespace at::cuda::sparse
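The templates above follow cuSPARSE's usual two-phase convention: query a workspace size first, then run with a caller-provided buffer. A condensed sketch for float inputs, where the handle, descriptors, scalars and CSR arrays are assumed to be prepared elsewhere and the csrgeam2Nnz step that fills csrSortedRowPtrC and the C nnz count is elided:

size_t buffer_bytes = 0;
at::cuda::sparse::csrgeam2_bufferSizeExt<float>(
    handle, m, n, &alpha, descrA, nnzA, valA, rowPtrA, colIndA,
    &beta, descrB, nnzB, valB, rowPtrB, colIndB,
    descrC, valC, rowPtrC, colIndC, &buffer_bytes);
auto workspace = c10::cuda::CUDACachingAllocator::get()->allocate(buffer_bytes);
// ... csrgeam2Nnz<float>(...) would populate rowPtrC and nnzTotalDevHostPtr here ...
at::cuda::sparse::csrgeam2<float>(
    handle, m, n, &alpha, descrA, nnzA, valA, rowPtrA, colIndA,
    &beta, descrB, nnzB, valB, rowPtrB, colIndB,
    descrC, valC, rowPtrC, colIndC, workspace.get());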
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/util/Half.h>
5
+
6
+ #include <cuda.h>
7
+ #include <cuda_runtime.h>
8
+ #include <cuda_fp16.h>
9
+
10
+ namespace at {
11
+ template <>
12
+ inline __half* Tensor::data() const {
13
+ return reinterpret_cast<__half*>(data<Half>());
14
+ }
15
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContext.h>
4
+
5
+ namespace at::cuda {
6
+
7
+ // Check if every tensor in a list of tensors matches the current
8
+ // device.
9
+ inline bool check_device(ArrayRef<Tensor> ts) {
10
+ if (ts.empty()) {
11
+ return true;
12
+ }
13
+ Device curDevice = Device(kCUDA, current_device());
14
+ for (const Tensor& t : ts) {
15
+ if (t.device() != curDevice) return false;
16
+ }
17
+ return true;
18
+ }
19
+
20
+ } // namespace at::cuda
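A typical call site for check_device, assuming self and other are the tensor operands of a CUDA kernel wrapper:

TORCH_CHECK(at::cuda::check_device({self, other}),
            "expected all inputs to be on the current CUDA device");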
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h ADDED
@@ -0,0 +1,37 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/cuda/CUDAStream.h>
5
+
6
+ namespace at::cuda {
7
+
8
+ //
9
+ // A caching allocator for CUDA host allocations (pinned memory).
10
+ //
11
+ // This provides a drop-in replacement for THCudaHostAllocator, which re-uses
12
+ // freed pinned (page-locked) memory allocations. This avoids device
13
+ // synchronizations due to cudaFreeHost calls.
14
+ //
15
+ // To ensure correct behavior, THCCachingHostAllocator_recordEvent must be
16
+ // called anytime a pointer from this allocator is used in a cudaMemcpyAsync
17
+ // call between host and device, and passed the corresponding context from the
18
+ // allocation. This is currently invoked by at::native::copy_kernel_cuda.
19
+ //
20
+ // Note that this allocator does not split larger allocations into smaller
21
+ // blocks, unlike the caching device allocator.
22
+ //
23
+ TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator();
24
+
25
+ // Records an event in the specified stream. The allocation corresponding to the
26
+ // input `ptr`/`ctx` will not be re-used until the event has occurred.
27
+ TORCH_CUDA_CPP_API bool
28
+ CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream);
29
+
30
+ // Releases cached pinned memory allocations via cudaFreeHost
31
+ TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache();
32
+
33
+ inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) {
34
+ return getCachingHostAllocator()->allocate(size);
35
+ }
36
+
37
+ } // namespace at::cuda
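A short sketch of the intended protocol: stage data in a pinned block, launch the async copy, then record an event so the block is not recycled before the copy completes. src, dst_dev, nbytes and stream are assumed to come from the caller, and the usual includes (<cstring>, c10/cuda/CUDAException.h) are elided:

at::DataPtr pinned = at::cuda::HostAlloc(nbytes);
std::memcpy(pinned.get(), src, nbytes);                        // stage into pinned memory
C10_CUDA_CHECK(cudaMemcpyAsync(dst_dev, pinned.get(), nbytes,
                               cudaMemcpyHostToDevice, stream));
at::cuda::CachingHostAllocator_recordEvent(
    pinned.get(), pinned.get_context(), stream);               // defer reuse until the copy is done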
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh ADDED
@@ -0,0 +1,121 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <c10/util/complex.h>
5
+ #include <c10/util/Half.h>
6
+
7
+ __device__ __forceinline__ unsigned int ACTIVE_MASK()
8
+ {
9
+ #if !defined(USE_ROCM)
10
+ return __activemask();
11
+ #else
12
+ // will be ignored anyway
13
+ return 0xffffffff;
14
+ #endif
15
+ }
16
+
17
+ __device__ __forceinline__ void WARP_SYNC(unsigned mask = 0xffffffff) {
18
+ #if !defined(USE_ROCM)
19
+ return __syncwarp(mask);
20
+ #endif
21
+ }
22
+
23
+ #if defined(USE_ROCM)
24
+ __device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate)
25
+ {
26
+ return __ballot(predicate);
27
+ }
28
+ #else
29
+ __device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff)
30
+ {
31
+ #if !defined(USE_ROCM)
32
+ return __ballot_sync(mask, predicate);
33
+ #else
34
+ return __ballot(predicate);
35
+ #endif
36
+ }
37
+ #endif
38
+
39
+ template <typename T>
40
+ __device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
41
+ {
42
+ #if !defined(USE_ROCM)
43
+ return __shfl_xor_sync(mask, value, laneMask, width);
44
+ #else
45
+ return __shfl_xor(value, laneMask, width);
46
+ #endif
47
+ }
48
+
49
+ template <typename T>
50
+ __device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff)
51
+ {
52
+ #if !defined(USE_ROCM)
53
+ return __shfl_sync(mask, value, srcLane, width);
54
+ #else
55
+ return __shfl(value, srcLane, width);
56
+ #endif
57
+ }
58
+
59
+ template <typename T>
60
+ __device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
61
+ {
62
+ #if !defined(USE_ROCM)
63
+ return __shfl_up_sync(mask, value, delta, width);
64
+ #else
65
+ return __shfl_up(value, delta, width);
66
+ #endif
67
+ }
68
+
69
+ template <typename T>
70
+ __device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
71
+ {
72
+ #if !defined(USE_ROCM)
73
+ return __shfl_down_sync(mask, value, delta, width);
74
+ #else
75
+ return __shfl_down(value, delta, width);
76
+ #endif
77
+ }
78
+
79
+ #if defined(USE_ROCM)
80
+ template<>
81
+ __device__ __forceinline__ int64_t WARP_SHFL_DOWN<int64_t>(int64_t value, unsigned int delta, int width , unsigned int mask)
82
+ {
83
+ // HIP's __shfl_down doesn't support int64_t; trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
84
+ int2 a = *reinterpret_cast<int2*>(&value);
85
+ a.x = __shfl_down(a.x, delta);
86
+ a.y = __shfl_down(a.y, delta);
87
+ return *reinterpret_cast<int64_t*>(&a);
88
+ }
89
+ #endif
90
+
91
+ template<>
92
+ __device__ __forceinline__ c10::Half WARP_SHFL_DOWN<c10::Half>(c10::Half value, unsigned int delta, int width, unsigned int mask)
93
+ {
94
+ return c10::Half(WARP_SHFL_DOWN<unsigned short>(value.x, delta, width, mask), c10::Half::from_bits_t{});
95
+ }
96
+
97
+ template <typename T>
98
+ __device__ __forceinline__ c10::complex<T> WARP_SHFL_DOWN(c10::complex<T> value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
99
+ {
100
+ #if !defined(USE_ROCM)
101
+ return c10::complex<T>(
102
+ __shfl_down_sync(mask, value.real_, delta, width),
103
+ __shfl_down_sync(mask, value.imag_, delta, width));
104
+ #else
105
+ return c10::complex<T>(
106
+ __shfl_down(value.real_, delta, width),
107
+ __shfl_down(value.imag_, delta, width));
108
+ #endif
109
+ }
110
+
111
+ /**
112
+ * For CC 3.5+, perform a load using __ldg
113
+ */
114
+ template <typename T>
115
+ __device__ __forceinline__ T doLdg(const T* p) {
116
+ #if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM)
117
+ return __ldg(p);
118
+ #else
119
+ return *p;
120
+ #endif
121
+ }
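The shuffle wrappers above exist so warp-level reductions can be written once for CUDA and ROCm; a minimal sum reduction built on WARP_SHFL_DOWN (C10_WARP_SIZE comes from c10/macros/Macros.h and is assumed to be included by the user):

template <typename T>
__device__ __forceinline__ T warpReduceSum(T val) {
  for (int offset = C10_WARP_SIZE / 2; offset > 0; offset /= 2) {
    val += WARP_SHFL_DOWN(val, offset);  // add the value held `offset` lanes above
  }
  return val;  // lane 0 ends up with the sum of the whole warp
}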
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h ADDED
@@ -0,0 +1,44 @@
1
+ #pragma once
2
+ #include <ATen/core/TensorBase.h>
3
+
4
+ namespace at::detail {
5
+
6
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
7
+ IntArrayRef size,
8
+ ScalarType dtype,
9
+ c10::optional<Device> device_opt,
10
+ c10::optional<c10::MemoryFormat> memory_format_opt);
11
+
12
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
13
+ IntArrayRef size,
14
+ c10::optional<ScalarType> dtype_opt,
15
+ c10::optional<Layout> layout_opt,
16
+ c10::optional<Device> device_opt,
17
+ c10::optional<bool> pin_memory_opt,
18
+ c10::optional<c10::MemoryFormat> memory_format_opt);
19
+
20
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
21
+ IntArrayRef size,
22
+ const TensorOptions &options);
23
+
24
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
25
+ IntArrayRef size,
26
+ IntArrayRef stride,
27
+ ScalarType dtype,
28
+ c10::optional<Device> device_opt);
29
+
30
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
31
+ IntArrayRef size,
32
+ IntArrayRef stride,
33
+ c10::optional<ScalarType> dtype_opt,
34
+ c10::optional<Layout> layout_opt,
35
+ c10::optional<Device> device_opt,
36
+ c10::optional<bool> pin_memory_opt);
37
+
38
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
39
+ IntArrayRef size,
40
+ IntArrayRef stride,
41
+ const TensorOptions &options);
42
+
43
+
44
+ } // namespace at::detail
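These factories back at::empty and at::empty_strided on CUDA; a direct call looks like the following, with sizes chosen purely for illustration:

at::TensorBase t = at::detail::empty_cuda(
    {8, 16},          // sizes
    at::kFloat,       // dtype
    c10::nullopt,     // device: defaults to the current CUDA device
    c10::nullopt);    // memory format: defaults to contiguous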
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh ADDED
@@ -0,0 +1,121 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <limits.h>
5
+ #include <math.h>
6
+ #include <float.h>
7
+
8
+ // NumericLimits.cuh is a holder for numeric limits definitions of commonly used
9
+ // types. This header is very specific to ROCm HIP and may be removed in the future.
10
+ // This header is derived from the legacy THCNumerics.cuh.
11
+
12
+ // The lower_bound and upper_bound constants are same as lowest and max for
13
+ // integral types, but are -inf and +inf for floating point types. They are
14
+ // useful in implementing min, max, etc.
15
+
16
+ namespace at {
17
+
18
+ template <typename T>
19
+ struct numeric_limits {
20
+ };
21
+
22
+ // WARNING: the following at::numeric_limits definitions are there only to support
23
+ // HIP compilation for the moment. Use std::numeric_limits if you are not
24
+ // compiling for ROCm.
25
+ // from @colesbury: "The functions on numeric_limits aren't marked with
26
+ // __device__ which is why they don't work with ROCm. CUDA allows them
27
+ // because they're constexpr."
28
+
29
+ namespace {
30
+ // ROCm doesn't like INFINITY too.
31
+ constexpr double inf = INFINITY;
32
+ }
33
+
34
+ template <>
35
+ struct numeric_limits<bool> {
36
+ static inline __host__ __device__ bool lowest() { return false; }
37
+ static inline __host__ __device__ bool max() { return true; }
38
+ static inline __host__ __device__ bool lower_bound() { return false; }
39
+ static inline __host__ __device__ bool upper_bound() { return true; }
40
+ };
41
+
42
+ template <>
43
+ struct numeric_limits<uint8_t> {
44
+ static inline __host__ __device__ uint8_t lowest() { return 0; }
45
+ static inline __host__ __device__ uint8_t max() { return UINT8_MAX; }
46
+ static inline __host__ __device__ uint8_t lower_bound() { return 0; }
47
+ static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; }
48
+ };
49
+
50
+ template <>
51
+ struct numeric_limits<int8_t> {
52
+ static inline __host__ __device__ int8_t lowest() { return INT8_MIN; }
53
+ static inline __host__ __device__ int8_t max() { return INT8_MAX; }
54
+ static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; }
55
+ static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
56
+ };
57
+
58
+ template <>
59
+ struct numeric_limits<int16_t> {
60
+ static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
61
+ static inline __host__ __device__ int16_t max() { return INT16_MAX; }
62
+ static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; }
63
+ static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
64
+ };
65
+
66
+ template <>
67
+ struct numeric_limits<int32_t> {
68
+ static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
69
+ static inline __host__ __device__ int32_t max() { return INT32_MAX; }
70
+ static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; }
71
+ static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
72
+ };
73
+
74
+ template <>
75
+ struct numeric_limits<int64_t> {
76
+ #ifdef _MSC_VER
77
+ static inline __host__ __device__ int64_t lowest() { return _I64_MIN; }
78
+ static inline __host__ __device__ int64_t max() { return _I64_MAX; }
79
+ static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; }
80
+ static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; }
81
+ #else
82
+ static inline __host__ __device__ int64_t lowest() { return INT64_MIN; }
83
+ static inline __host__ __device__ int64_t max() { return INT64_MAX; }
84
+ static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; }
85
+ static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; }
86
+ #endif
87
+ };
88
+
89
+ template <>
90
+ struct numeric_limits<at::Half> {
91
+ static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); }
92
+ static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); }
93
+ static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); }
94
+ static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); }
95
+ };
96
+
97
+ template <>
98
+ struct numeric_limits<at::BFloat16> {
99
+ static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); }
100
+ static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); }
101
+ static inline __host__ __device__ at::BFloat16 lower_bound() { return at::BFloat16(0xFF80, at::BFloat16::from_bits()); }
102
+ static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); }
103
+ };
104
+
105
+ template <>
106
+ struct numeric_limits<float> {
107
+ static inline __host__ __device__ float lowest() { return -FLT_MAX; }
108
+ static inline __host__ __device__ float max() { return FLT_MAX; }
109
+ static inline __host__ __device__ float lower_bound() { return -static_cast<float>(inf); }
110
+ static inline __host__ __device__ float upper_bound() { return static_cast<float>(inf); }
111
+ };
112
+
113
+ template <>
114
+ struct numeric_limits<double> {
115
+ static inline __host__ __device__ double lowest() { return -DBL_MAX; }
116
+ static inline __host__ __device__ double max() { return DBL_MAX; }
117
+ static inline __host__ __device__ double lower_bound() { return -inf; }
118
+ static inline __host__ __device__ double upper_bound() { return inf; }
119
+ };
120
+
121
+ } // namespace at
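A small example of why lower_bound and upper_bound exist alongside lowest and max: a running maximum in device code can be seeded uniformly for integral and floating types (for floats the seed is -inf rather than -FLT_MAX):

template <typename T>
__device__ T threadLocalMax(const T* data, int n) {
  T best = at::numeric_limits<T>::lower_bound();   // identity element for max
  for (int i = threadIdx.x; i < n; i += blockDim.x) {
    best = data[i] > best ? data[i] : best;
  }
  return best;  // per-thread partial result; combine across the block separately
}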
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h ADDED
@@ -0,0 +1,11 @@
1
+ #include <c10/macros/Macros.h>
2
+ #include <cstdint>
3
+
4
+ namespace at::cuda {
5
+ namespace detail {
6
+ void init_p2p_access_cache(int64_t num_devices);
7
+ }
8
+
9
+ TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev);
10
+
11
+ } // namespace at::cuda
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h ADDED
@@ -0,0 +1,5 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
+ #include <ATen/cuda/detail/PhiloxCudaStateRaw.cuh>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh ADDED
@@ -0,0 +1,4 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/PhiloxCudaState.h>
4
+ #include <ATen/cuda/detail/UnpackRaw.cuh>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh ADDED
@@ -0,0 +1,78 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ceil_div.h>
4
+ #include <ATen/cuda/DeviceUtils.cuh>
5
+ #include <ATen/cuda/AsmUtils.cuh>
6
+ #include <c10/macros/Macros.h>
7
+
8
+ // Collection of in-kernel scan / prefix sum utilities
9
+
10
+ namespace at::cuda {
11
+
12
+ // Inclusive prefix sum for binary vars using intra-warp voting +
13
+ // shared memory
14
+ template <typename T, bool KillWARDependency, class BinaryFunction>
15
+ __device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
16
+ // Within-warp, we use warp voting.
17
+ #if defined (USE_ROCM)
18
+ unsigned long long int vote = WARP_BALLOT(in);
19
+ T index = __popcll(getLaneMaskLe() & vote);
20
+ T carry = __popcll(vote);
21
+ #else
22
+ T vote = WARP_BALLOT(in);
23
+ T index = __popc(getLaneMaskLe() & vote);
24
+ T carry = __popc(vote);
25
+ #endif
26
+
27
+ int warp = threadIdx.x / C10_WARP_SIZE;
28
+
29
+ // Per each warp, write out a value
30
+ if (getLaneId() == 0) {
31
+ smem[warp] = carry;
32
+ }
33
+
34
+ __syncthreads();
35
+
36
+ // Sum across warps in one thread. This appears to be faster than a
37
+ // warp shuffle scan for CC 3.0+
38
+ if (threadIdx.x == 0) {
39
+ int current = 0;
40
+ for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
41
+ T v = smem[i];
42
+ smem[i] = binop(smem[i], current);
43
+ current = binop(current, v);
44
+ }
45
+ }
46
+
47
+ __syncthreads();
48
+
49
+ // load the carry from the preceding warp
50
+ if (warp >= 1) {
51
+ index = binop(index, smem[warp - 1]);
52
+ }
53
+
54
+ *out = index;
55
+
56
+ if (KillWARDependency) {
57
+ __syncthreads();
58
+ }
59
+ }
60
+
61
+ // Exclusive prefix sum for binary vars using intra-warp voting +
62
+ // shared memory
63
+ template <typename T, bool KillWARDependency, class BinaryFunction>
64
+ __device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
65
+ inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
66
+
67
+ // Inclusive to exclusive
68
+ *out -= (T) in;
69
+
70
+ // The outgoing carry for all threads is the last warp's sum
71
+ *carry = smem[at::ceil_div<int>(blockDim.x, C10_WARP_SIZE) - 1];
72
+
73
+ if (KillWARDependency) {
74
+ __syncthreads();
75
+ }
76
+ }
77
+
78
+ } // namespace at::cuda
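A hedged compaction sketch using the exclusive variant: each thread contributes a 0/1 flag, the scan returns its output slot, and the carry is the number of kept elements in the block. Shared-memory sizing (at least blockDim.x / C10_WARP_SIZE entries) and the surrounding kernel are left to the caller:

struct AddInts {
  __device__ int operator()(int a, int b) const { return a + b; }
};

__device__ void compactStep(int* smem, bool keep, int value, int* out, int block_base) {
  int slot = 0, kept_in_block = 0;
  at::cuda::exclusiveBinaryPrefixScan<int, true, AddInts>(
      smem, keep, &slot, &kept_in_block, AddInts{});
  if (keep) {
    out[block_base + slot] = value;  // slot is this thread's rank among kept elements
  }
}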
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+ #include <c10/macros/Export.h>
3
+ #include <cstdint>
4
+
5
+ namespace at::cuda {
6
+
7
+ // enqueues a kernel that spins for the specified number of cycles
8
+ TORCH_CUDA_CU_API void sleep(int64_t cycles);
9
+
10
+ } // namespace at::cuda
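This is mostly used by tests and benchmarks to keep the GPU busy; for example, enqueueing roughly a millisecond of spinning on a device clocked near 1 GHz (the cycle count is only approximate):

at::cuda::sleep(1000000);  // spins ~1e6 cycles on the current stream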
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <c10/cuda/CUDACachingAllocator.h>
5
+
6
+ namespace at::cuda {
7
+
8
+ /// Allocator for Thrust to re-route its internal device allocations
9
+ /// to the c10 CUDA caching allocator
10
+ class ThrustAllocator {
11
+ public:
12
+ typedef char value_type;
13
+
14
+ char* allocate(std::ptrdiff_t size) {
15
+ return static_cast<char*>(c10::cuda::CUDACachingAllocator::raw_alloc(size));
16
+ }
17
+
18
+ void deallocate(char* p, size_t size) {
19
+ c10::cuda::CUDACachingAllocator::raw_delete(p);
20
+ }
21
+ };
22
+
23
+ } // namespace at::cuda
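The allocator is intended to be handed to Thrust's execution policy so temporary buffers come from the caching allocator rather than raw cudaMalloc; a sketch, assuming keys_begin and keys_end are device iterators and the thrust headers (<thrust/execution_policy.h>, <thrust/sort.h>) are included:

at::cuda::ThrustAllocator thrust_alloc;
auto stream = at::cuda::getCurrentCUDAStream();
auto policy = thrust::cuda::par(thrust_alloc).on(stream);
thrust::sort(policy, keys_begin, keys_end);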
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh ADDED
@@ -0,0 +1,413 @@
1
+ #pragma once
2
+ #include <ATen/cuda/cub.h>
3
+
4
+ #include <cstddef>
5
+ #include <type_traits>
6
+ #include <iterator>
7
+ #include <limits>
8
+
9
+ #include <ATen/cuda/cub_definitions.cuh>
10
+
11
+ #if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
12
+
13
+ #include <cub/cub.cuh>
14
+
15
+ #else
16
+
17
+ // include cub in a safe manner, see:
18
+ // https://github.com/pytorch/pytorch/pull/55292
19
+ #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings
20
+ #undef CUB_NS_PREFIX
21
+ #undef CUB_NS_QUALIFIER
22
+ #define CUB_NS_PREFIX namespace at_cuda_detail {
23
+ #define CUB_NS_POSTFIX }
24
+ #define CUB_NS_QUALIFIER ::at_cuda_detail::cub
25
+ #include <cub/cub.cuh>
26
+ #undef CUB_NS_POSTFIX
27
+ #undef CUB_NS_PREFIX
28
+ #undef CUB_NS_QUALIFIER
29
+
30
+ #endif
31
+
32
+ #include <ATen/cuda/Exceptions.h>
33
+ #include <c10/cuda/CUDACachingAllocator.h>
34
+ #include <c10/cuda/CUDAStream.h>
35
+
36
+ // Handles CUB's two-call protocol: query the temporary-storage size with a null buffer, allocate it, then run the real call.
37
+ #define CUB_WRAPPER(func, ...) do { \
38
+ size_t temp_storage_bytes = 0; \
39
+ func(nullptr, temp_storage_bytes, __VA_ARGS__); \
40
+ auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get(); \
41
+ auto temp_storage = caching_allocator.allocate(temp_storage_bytes); \
42
+ func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__); \
43
+ AT_CUDA_CHECK(cudaGetLastError()); \
44
+ } while (false)
45
+
46
+ #ifdef USE_ROCM
47
+ #define NO_ROCM(x)
48
+ #define ROCM_HIPCUB(x) ::hipcub
49
+ #else
50
+ #define NO_ROCM(x) x
51
+ #define ROCM_HIPCUB(x) x
52
+ #endif
53
+
54
+ #if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || \
55
+ (defined(USE_ROCM) && ROCM_VERSION >= 40500)
56
+
57
+ #if !defined(USE_ROCM)
58
+ namespace at_cuda_detail {
59
+ #endif
60
+
61
+ // backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16
62
+
63
+ template <>
64
+ struct ROCM_HIPCUB(cub)::FpLimits<c10::BFloat16>
65
+ {
66
+ static __host__ __device__ __forceinline__ c10::BFloat16 Max() {
67
+ unsigned short max_word = 0x7F7F;
68
+ return reinterpret_cast<c10::BFloat16&>(max_word);
69
+ }
70
+
71
+ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() {
72
+ unsigned short lowest_word = 0xFF7F;
73
+ return reinterpret_cast<c10::BFloat16&>(lowest_word);
74
+ }
75
+ };
76
+
77
+ template <>
78
+ struct ROCM_HIPCUB(cub)::NumericTraits<c10::BFloat16>:
79
+ ROCM_HIPCUB(cub)::BaseTraits<ROCM_HIPCUB(cub)::FLOATING_POINT, true, false, unsigned short, c10::BFloat16> {};
80
+
81
+ #if !defined(USE_ROCM)
82
+ } // namespace at_cuda_detail
83
+ #endif
84
+
85
+ #endif
86
+
87
+ #if !defined(USE_ROCM)
88
+ namespace at::native {
89
+ namespace cub = ::at_cuda_detail::cub;
90
+ } // namespace at::native
91
+ #endif
92
+
93
+ namespace at::cuda::cub {
94
+
95
+ namespace detail {
96
+
97
+ template<typename T>
98
+ struct cuda_type {
99
+ using type = T;
100
+ };
101
+ template<>
102
+ struct cuda_type<c10::Half> {
103
+ using type = __half;
104
+ };
105
+
106
+ #if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16()
107
+
108
+ template<>
109
+ struct cuda_type<c10::BFloat16> {
110
+ using type = __nv_bfloat16;
111
+ };
112
+
113
+ #elif (defined(USE_ROCM) && ROCM_VERSION >= 40500)
114
+
115
+ template<>
116
+ struct cuda_type<c10::BFloat16> {
117
+ using type = hip_bfloat16;
118
+ };
119
+
120
+ #endif
121
+
122
+ } // namespace detail
123
+
124
+ template<typename key_t, typename value_t, typename OffsetIteratorT>
125
+ inline void segmented_sort_pairs(
126
+ const key_t *keys_in, key_t *keys_out,
127
+ const value_t *values_in, value_t *values_out,
128
+ int64_t num_elements, int64_t num_segments,
129
+ OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
130
+ bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8
131
+ ) {
132
+ TORCH_CHECK(num_elements <= std::numeric_limits<int>::max(),
133
+ "cub sort does not support sorting more than INT_MAX elements");
134
+ TORCH_CHECK(num_segments <= std::numeric_limits<int>::max(),
135
+ "cub sort does not support sorting more than INT_MAX elements");
136
+ using key_t_ = typename detail::cuda_type<key_t>::type;
137
+
138
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
139
+ c10::DataPtr keys_out_owner;
140
+
141
+ if (keys_out == nullptr) {
142
+ keys_out_owner = allocator->allocate(num_elements * sizeof(key_t));
143
+ keys_out = reinterpret_cast<key_t *>(keys_out_owner.get());
144
+ }
145
+
146
+ const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
147
+ key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);
148
+
149
+ if (descending) {
150
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending,
151
+ keys_in_, keys_out_, values_in, values_out,
152
+ num_elements, num_segments, begin_offsets, end_offsets,
153
+ begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
154
+ } else {
155
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs,
156
+ keys_in_, keys_out_, values_in, values_out,
157
+ num_elements, num_segments, begin_offsets, end_offsets,
158
+ begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
159
+ }
160
+ }
161
+
162
+ #if CUB_SUPPORTS_UNIQUE_BY_KEY()
163
+ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename KeysOutputIteratorT, typename ValuesOutputIteratorT, typename NumSelectedIteratorT>
164
+ inline void unique_by_key(
165
+ KeysInputIteratorT keys_in, ValuesInputIteratorT values_in,
166
+ KeysOutputIteratorT keys_out, ValuesOutputIteratorT values_out,
167
+ NumSelectedIteratorT num_selected, int64_t num_input_items)
168
+ {
169
+ // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed.
170
+ constexpr bool null_keys_out = std::is_same<KeysOutputIteratorT, std::nullptr_t>::value;
171
+ using KeyT = typename std::iterator_traits<KeysInputIteratorT>::value_type;
172
+ using RealKeysOutputIteratorT = typename std::conditional<null_keys_out, KeyT *, KeysOutputIteratorT>::type;
173
+ RealKeysOutputIteratorT keys_out_;
174
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
175
+ c10::DataPtr keys_out_owner;
176
+ if constexpr (null_keys_out) {
177
+ keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT));
178
+ keys_out_ = static_cast<KeyT *>(keys_out_owner.get());
179
+ } else {
180
+ keys_out_ = keys_out;
181
+ }
182
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey,
183
+ keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream());
184
+ }
185
+ #endif
186
+
187
+ namespace impl {
188
+
189
+ template<typename InputIteratorT1, typename InputIteratorT2, typename OutputIteratorT, class ScanOpT>
190
+ C10_LAUNCH_BOUNDS_1(1)
191
+ __global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){
192
+ // NOTE: out here not the final scan output, but an intermediate of the accumulation type.
193
+ using acc_t = typename std::iterator_traits<OutputIteratorT>::value_type;
194
+ *out = scan_op(static_cast<acc_t>(*a), static_cast<acc_t>(*b));
195
+ }
196
+
197
+ #if !CUB_SUPPORTS_FUTURE_VALUE()
198
+ template<typename ValueT, typename InputIteratorT>
199
+ struct chained_iterator {
200
+ using iterator_category = std::random_access_iterator_tag;
201
+ using difference_type = std::ptrdiff_t;
202
+ using value_type = ValueT;
203
+ using pointer = ValueT*;
204
+ using reference = ValueT&;
205
+
206
+ InputIteratorT iter;
207
+ ValueT *first;
208
+ difference_type offset = 0;
209
+
210
+ __device__ ValueT operator[](difference_type i) {
211
+ i += offset;
212
+ if (i == 0) {
213
+ return *first;
214
+ } else {
215
+ return ValueT(iter[i - 1]);
216
+ }
217
+ }
218
+ __device__ chained_iterator operator+(difference_type i) {
219
+ return chained_iterator{iter, first, i};
220
+ }
221
+ __device__ ValueT operator*() {
222
+ return (*this)[0];
223
+ }
224
+ };
225
+ #endif
226
+
227
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
228
+ // so split at int_max/2
229
+ constexpr int max_cub_size = std::numeric_limits<int>::max() / 2 + 1; // 2**30
230
+ }
231
+
232
+ // non synchronizing cub call
233
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
234
+ // so split at int_max/2
235
+ template<typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, int max_cub_size=impl::max_cub_size>
236
+ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) {
237
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 50000)
238
+ //For ROCm, use hipCUB chained iterators
239
+ CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan,
240
+ input,
241
+ output,
242
+ scan_op,
243
+ num_items,
244
+ at::cuda::getCurrentCUDAStream());
245
+ C10_HIP_KERNEL_LAUNCH_CHECK();
246
+ #else
247
+ // non synchronizing cub call
248
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
249
+ // so split at int_max/2
250
+ int size_cub = std::min<int64_t>(num_items, max_cub_size);
251
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
252
+ input,
253
+ output,
254
+ scan_op,
255
+ size_cub,
256
+ at::cuda::getCurrentCUDAStream());
257
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
258
+ using input_t = typename std::iterator_traits<InputIteratorT>::value_type;
259
+ for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) {
260
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
261
+ c10::DataPtr first_elem = allocator->allocate(sizeof(input_t));
262
+ auto first_elem_ptr = reinterpret_cast<input_t *>(first_elem.get());
263
+
264
+ size_cub = std::min<int64_t>(num_items - i, max_cub_size);
265
+ impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
266
+ output + i - 1,
267
+ input + i,
268
+ first_elem_ptr,
269
+ scan_op);
270
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
271
+ #if !CUB_SUPPORTS_FUTURE_VALUE()
272
+ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator<InputIteratorT>;
273
+ using tuple = typename ArgIndexInputIterator::value_type;
274
+ auto input_iter_transform = [=] __device__ (const tuple &x)->input_t {
275
+ if (x.key == 0) {
276
+ return *first_elem_ptr;
277
+ } else {
278
+ return x.value;
279
+ }
280
+ };
281
+ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator<input_t, decltype(input_iter_transform), ArgIndexInputIterator>(
282
+ ArgIndexInputIterator(input + i), input_iter_transform);
283
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
284
+ input_,
285
+ output + i,
286
+ scan_op,
287
+ size_cub,
288
+ at::cuda::getCurrentCUDAStream());
289
+ #else
290
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
291
+ input + i + 1,
292
+ output + i,
293
+ scan_op,
294
+ ::at_cuda_detail::cub::FutureValue<input_t>(first_elem_ptr),
295
+ size_cub,
296
+ at::cuda::getCurrentCUDAStream());
297
+ #endif
298
+ }
299
+ #endif
300
+ }
301
+
302
+ template<typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitValueT, int max_cub_size=impl::max_cub_size>
303
+ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) {
304
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 50000)
305
+ //For ROCm, use hipCUB chained iterators
306
+ CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan,
307
+ input,
308
+ output,
309
+ scan_op,
310
+ init_value,
311
+ num_items,
312
+ at::cuda::getCurrentCUDAStream());
313
+ C10_HIP_KERNEL_LAUNCH_CHECK();
314
+ #else
315
+ // non synchronizing cub call
316
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
317
+ // so split at int_max/2
318
+ int size_cub = std::min<int64_t>(num_items, max_cub_size);
319
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
320
+ input,
321
+ output,
322
+ scan_op,
323
+ init_value,
324
+ size_cub,
325
+ at::cuda::getCurrentCUDAStream());
326
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
327
+ for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) {
328
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
329
+ c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT));
330
+ auto first_elem_ptr = reinterpret_cast<InitValueT *>(first_elem.get());
331
+
332
+ size_cub = std::min<int64_t>(num_items - i, max_cub_size);
333
+ impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
334
+ output + i - 1,
335
+ input + i - 1,
336
+ first_elem_ptr,
337
+ scan_op);
338
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
339
+ #if !CUB_SUPPORTS_FUTURE_VALUE()
340
+ auto input_ = impl::chained_iterator<InitValueT, InputIteratorT>{
341
+ input + i, first_elem_ptr};
342
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
343
+ input_,
344
+ output + i,
345
+ scan_op,
346
+ size_cub,
347
+ at::cuda::getCurrentCUDAStream());
348
+ #else
349
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
350
+ input + i,
351
+ output + i,
352
+ scan_op,
353
+ ::at_cuda_detail::cub::FutureValue<InitValueT>(first_elem_ptr),
354
+ size_cub,
355
+ at::cuda::getCurrentCUDAStream());
356
+ #endif
357
+ }
358
+ #endif
359
+ }
360
+
361
+ #if CUB_SUPPORTS_SCAN_BY_KEY()
362
+
363
+ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT>
364
+ inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) {
365
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
366
+ "cub InclusiveSumByKey does not support more than INT_MAX elements");
367
+ CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey,
368
+ keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream());
369
+ }
370
+
371
+ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT, typename ScanOpT>
372
+ inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) {
373
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
374
+ "cub InclusiveScanByKey does not support more than INT_MAX elements");
375
+ CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey,
376
+ keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream());
377
+ }
378
+
379
+ #endif
380
+
381
+ template <typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT>
382
+ void unique(InputIteratorT input, OutputIteratorT output,
383
+ NumSelectedIteratorT num_selected_out, int64_t num_items) {
384
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
385
+ "cub unique does not support more than INT_MAX elements");
386
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique,
387
+ input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream());
388
+ }
389
+
390
+ template <typename InputIteratorT, typename OutputIteratorT, typename CountsOutputIteratorT,
391
+ typename LengthOutputIteratorT>
392
+ void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out,
393
+ LengthOutputIteratorT length_out, int64_t num_items) {
394
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
395
+ "cub run_length_encode does not support more than INT_MAX elements");
396
+ CUB_WRAPPER(
397
+ NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode,
398
+ input, output, counts_out, length_out, num_items,
399
+ at::cuda::getCurrentCUDAStream());
400
+ }
401
+
402
+ template <typename InputIteratorT, typename OutputIteratorT, typename ReductionOpT, typename T>
403
+ void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T init) {
404
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
405
+ "cub reduce does not support more than INT_MAX elements");
406
+ CUB_WRAPPER(
407
+ NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce,
408
+ input, output, num_items, op, init,
409
+ at::cuda::getCurrentCUDAStream());
410
+
411
+ }
412
+
413
+ } // namespace at::cuda::cub
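The chunked scans above exist because cub's DeviceScan effectively tops out near INT_MAX items, so the input is processed in max_cub_size pieces and the running total is carried into each new piece. Below is a minimal host-side sketch of the same carry-over scheme; chunked_inclusive_scan, the operator, and the chunk size are illustrative and not part of the header.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>

// Scan `in` in fixed-size chunks, seeding each chunk with op(last output of the
// previous chunk, first input of the current chunk) -- the role played by
// impl::transform_vals / FutureValue in the device code above.
template <typename T, typename Op>
std::vector<T> chunked_inclusive_scan(const std::vector<T>& in, Op op, int64_t chunk) {
  std::vector<T> out(in.size());
  for (int64_t start = 0; start < static_cast<int64_t>(in.size()); start += chunk) {
    const int64_t end = std::min<int64_t>(start + chunk, in.size());
    T running = (start == 0) ? in[start] : op(out[start - 1], in[start]);
    out[start] = running;
    for (int64_t i = start + 1; i < end; ++i) {
      running = op(running, in[i]);
      out[i] = running;
    }
  }
  return out;
}

int main() {
  std::vector<int> v{1, 2, 3, 4, 5, 6, 7};
  auto out = chunked_inclusive_scan(v, std::plus<int>(), /*chunk=*/3);
  assert(out.back() == 28);  // same result as a single full-length scan
  return 0;
}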
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h ADDED
@@ -0,0 +1,87 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+ #include <c10/core/ScalarType.h>
4
+ #include <ATen/cuda/CUDAConfig.h>
5
+
6
+ // NOTE: These templates are intentionally not defined in this header,
7
+ // which avoids re-compiling them for each translation unit. If you get
8
+ // a link error, you need to add an explicit instantiation for your
9
+ // types in cub.cu
10
+
11
+ namespace at::cuda::cub {
12
+
13
+ inline int get_num_bits(uint64_t max_key) {
14
+ int num_bits = 1;
15
+ while (max_key > 1) {
16
+ max_key >>= 1;
17
+ num_bits++;
18
+ }
19
+ return num_bits;
20
+ }
21
+
22
+ namespace detail {
23
+
24
+ // radix_sort_pairs doesn't interact with value_t other than to copy
25
+ // the data, so we can save template instantiations by reinterpreting
26
+ // it as an opaque type.
27
+ template <int N> struct alignas(N) OpaqueType { char data[N]; };
28
+
29
+ template<typename key_t, int value_size>
30
+ void radix_sort_pairs_impl(
31
+ const key_t *keys_in, key_t *keys_out,
32
+ const OpaqueType<value_size> *values_in, OpaqueType<value_size> *values_out,
33
+ int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
34
+
35
+ } // namespace detail
36
+
37
+ template<typename key_t, typename value_t>
38
+ void radix_sort_pairs(
39
+ const key_t *keys_in, key_t *keys_out,
40
+ const value_t *values_in, value_t *values_out,
41
+ int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8) {
42
+ static_assert(std::is_trivially_copyable<value_t>::value ||
43
+ AT_ROCM_ENABLED(), // ROCm incorrectly fails this check for vector types
44
+ "radix_sort_pairs value type must be trivially copyable");
45
+ // Make value type opaque, so all inputs of a certain size use the same template instantiation
46
+ using opaque_t = detail::OpaqueType<sizeof(value_t)>;
47
+ static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0,
48
+ "This size of value_t is not instantiated. Please instantiate it in cub.cu"
49
+ " and modify this check.");
50
+ static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned");
51
+ detail::radix_sort_pairs_impl(
52
+ keys_in, keys_out,
53
+ reinterpret_cast<const opaque_t*>(values_in),
54
+ reinterpret_cast<opaque_t*>(values_out),
55
+ n, descending, begin_bit, end_bit);
56
+ }
57
+
58
+ template<typename key_t>
59
+ void radix_sort_keys(
60
+ const key_t *keys_in, key_t *keys_out,
61
+ int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8);
62
+
63
+ // NOTE: Intermediate sums will be truncated to input_t precision
64
+ template <typename input_t, typename output_t>
65
+ void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n);
66
+
67
+ template <typename scalar_t>
68
+ void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
69
+ return inclusive_sum_truncating(input, output, n);
70
+ }
71
+
72
+ // NOTE: Sums are done in common_type<input_t, output_t>
73
+ template <typename input_t, typename output_t>
74
+ void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n);
75
+
76
+ template <typename scalar_t>
77
+ void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
78
+ return exclusive_sum_in_common_type(input, output, n);
79
+ }
80
+
81
+ void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n);
82
+ inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) {
83
+ return mask_exclusive_sum(
84
+ reinterpret_cast<const uint8_t*>(mask), output_idx, n);
85
+ }
86
+
87
+ } // namespace at::cuda::cub
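get_num_bits above reports how many low-order bits are needed to represent a key bound, which callers can pass as end_bit to radix_sort_keys/radix_sort_pairs so cub sorts fewer bits. A small host-side illustration follows; get_num_bits_ref mirrors the header's logic, and the radix_sort_keys call in the comment is only a usage hint.

#include <cassert>
#include <cstdint>

// Mirrors the header's get_num_bits: the number of low-order bits needed to
// represent max_key (never less than 1).
static int get_num_bits_ref(uint64_t max_key) {
  int num_bits = 1;
  while (max_key > 1) {
    max_key >>= 1;
    num_bits++;
  }
  return num_bits;
}

int main() {
  assert(get_num_bits_ref(0) == 1);
  assert(get_num_bits_ref(1) == 1);
  assert(get_num_bits_ref(255) == 8);
  assert(get_num_bits_ref(256) == 9);
  // Typical use: sort only the bits that can actually be set, e.g. keys < 256:
  //   at::cuda::cub::radix_sort_keys(keys_in, keys_out, n,
  //       /*descending=*/false, /*begin_bit=*/0, /*end_bit=*/get_num_bits(255));
  return 0;
}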
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h ADDED
@@ -0,0 +1,54 @@
1
+ #pragma once
2
+
3
+ #include <ATen/detail/CUDAHooksInterface.h>
4
+
5
+ #include <ATen/Generator.h>
6
+ #include <c10/util/Optional.h>
7
+
8
+ // TODO: No need to have this whole header, we can just put it all in
9
+ // the cpp file
10
+
11
+ namespace at::cuda::detail {
12
+
13
+ // Set the callback to initialize Magma, which is set by
14
+ // torch_cuda_cu. This indirection is required so magma_init is called
15
+ // in the same library where Magma will be used.
16
+ TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)());
17
+
18
+
19
+ // The real implementation of CUDAHooksInterface
20
+ struct CUDAHooks : public at::CUDAHooksInterface {
21
+ CUDAHooks(at::CUDAHooksArgs) {}
22
+ void initCUDA() const override;
23
+ Device getDeviceFromPtr(void* data) const override;
24
+ bool isPinnedPtr(const void* data) const override;
25
+ const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const override;
26
+ bool hasCUDA() const override;
27
+ bool hasMAGMA() const override;
28
+ bool hasCuDNN() const override;
29
+ bool hasCuSOLVER() const override;
30
+ bool hasROCM() const override;
31
+ const at::cuda::NVRTC& nvrtc() const override;
32
+ DeviceIndex current_device() const override;
33
+ bool hasPrimaryContext(DeviceIndex device_index) const override;
34
+ Allocator* getCUDADeviceAllocator() const override;
35
+ Allocator* getPinnedMemoryAllocator() const override;
36
+ bool compiledWithCuDNN() const override;
37
+ bool compiledWithMIOpen() const override;
38
+ bool supportsDilatedConvolutionWithCuDNN() const override;
39
+ bool supportsDepthwiseConvolutionWithCuDNN() const override;
40
+ bool supportsBFloat16ConvolutionWithCuDNNv8() const override;
41
+ bool hasCUDART() const override;
42
+ long versionCUDART() const override;
43
+ long versionCuDNN() const override;
44
+ std::string showConfig() const override;
45
+ double batchnormMinEpsilonCuDNN() const override;
46
+ int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex device_index) const override;
47
+ void cuFFTSetPlanCacheMaxSize(DeviceIndex device_index, int64_t max_size) const override;
48
+ int64_t cuFFTGetPlanCacheSize(DeviceIndex device_index) const override;
49
+ void cuFFTClearPlanCache(DeviceIndex device_index) const override;
50
+ int getNumGPUs() const override;
51
+ void deviceSynchronize(DeviceIndex device_index) const override;
52
+ };
53
+
54
+ } // at::cuda::detail
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <ATen/cuda/detail/TensorInfo.cuh>
5
+ #include <ATen/native/CanUse32BitIndexMath.h>
6
+
7
+ namespace at::cuda::detail {
8
+
9
+ TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
10
+ using at::native::canUse32BitIndexMath;
11
+
12
+ template <typename scalar, typename IndexType>
13
+ TensorInfo<scalar, IndexType>
14
+ getTensorInfo(const at::TensorBase &t) {
15
+ IndexType sz[MAX_TENSORINFO_DIMS];
16
+ IndexType st[MAX_TENSORINFO_DIMS];
17
+
18
+ int dims = t.dim();
19
+ for (int i = 0; i < dims; ++i) {
20
+ sz[i] = t.size(i);
21
+ st[i] = t.stride(i);
22
+ }
23
+
24
+ scalar* data_ptr = nullptr;
25
+
26
+ if constexpr (std::is_const<scalar>::value) {
27
+ data_ptr = t.const_data_ptr<scalar>();
28
+ } else {
29
+ data_ptr = t.mutable_data_ptr<scalar>();
30
+ }
31
+
32
+ return TensorInfo<scalar, IndexType>(
33
+ data_ptr, dims, sz, st);
34
+ }
35
+
36
+ } // namespace at::cuda::detail
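A hedged sketch of how getTensorInfo and canUse32BitIndexMath are typically combined when preparing a kernel launch; hypothetical_launch and the kernel comments are invented for illustration.

#include <cstdint>
#include <ATen/cuda/detail/IndexUtils.cuh>

template <typename scalar_t>
void hypothetical_launch(const at::TensorBase& t) {
  using namespace at::cuda::detail;
  if (canUse32BitIndexMath(t)) {
    auto info = getTensorInfo<scalar_t, unsigned int>(t);
    info.collapseDims();  // fewer dims => cheaper IndexToOffset in the kernel
    // ... launch a kernel templated on <scalar_t, unsigned int> with `info` ...
  } else {
    auto info = getTensorInfo<scalar_t, uint64_t>(t);
    info.collapseDims();
    // ... launch the 64-bit-index variant ...
  }
}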
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <assert.h>
4
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
5
+ #include <cuda_runtime.h>
6
+ #endif
7
+
8
+ namespace at::cuda::detail {
9
+
10
+ // A utility class to implement integer division by multiplication, given a fixed
11
+ // divisor.
12
+ //
13
+ // WARNING: The fast divider algorithm is only implemented for unsigned int;
14
+ // otherwise we default to plain integer division. For unsigned int,
15
+ // we further assume that the dividend is at most INT32_MAX. Thus,
16
+ // IntDivider must NOT be used for general integer division.
17
+ //
18
+ // This reduced range is enough for our purpose, and it allows us to
19
+ // slightly simplify the computation.
20
+ //
21
+ // (NOTE: Below, "2^k" denotes exponentiation, i.e., 1<<k.)
22
+ //
23
+ // For any N-bit unsigned integer d (> 0), we can find a "magic number" m (2^N
24
+ // <= m < 2^(N+1)) and shift s such that:
25
+ //
26
+ // \floor(n / d) = \floor((m * n) / 2^(N+s)).
27
+ //
28
+ // Given such m and s, the integer division can be then implemented as:
29
+ //
30
+ // let m' = m - 2^N // 0 <= m' < 2^N
31
+ //
32
+ // fast_integer_division(n):
33
+ // // Multiply two N-bit unsigned integers: the result is a 2N-bit unsigned
34
+ // // integer. Then take the higher N bits.
35
+ // t = (m' * n) >> N
36
+ //
37
+ // // Here we use the fact that n is less than 2^(N-1): otherwise the value
38
+ // // of (t + n) may not fit in an N-bit integer.
39
+ // return (t + n) >> s
40
+ //
41
+ // Finding such a magic number is surprisingly easy:
42
+ //
43
+ // s = \ceil(\log_2 d)
44
+ // m' = \floor(2^N * (2^s - d) / d) + 1 // Need 2N-bit integer arithmetic.
45
+ //
46
+ // See also:
47
+ // - Division by Invariant Integers Using Multiplication,
48
+ // Torbjörn Granlund and Peter L. Montgomery, 1994.
49
+ //
50
+ // - http://www.hackersdelight.org/magic.htm
51
+ //
52
+ // - http://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html
53
+
54
+ // Result of div/mod operation stored together.
55
+ template <typename Value>
56
+ struct DivMod {
57
+ Value div, mod;
58
+
59
+ C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
60
+ };
61
+
62
+ // Base case: we only have an implementation for uint32_t for now. For
63
+ // everything else, we use plain division.
64
+ template <typename Value>
65
+ struct IntDivider {
66
+ IntDivider() = default;
67
+ IntDivider(Value d) : divisor(d) { }
68
+
69
+ C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
70
+ C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
71
+ C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
72
+ return DivMod<Value>(n / divisor, n % divisor);
73
+ }
74
+
75
+ Value divisor;
76
+ };
77
+
78
+ // Implement fast integer division.
79
+ template <>
80
+ struct IntDivider<unsigned int> {
81
+ static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
82
+
83
+ IntDivider() = default;
84
+
85
+ IntDivider(unsigned int d) : divisor(d) {
86
+ assert(divisor >= 1 && divisor <= INT32_MAX);
87
+
88
+ // TODO: gcc/clang has __builtin_clz() but it's not portable.
89
+ for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
90
+
91
+ uint64_t one = 1;
92
+ uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
93
+ m1 = magic;
94
+ assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits.
95
+ }
96
+
97
+ C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
98
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
99
+ // 't' is the high 32 bits of the 64-bit product of 'n' and
100
+ // 'm1'.
101
+ unsigned int t = __umulhi(n, m1);
102
+ return (t + n) >> shift;
103
+ #else
104
+ // Using uint64_t so that the addition does not overflow.
105
+ uint64_t t = ((uint64_t) n * m1) >> 32;
106
+ return (t + n) >> shift;
107
+ #endif
108
+ }
109
+
110
+ C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
111
+ return n - div(n) * divisor;
112
+ }
113
+
114
+ C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
115
+ unsigned int q = div(n);
116
+ return DivMod<unsigned int>(q, n - q * divisor);
117
+ }
118
+
119
+ unsigned int divisor; // d above.
120
+ unsigned int m1; // Magic number: m' above.
121
+ unsigned int shift; // Shift amount: s above.
122
+ };
123
+
124
+ } // namespace at::cuda::detail
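A host-side sanity check of the magic-number scheme described in the comment above: it reproduces the constructor's computation of shift and m1 and the host path of div(), then compares against plain division. The chosen divisors and dividends are arbitrary test values.

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (uint32_t d : {1u, 3u, 7u, 10u, 12345u}) {
    // Same construction as the IntDivider<unsigned int> constructor above.
    uint32_t shift = 0;
    for (; shift < 32; shift++) {
      if ((1U << shift) >= d) break;
    }
    const uint64_t one = 1;
    const uint64_t magic = ((one << 32) * ((one << shift) - d)) / d + 1;
    const uint32_t m1 = static_cast<uint32_t>(magic);
    assert(m1 == magic);  // the magic number must fit in 32 bits

    // Same arithmetic as the host path of div(): t = high 32 bits of n * m1.
    for (uint32_t n : {0u, 1u, 99u, 123456u, 2147483647u}) {  // n <= INT32_MAX
      const uint64_t t = (static_cast<uint64_t>(n) * m1) >> 32;
      assert(((t + n) >> shift) == n / d);
    }
  }
  return 0;
}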
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h ADDED
@@ -0,0 +1,37 @@
1
+ #pragma once
2
+
3
+ #include <limits>
4
+ #include <c10/util/Exception.h>
5
+
6
+ namespace at::cuda::detail {
7
+
8
+ // CUDA: grid stride looping
9
+ //
10
+ // int64_t _i_n_d_e_x specifically prevents overflow in the loop increment.
11
+ // If input.numel() < INT_MAX, _i_n_d_e_x < INT_MAX, except after the final
12
+ // iteration of the loop where _i_n_d_e_x += blockDim.x * gridDim.x can be
13
+ // greater than INT_MAX. But in that case _i_n_d_e_x >= n, so there are no
14
+ // further iterations and the overflowed value in i=_i_n_d_e_x is not used.
15
+ #define CUDA_KERNEL_LOOP_TYPE(i, n, index_type) \
16
+ int64_t _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \
17
+ for (index_type i=_i_n_d_e_x; _i_n_d_e_x < (n); _i_n_d_e_x+=blockDim.x * gridDim.x, i=_i_n_d_e_x)
18
+
19
+ #define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int)
20
+
21
+
22
+ // Use 1024 threads per block, which requires cuda sm_2x or above
23
+ constexpr int CUDA_NUM_THREADS = 1024;
24
+
25
+ // CUDA: number of blocks for threads.
26
+ inline int GET_BLOCKS(const int64_t N, const int64_t max_threads_per_block=CUDA_NUM_THREADS) {
27
+ TORCH_INTERNAL_ASSERT(N > 0, "CUDA kernel launch blocks must be positive, but got N=", N);
28
+ constexpr int64_t max_int = std::numeric_limits<int>::max();
29
+
30
+ // Round up division for positive number that cannot cause integer overflow
31
+ auto block_num = (N - 1) / max_threads_per_block + 1;
32
+ TORCH_INTERNAL_ASSERT(block_num <= max_int, "Can't schedule too many blocks on CUDA device");
33
+
34
+ return static_cast<int>(block_num);
35
+ }
36
+
37
+ } // namespace at::cuda::detail
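A short sketch of how the grid-stride loop macro, CUDA_NUM_THREADS, and GET_BLOCKS are typically combined; add_one_kernel and launch_add_one are invented names, and the stream/launch-check calls follow the usual ATen pattern.

#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/cuda/CUDAException.h>

__global__ void add_one_kernel(const float* in, float* out, int64_t n) {
  CUDA_KERNEL_LOOP_TYPE(i, n, int64_t) {  // grid-stride loop over all n elements
    out[i] = in[i] + 1.0f;
  }
}

void launch_add_one(const float* in, float* out, int64_t n) {
  if (n == 0) return;  // GET_BLOCKS asserts N > 0
  using namespace at::cuda::detail;
  add_one_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0,
                   at::cuda::getCurrentCUDAStream()>>>(in, out, n);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}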
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+ #include <ATen/detail/CUDAHooksInterface.h>
3
+ namespace at::cuda {
4
+ // Forward-declares at::cuda::NVRTC
5
+ struct NVRTC;
6
+
7
+ namespace detail {
8
+ extern NVRTC lazyNVRTC;
9
+ } // namespace detail
10
+
11
+ } // namespace at::cuda
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh ADDED
@@ -0,0 +1,119 @@
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cstdint>
5
+ #include <type_traits>
6
+ #include <c10/macros/Macros.h>
7
+ #include <ATen/core/Array.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+ #include <ATen/cuda/detail/IntegerDivider.cuh>
10
+
11
+ // If element_sizes is nullptr, then the strides will be in bytes, otherwise
12
+ // the strides will be in # of elements.
13
+ // Operands share the same shape but may have different strides.
14
+ // OffsetCalculator iterates the tensor in column-major order
15
+
16
+ #if defined(USE_ROCM)
17
+ constexpr int MAX_DIMS = 16;
18
+ #else
19
+ constexpr int MAX_DIMS = 25;
20
+ #endif
21
+
22
+ template <int NARGS, typename index_t = uint32_t, bool signed_strides = false>
23
+ struct OffsetCalculator {
24
+ // We allow having negative strides to implement some operations like torch.flip
25
+ using stride_t = std::conditional_t<signed_strides,
26
+ std::make_signed_t<index_t>,
27
+ index_t>;
28
+ // The offset for each argument. Wrapper around fixed-size array.
29
+ // On CUDA, zero sized array is not allowed, so when we are handling nullary
30
+ // operators, we need to create a size 1 offset to avoid compiler failure.
31
+ // This size 1 offset is just a placeholder, and we will not use it.
32
+ using offset_type = at::detail::Array<stride_t, std::max<int>(NARGS, 1)>;
33
+
34
+ // if element_sizes is nullptr, then the strides will be in bytes, otherwise
35
+ // the strides will be in # of elements.
36
+ OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides, const int64_t* element_sizes=nullptr) : dims(dims) {
37
+ TORCH_CHECK(dims <= MAX_DIMS, "tensor has too many (>", MAX_DIMS, ") dims");
38
+ for (int i=0; i < dims; i++){
39
+ sizes_[i] = at::cuda::detail::IntDivider<index_t>(sizes[i]);
40
+ for (int arg = 0; arg < NARGS; arg++) {
41
+ int64_t element_size = (element_sizes == nullptr ? 1LL : element_sizes[arg]);
42
+ strides_[i][arg] = strides[arg][i] / element_size;
43
+ }
44
+ }
45
+ }
46
+
47
+ C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
48
+ offset_type offsets;
49
+ #pragma unroll
50
+ for (int arg = 0; arg < NARGS; arg++) {
51
+ offsets[arg] = 0;
52
+ }
53
+
54
+ #pragma unroll
55
+ for (int dim = 0; dim < MAX_DIMS; ++dim) {
56
+ if (dim == dims) {
57
+ break;
58
+ }
59
+ auto divmod = sizes_[dim].divmod(linear_idx);
60
+ linear_idx = divmod.div;
61
+
62
+ #pragma unroll
63
+ for (int arg = 0; arg < NARGS; arg++) {
64
+ offsets[arg] += divmod.mod * strides_[dim][arg];
65
+ }
66
+
67
+ }
68
+ return offsets;
69
+ }
70
+
71
+ int dims;
72
+ at::cuda::detail::IntDivider<index_t> sizes_[MAX_DIMS];
73
+ stride_t strides_[MAX_DIMS][std::max<int>(NARGS, 1)];
74
+ };
75
+
76
+ template <int NARGS, typename index_t = uint32_t>
77
+ struct TrivialOffsetCalculator {
78
+ // The offset for each argument. Wrapper around fixed-size array.
79
+ // The offsets are in # of elements, not in bytes.
80
+ // On CUDA, zero sized array is not allowed, so when we are handling nullary
81
+ // operators, we need to create a size 1 offset to avoid compiler failure.
82
+ // This size 1 offset is just a placeholder, and we will not use it.
83
+ using offset_type = at::detail::Array<index_t, std::max<int>(NARGS, 1)>;
84
+
85
+ C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
86
+ offset_type offsets;
87
+ #pragma unroll
88
+ for (int arg = 0; arg < NARGS; arg++) {
89
+ offsets[arg] = linear_idx;
90
+ }
91
+ return offsets;
92
+ }
93
+ };
94
+
95
+ // Make an OffsetCalculator with byte offsets
96
+ template<int N, bool signed_strides = false>
97
+ static OffsetCalculator<N, uint32_t, signed_strides> make_offset_calculator(const at::TensorIteratorBase& iter) {
98
+ TORCH_INTERNAL_ASSERT(N <= iter.ntensors());
99
+ std::array<const int64_t*, N> strides;
100
+ for (int i = 0; i < N; i++) {
101
+ strides[i] = iter.strides(i).data();
102
+ }
103
+ return OffsetCalculator<N, uint32_t, signed_strides>(iter.ndim(), iter.shape().data(), strides.data());
104
+ }
105
+
106
+ // Make an OffsetCalculator with element offsets
107
+ template<int N, bool signed_strides = false>
108
+ static OffsetCalculator<N, uint32_t, signed_strides> make_element_offset_calculator(
109
+ const at::TensorIteratorBase& iter) {
110
+ TORCH_INTERNAL_ASSERT(N <= iter.ntensors());
111
+ std::array<const int64_t*, N> strides;
112
+ std::array<int64_t, N> element_sizes;
113
+ for (int i = 0; i < N; i++) {
114
+ strides[i] = iter.strides(i).data();
115
+ element_sizes[i] = iter.element_size(i);
116
+ }
117
+ return OffsetCalculator<N, uint32_t, signed_strides>(
118
+ iter.ndim(), iter.shape().data(), strides.data(), element_sizes.data());
119
+ }
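A hedged sketch of how an OffsetCalculator built by make_offset_calculator is consumed: the calculator is passed to the kernel by value, and get(linear_index) returns per-operand byte offsets (operand 0 is the output in TensorIterator order). The kernel and the launch comment are illustrative only.

#include <cstdint>
#include <ATen/cuda/detail/OffsetCalculator.cuh>

__global__ void hypothetical_copy_float_kernel(int64_t n, OffsetCalculator<2> calc,
                                               char* out, const char* in) {
  uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  auto offsets = calc.get(idx);  // offsets[0]: output byte offset, offsets[1]: input byte offset
  *reinterpret_cast<float*>(out + offsets[0]) =
      *reinterpret_cast<const float*>(in + offsets[1]);
}

// Host side, given a configured TensorIterator `iter` with one output and one input:
//   auto calc = make_offset_calculator<2>(iter);   // strides in bytes
//   hypothetical_copy_float_kernel<<<grid, block, 0, stream>>>(
//       iter.numel(), calc, (char*)iter.data_ptr(0), (const char*)iter.data_ptr(1));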
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh ADDED
@@ -0,0 +1,43 @@
1
+ // No "#pragma once" because this is a raw definition that can be copied by jit codegen.
2
+ // Eager mode clients should not include this file directly, instead,
3
+ // they should #include <ATen/cuda/PhiloxCudaState.h>, which has a #pragma once.
4
+
5
+ // Stores RNG state values. Passed as a kernel argument.
6
+ // See Note [CUDA Graph-safe RNG states].
7
+ //
8
+ // The raw definition lives in its own file so jit codegen can easily copy it.
9
+ namespace at {
10
+
11
+ struct PhiloxCudaState {
12
+ PhiloxCudaState() = default;
13
+ // Called if graph capture is not underway
14
+ PhiloxCudaState(uint64_t seed,
15
+ uint64_t offset) {
16
+ seed_.val = seed;
17
+ offset_.val = offset;
18
+ }
19
+ // Called if graph capture is underway
20
+ PhiloxCudaState(int64_t* seed,
21
+ int64_t* offset_extragraph,
22
+ uint32_t offset_intragraph) {
23
+ seed_.ptr = seed;
24
+ offset_.ptr = offset_extragraph;
25
+ offset_intragraph_ = offset_intragraph;
26
+ captured_ = true;
27
+ }
28
+
29
+ // Public members, directly accessible by at::cuda::philox::unpack.
30
+ // If we made them private with getters/setters, the getters/setters
31
+ // would have to be __device__, and we can't declare __device__ in ATen.
32
+ union Payload {
33
+ uint64_t val;
34
+ int64_t* ptr;
35
+ };
36
+
37
+ Payload seed_;
38
+ Payload offset_;
39
+ uint32_t offset_intragraph_ = 0;
40
+ bool captured_ = false;
41
+ };
42
+
43
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh ADDED
@@ -0,0 +1,116 @@
1
+ #pragma once
2
+
3
+ #include <ATen/CollapseDims.h>
4
+
5
+ namespace at::cuda::detail {
6
+
7
+ #define MAX_TENSORINFO_DIMS 25
8
+
9
+ // CUDA kernel argument that defines tensor layout
10
+ template <typename T, typename IndexType>
11
+ struct TensorInfo {
12
+ TensorInfo();
13
+ TensorInfo(T* p,
14
+ int dim,
15
+ IndexType sz[MAX_TENSORINFO_DIMS],
16
+ IndexType st[MAX_TENSORINFO_DIMS]);
17
+
18
+ // Set the size of the given dimension to 1, as if it were a
19
+ // reduction dim (allows you to calculate offsets of the reduction
20
+ // slice)
21
+ void reduceDim(int dim);
22
+
23
+ // See note on [collapse dims].
24
+ int collapseDims(const int excludeDim = -1);
25
+
26
+ // Contiguous tensors of more than one dimension are collapsed down
27
+ // to one tensor
28
+ __host__ __device__ inline bool isContiguous() const {
29
+ return (dims == 1 && strides[0] == 1);
30
+ }
31
+
32
+ T* data;
33
+ IndexType sizes[MAX_TENSORINFO_DIMS];
34
+ IndexType strides[MAX_TENSORINFO_DIMS];
35
+ int dims;
36
+ };
37
+
38
+ template <typename T, typename IndexType>
39
+ TensorInfo<T, IndexType>::TensorInfo() {
40
+ data = nullptr;
41
+ dims = 0;
42
+ }
43
+
44
+ template <typename T, typename IndexType>
45
+ TensorInfo<T, IndexType>::TensorInfo(T* p,
46
+ int dim,
47
+ IndexType sz[MAX_TENSORINFO_DIMS],
48
+ IndexType st[MAX_TENSORINFO_DIMS]) {
49
+ data = p;
50
+ dims = dim;
51
+ TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "CUDA Tensors cannot have more than 25 dimensions");
52
+
53
+ for (int i = 0; i < dim; ++i) {
54
+ sizes[i] = sz[i];
55
+ strides[i] = st[i];
56
+ }
57
+ }
58
+
59
+ template <typename T, typename IndexType>
60
+ void
61
+ TensorInfo<T, IndexType>::reduceDim(int dim) {
62
+ TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
63
+ sizes[dim] = 1;
64
+ }
65
+
66
+ template <typename T, typename IndexType>
67
+ int
68
+ TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
69
+ auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
70
+ dims = std::get<1>(result);
71
+ return std::get<0>(result);
72
+ }
73
+
74
+ // Translate a linear index for the apply to a T* offset;
75
+ // specialized on `Dims` to reduce nvcc compilation time
76
+ template <typename T, typename IndexType, int Dims>
77
+ struct IndexToOffset {
78
+ static __host__ __device__ IndexType get(
79
+ IndexType linearId,
80
+ const TensorInfo<T, IndexType>& info) {
81
+
82
+ IndexType offset = 0;
83
+
84
+ // Uses static dims
85
+ for (int i = Dims - 1; i > 0; --i) {
86
+ IndexType curDimIndex = linearId % info.sizes[i];
87
+ IndexType curDimOffset = curDimIndex * info.strides[i];
88
+ offset += curDimOffset;
89
+ linearId /= info.sizes[i];
90
+ }
91
+
92
+ return offset + linearId * info.strides[0];
93
+ }
94
+ };
95
+
96
+ // Uses dynamic (runtime) instead of static (compiletime) dims
97
+ template <typename T, typename IndexType>
98
+ struct IndexToOffset<T, IndexType, -1> {
99
+ static inline __host__ __device__ IndexType get(
100
+ IndexType linearId,
101
+ const TensorInfo<T, IndexType>& info) {
102
+
103
+ IndexType offset = 0;
104
+
105
+ for (int i = info.dims - 1; i > 0; --i) {
106
+ IndexType curDimIndex = linearId % info.sizes[i];
107
+ IndexType curDimOffset = curDimIndex * info.strides[i];
108
+ offset += curDimOffset;
109
+ linearId /= info.sizes[i];
110
+ }
111
+
112
+ return offset + linearId * info.strides[0];
113
+ }
114
+ };
115
+
116
+ } // namespace at::cuda::detail
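A standalone host-side illustration of the arithmetic in IndexToOffset<T, IndexType, -1>::get: peel off the innermost dimensions with mod/div and accumulate stride-weighted offsets. index_to_offset_ref and the example shapes are made up for the check.

#include <cassert>
#include <cstdint>

static uint32_t index_to_offset_ref(uint32_t linearId, const uint32_t* sizes,
                                    const uint32_t* strides, int dims) {
  uint32_t offset = 0;
  for (int i = dims - 1; i > 0; --i) {    // innermost dims first
    offset += (linearId % sizes[i]) * strides[i];
    linearId /= sizes[i];
  }
  return offset + linearId * strides[0];  // whatever remains indexes dim 0
}

int main() {
  const uint32_t sizes[2] = {3, 4};
  const uint32_t strides[2] = {4, 1};     // contiguous: offset == linear index
  for (uint32_t i = 0; i < 12; ++i) {
    assert(index_to_offset_ref(i, sizes, strides, 2) == i);
  }
  const uint32_t t_strides[2] = {1, 3};   // a transposed view of the same data
  assert(index_to_offset_ref(7, sizes, t_strides, 2) == (7 % 4) * 3 + (7 / 4) * 1);
  return 0;
}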
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh ADDED
@@ -0,0 +1,28 @@
1
+ // No "#pragma once" because this is a raw definition that can be copied by jit codegen.
2
+ // Eager mode clients should not include this file directly, instead,
3
+ // they should #include <ATen/cuda/PhiloxUtils.cuh>, which has a #pragma once.
4
+
5
+ namespace at::cuda::philox {
6
+
7
+ // In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether
8
+ // that instance was created with graph capture underway or not.
9
+ // See Note [CUDA Graph-safe RNG states].
10
+ //
11
+ // We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen.
12
+ // Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable.
13
+ // Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
14
+ //
15
+ // The raw definition lives in its own file so jit codegen can easily copy it.
16
+ __host__ __device__ __forceinline__ std::tuple<uint64_t, uint64_t>
17
+ unpack(at::PhiloxCudaState arg) {
18
+ if (arg.captured_) {
19
+ // static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
20
+ // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
21
+ // For most threads' reads it will hit in cache, so it shouldn't hurt performance.
22
+ return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
23
+ } else {
24
+ return std::make_tuple(arg.seed_.val, arg.offset_.val);
25
+ }
26
+ }
27
+
28
+ } // namespace at::cuda::philox
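A hedged sketch of the usual consumer of unpack(): a kernel receives a PhiloxCudaState by value, unpacks the seed/offset (valid whether or not capture was underway), and seeds a Philox curand state. hypothetical_uniform_kernel is an invented name, and the include assumes ATen/cuda/PhiloxUtils.cuh re-exports the definitions above.

#include <cstdint>
#include <curand_kernel.h>
#include <ATen/cuda/PhiloxUtils.cuh>

__global__ void hypothetical_uniform_kernel(at::PhiloxCudaState philox_args,
                                            float* out, int64_t n) {
  const int64_t idx = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
  if (idx >= n) return;
  auto seeds = at::cuda::philox::unpack(philox_args);  // (seed, offset), capture-safe
  curandStatePhilox4_32_10_t state;
  curand_init(std::get<0>(seeds), /*subsequence=*/idx, std::get<1>(seeds), &state);
  out[idx] = curand_uniform(&state);
}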
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ #if AT_USE_JITERATOR()
5
+
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/util/SmallVector.h>
8
+ #include <ATen/core/Tensor.h>
9
+
10
+ #include <string>
11
+ #include <vector>
12
+
13
+ namespace at::cuda {
14
+
15
+ TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
16
+ const std::string& code_string,
17
+ const std::string& kernel_name,
18
+ const int num_outputs,
19
+ const c10::SmallVector<at::Tensor>& tensors,
20
+ const c10::SmallVector<at::Scalar>& extra_args,
21
+ bool return_by_ref);
22
+
23
+ } // namespace at::cuda
24
+
25
+ #else
26
+
27
+ namespace at::cuda {
28
+
29
+ TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
30
+ const std::string& code_string,
31
+ const std::string& kernel_name,
32
+ const int num_outputs,
33
+ const c10::SmallVector<at::Tensor>& tensors,
34
+ const c10::SmallVector<at::Scalar>& extra_args,
35
+ bool return_by_ref) {
36
+ TORCH_CHECK(false, "Jiterator is not supported");
37
+ }
38
+ } // namespace at::cuda
39
+
40
+ #endif // AT_USE_JITERATOR()
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h ADDED
@@ -0,0 +1,249 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ #if AT_USE_JITERATOR()
5
+
6
+ #include <ATen/native/TensorIterator.h>
7
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
8
+ #include <ATen/native/cuda/jit_utils.h>
9
+ #include <ATen/native/cuda/MemoryAccess.cuh>
10
+ #include <ATen/native/cuda/JitLoops.cuh>
11
+
12
+ #include <string>
13
+ #include <variant>
14
+ #include <vector>
15
+
16
+ namespace at::native {
17
+
18
+
19
+ #define AT_FOR_8_CASES(_) \
20
+ _(1) \
21
+ _(2) \
22
+ _(3) \
23
+ _(4) \
24
+ _(5) \
25
+ _(6) \
26
+ _(7) \
27
+ _(8)
28
+
29
+ #define AT_FOR_8_CASES_WITH_COMMA(_) \
30
+ _(1) , \
31
+ _(2) , \
32
+ _(3) , \
33
+ _(4) , \
34
+ _(5) , \
35
+ _(6) , \
36
+ _(7) , \
37
+ _(8)
38
+
39
+ c10::SmallVector<std::string> get_extra_args_typenames(const c10::SmallVector<at::Scalar>& extra_args) {
40
+ c10::SmallVector<std::string> args_typenames(extra_args.size());
41
+ for (const auto i : c10::irange(extra_args.size())) {
42
+ args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type());
43
+ }
44
+ return args_typenames;
45
+ }
46
+
47
+ int can_vectorize_up_to(at::ScalarType type, char* pointer) {
48
+ switch(type) {
49
+ #define DEFINE_CASE(ctype, scalartype) \
50
+ case ScalarType::scalartype : return memory::can_vectorize_up_to<ctype>(pointer);
51
+
52
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
53
+ #undef DEFINE_CASE
54
+
55
+ default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
56
+ }
57
+ }
58
+
59
+ // jitted version of the above
60
+ // See Note [Jiterator], this relies on the assumptions enumerated there
61
+ int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) {
62
+ const at::ScalarType common_dtype = iter.common_dtype();
63
+ const at::ScalarType result_dtype = common_dtype;
64
+
65
+ // Deals with output
66
+ int result = can_vectorize_up_to(result_dtype, static_cast<char*>(iter.data_ptr(0)));
67
+
68
+ // Incorporates input(s)
69
+ for (auto i = 1; i < iter.ntensors(); ++i) {
70
+ result = std::min<int>(result, can_vectorize_up_to(common_dtype, static_cast<char*>(iter.data_ptr(i))));
71
+ }
72
+
73
+ return result;
74
+ }
75
+
76
+ template<bool IS_INPUT, int N>
77
+ static std::unique_ptr<OffsetCalculator<N>> make_unique_offset_calculator(
78
+ const TensorIteratorBase& iter) {
79
+ // array size can not be 0, this happens when N == 0
80
+ constexpr int array_size = std::max<int>(N, 1);
81
+ TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs()));
82
+
83
+ std::array<const int64_t*, array_size> strides;
84
+ int64_t element_sizes[array_size];
85
+ for (int i = 0; i < N; i++) {
86
+ int index = IS_INPUT ? i + iter.noutputs() : i;
87
+ strides[i] = iter.strides(index).data();
88
+ element_sizes[i] = iter.element_size(index);
89
+ }
90
+ return std::make_unique<OffsetCalculator<N>>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes);
91
+ }
92
+
93
+ template <bool IS_INPUT>
94
+ struct OffsetCalculatorVariant {
95
+ #define DEFINE_CASE(index) std::unique_ptr<OffsetCalculator<index>>
96
+ using OffsetCalculatorTypes = std::variant<
97
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
98
+ >;
99
+ #undef DEFINE_CASE
100
+
101
+ OffsetCalculatorVariant(const TensorIteratorBase& iter) {
102
+ int num = IS_INPUT ? iter.ninputs() : iter.noutputs();
103
+
104
+ switch(num) {
105
+ #define DEFINE_CASE(index) \
106
+ case index : v = make_unique_offset_calculator<IS_INPUT, index>(iter); break;
107
+
108
+ AT_FOR_8_CASES(DEFINE_CASE)
109
+ #undef DEFINE_CASE
110
+ default:
111
+ TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num);
112
+ }
113
+ }
114
+
115
+ void* data_ptr() {
116
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
117
+ }
118
+
119
+ private:
120
+ OffsetCalculatorTypes v;
121
+ };
122
+
123
+ struct ArrayVariant {
124
+ // works for up to 8 input + 8 outputs
125
+ #define DEFINE_CASE(index) at::detail::Array<char*, index>, at::detail::Array<char*, index+8>
126
+ using ArrayTypes = std::variant<
127
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
128
+ >;
129
+ #undef DEFINE_CASE
130
+
131
+ ArrayVariant(const TensorIteratorBase& iter) {
132
+ int ntensors = iter.ntensors();
133
+ switch(ntensors) {
134
+ #define DEFINE_CASE(index) \
135
+ case index: array = at::detail::Array<char*, index>{}; break; \
136
+ case index+8: array = at::detail::Array<char*, index+8>{}; break;
137
+
138
+ AT_FOR_8_CASES(DEFINE_CASE)
139
+ #undef DEFINE_CASE
140
+
141
+ default:
142
+ TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors);
143
+ }
144
+
145
+ std::visit([&](auto& a) {
146
+ for (auto i = 0; i < ntensors; ++i) {
147
+ a[i] = (char*)iter.data_ptr(i);
148
+ }
149
+ }, array);
150
+ }
151
+
152
+ void* data_ptr() {
153
+ return std::visit([](auto & a){ return static_cast<void*>(&a); }, array);
154
+ }
155
+
156
+ private:
157
+ ArrayTypes array;
158
+ };
159
+
160
+ struct TrivialOffsetCalculatorVariant {
161
+ #define DEFINE_CASE(index) TrivialOffsetCalculator<index>
162
+ using TrivialOffsetCalculatorTypes = std::variant<
163
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
164
+ >;
165
+ #undef DEFINE_CASE
166
+
167
+ TrivialOffsetCalculatorVariant(int num) {
168
+ switch(num) {
169
+ #define DEFINE_CASE(index) \
170
+ case index: v = TrivialOffsetCalculator<index>(); break;
171
+
172
+ AT_FOR_8_CASES(DEFINE_CASE)
173
+ #undef DEFINE_CASE
174
+
175
+ default:
176
+ TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num);
177
+ }
178
+ }
179
+
180
+ void* data_ptr() {
181
+ return std::visit([](auto & v){ return static_cast<void*>(&v); }, v);
182
+ }
183
+
184
+ private:
185
+ TrivialOffsetCalculatorTypes v;
186
+ };
187
+
188
+ struct LoadWithCastVariant {
189
+ #define DEFINE_CASE(index) std::unique_ptr<memory::LoadWithCast<index>>
190
+ using LoadWithCastPtr = std::variant<
191
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
192
+ >;
193
+ #undef DEFINE_CASE
194
+
195
+ LoadWithCastVariant(const TensorIteratorBase& iter) {
196
+ int arity = iter.ninputs();
197
+ switch(arity) {
198
+ #define DEFINE_CASE(index) \
199
+ case index: v = std::make_unique<memory::LoadWithCast<index>>(iter); break;
200
+
201
+ AT_FOR_8_CASES(DEFINE_CASE)
202
+ #undef DEFINE_CASE
203
+
204
+ default:
205
+ TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity);
206
+ }
207
+ }
208
+
209
+ void* data_ptr() {
210
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
211
+ }
212
+
213
+ private:
214
+ LoadWithCastPtr v;
215
+ };
216
+
217
+ struct StoreWithCastVariant {
218
+ #define DEFINE_CASE(index) std::unique_ptr<memory::StoreWithCast<index>>
219
+ using StoreWithCastPtr = std::variant<
220
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
221
+ >;
222
+ #undef DEFINE_CASE
223
+
224
+ StoreWithCastVariant(const TensorIteratorBase& iter) {
225
+ int num = iter.noutputs();
226
+ switch(num) {
227
+ #define DEFINE_CASE(index) \
228
+ case index: v = std::make_unique<memory::StoreWithCast<index>>(iter); break;
229
+
230
+ AT_FOR_8_CASES(DEFINE_CASE)
231
+ #undef DEFINE_CASE
232
+
233
+ default:
234
+ TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num);
235
+ }
236
+ }
237
+
238
+ void* data_ptr() {
239
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
240
+ }
241
+
242
+ private:
243
+ StoreWithCastPtr v;
244
+ };
245
+
246
+ } // namespace at::native
247
+
248
+
249
+ #endif // AT_USE_JITERATOR()
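The *Variant helpers above all follow the same shape: pick one of a handful of statically sized types at runtime via std::variant, then hand the kernel launcher a type-erased pointer through data_ptr(). A standalone sketch of that pattern follows; PointerArrayVariant is an invented stand-in, and std::abort stands in for TORCH_CHECK.

#include <array>
#include <cstdio>
#include <cstdlib>
#include <variant>

struct PointerArrayVariant {
  std::variant<std::array<char*, 1>, std::array<char*, 2>, std::array<char*, 3>> v;

  explicit PointerArrayVariant(int ntensors) {
    switch (ntensors) {              // runtime arity selects the static type
      case 1: v = std::array<char*, 1>{}; break;
      case 2: v = std::array<char*, 2>{}; break;
      case 3: v = std::array<char*, 3>{}; break;
      default: std::abort();         // the real code TORCH_CHECKs instead
    }
  }

  void* data_ptr() {                 // type-erased view for the launcher
    return std::visit([](auto& a) { return static_cast<void*>(&a); }, v);
  }
};

int main() {
  PointerArrayVariant arrays(2);
  std::printf("type-erased storage at %p\n", arrays.data_ptr());
  return 0;
}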
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace at::cuda {
7
+
8
+ TORCH_CUDA_CPP_API const std::string &get_traits_string();
9
+ TORCH_CUDA_CPP_API const std::string &get_cmath_string();
10
+ TORCH_CUDA_CPP_API const std::string &get_complex_body_string();
11
+ TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string();
12
+ TORCH_CUDA_CPP_API const std::string &get_complex_math_string();
13
+
14
+ } // namespace at::cuda
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h ADDED
@@ -0,0 +1,174 @@
1
+ // Original TunableOp is from onnxruntime.
2
+ // https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
3
+ // https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
4
+ // Copyright (c) Microsoft Corporation.
5
+ // Licensed under the MIT license.
6
+ //
7
+ // Adapting TunableOp into PyTorch
8
+ // Copyright (c) Advanced Micro Devices, Inc.
9
+ //
10
+ #pragma once
11
+
12
+ #include <string>
13
+
14
+ #include <ATen/cuda/tunable/TunableOp.h>
15
+ #include <ATen/cuda/Exceptions.h>
16
+ #include <c10/util/StringUtil.h>
17
+
18
+ namespace at::cuda::tunable {
19
+
20
+ enum class BlasOp {
21
+ N = 0,
22
+ T = 1
23
+ };
24
+
25
+ inline std::string BlasOpToString(BlasOp op) {
26
+ switch (op) {
27
+ case BlasOp::N:
28
+ return "N";
29
+ case BlasOp::T:
30
+ return "T";
31
+ }
32
+ TORCH_CHECK(false, "unrecognized BlasOp");
33
+ return "N";
34
+ }
35
+
36
+ template <typename T>
37
+ struct GemmParams : OpParams {
38
+ std::string Signature() const override {
39
+ return c10::str(transa, transb, "_", m, "_", n, "_", k);
40
+ }
41
+
42
+ GemmParams* DeepCopy() const {
43
+ GemmParams* copy = new GemmParams;
44
+ *copy = *this;
45
+ c10::DeviceIndex device = 0;
46
+ AT_CUDA_CHECK(c10::cuda::GetDevice(&device));
47
+ size_t c_size = m * n * sizeof(T);
48
+ copy->c = static_cast<T*>(c10::cuda::CUDACachingAllocator::raw_alloc(c_size));
49
+ AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync(
50
+ copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true));
51
+ return copy;
52
+ }
53
+
54
+ // only call on object returned by DeepCopy
55
+ void Delete() {
56
+ c10::cuda::CUDACachingAllocator::raw_delete(c);
57
+ }
58
+
59
+ TuningStatus NumericalCheck(GemmParams<T> *other) {
60
+ auto options = at::TensorOptions().dtype(c10::CppTypeToScalarType<T>::value).device(at::kCUDA);
61
+ // comparison done as 1D tensor
62
+ at::Tensor ref = at::from_blob(c, {m*n}, options);
63
+ at::Tensor oth = at::from_blob(other->c, {m*n}, options);
64
+ at::Tensor ref_float = ref.to(at::kFloat);
65
+ at::Tensor oth_float = oth.to(at::kFloat);
66
+ std::vector<double> atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
67
+ std::vector<double> rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
68
+ double last_succeed_atol = 1;
69
+ double last_succeed_rtol = 1;
70
+ for (auto& atol : atols) {
71
+ for (auto& rtol : rtols) {
72
+ if (at::allclose(ref_float, oth_float, rtol, atol)) {
73
+ last_succeed_atol = atol;
74
+ last_succeed_rtol = rtol;
75
+ }
76
+ }
77
+ }
78
+ if (last_succeed_atol == 1) {
79
+ return FAIL;
80
+ }
81
+ else {
82
+ TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol);
83
+ }
84
+
85
+ return OK;
86
+ }
87
+
88
+ char transa;
89
+ char transb;
90
+ int64_t m;
91
+ int64_t n;
92
+ int64_t k;
93
+ at::opmath_type<T> alpha;
94
+ const T* a;
95
+ int64_t lda;
96
+ const T* b;
97
+ int64_t ldb;
98
+ at::opmath_type<T> beta;
99
+ T* c;
100
+ int64_t ldc;
101
+ };
102
+
103
+ template <typename T>
104
+ struct GemmStridedBatchedParams : OpParams {
105
+ std::string Signature() const override {
106
+ return c10::str(transa, transb, "_", m, "_", n, "_", k, "_B_", batch);
107
+ }
108
+
109
+ GemmStridedBatchedParams* DeepCopy() const {
110
+ GemmStridedBatchedParams* copy = new GemmStridedBatchedParams;
111
+ *copy = *this;
112
+ c10::DeviceIndex device = 0;
113
+ AT_CUDA_CHECK(c10::cuda::GetDevice(&device));
114
+ size_t c_size = batch * stride_c * sizeof(T);
115
+ copy->c = static_cast<T*>(c10::cuda::CUDACachingAllocator::raw_alloc(c_size));
116
+ AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync(
117
+ copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true));
118
+ return copy;
119
+ }
120
+
121
+ // only call on object returned by DeepCopy
122
+ void Delete() {
123
+ c10::cuda::CUDACachingAllocator::raw_delete(c);
124
+ }
125
+
126
+ TuningStatus NumericalCheck(GemmStridedBatchedParams<T> *other) {
127
+ auto options = at::TensorOptions().dtype(c10::CppTypeToScalarType<T>::value).device(at::kCUDA);
128
+ // comparison done as 1D tensor
129
+ at::Tensor ref = at::from_blob(c, {batch*stride_c}, options);
130
+ at::Tensor oth = at::from_blob(other->c, {batch*stride_c}, options);
131
+ at::Tensor ref_float = ref.to(at::kFloat);
132
+ at::Tensor oth_float = oth.to(at::kFloat);
133
+ std::vector<double> atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
134
+ std::vector<double> rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
135
+ double last_succeed_atol = 1;
136
+ double last_succeed_rtol = 1;
137
+ for (auto& atol : atols) {
138
+ for (auto& rtol : rtols) {
139
+ if (at::allclose(ref_float, oth_float, rtol, atol)) {
140
+ last_succeed_atol = atol;
141
+ last_succeed_rtol = rtol;
142
+ }
143
+ }
144
+ }
145
+ if (last_succeed_atol == 1) {
146
+ return FAIL;
147
+ }
148
+ else {
149
+ TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol);
150
+ }
151
+
152
+ return OK;
153
+ }
154
+
155
+ char transa;
156
+ char transb;
157
+ int64_t m;
158
+ int64_t n;
159
+ int64_t k;
160
+ at::opmath_type<T> alpha;
161
+ const T* a;
162
+ int64_t lda;
163
+ int64_t stride_a;
164
+ const T* b;
165
+ int64_t ldb;
166
+ int64_t stride_b;
167
+ at::opmath_type<T> beta;
168
+ T* c;
169
+ int64_t ldc;
170
+ int64_t stride_c;
171
+ int64_t batch;
172
+ };
173
+
174
+ } // namespace at::cuda::tunable
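NumericalCheck above sweeps progressively tighter (atol, rtol) pairs and keeps the last pair, in loop order, for which at::allclose still passes. A host-side sketch of the same sweep using the allclose rule |ref - other| <= atol + rtol * |other|; allclose_ref and the sample vectors are illustrative.

#include <cmath>
#include <cstdio>
#include <initializer_list>
#include <vector>

// The element-wise criterion used by at::allclose.
static bool allclose_ref(const std::vector<float>& ref, const std::vector<float>& other,
                         double rtol, double atol) {
  for (size_t i = 0; i < ref.size(); ++i) {
    const double diff = std::fabs(static_cast<double>(ref[i]) - static_cast<double>(other[i]));
    if (diff > atol + rtol * std::fabs(static_cast<double>(other[i]))) return false;
  }
  return true;
}

int main() {
  std::vector<float> ref{1.0f, 2.0f, 3.0f};
  std::vector<float> oth{1.0f, 2.0001f, 3.0f};
  double last_atol = 1, last_rtol = 1;  // 1 means "nothing passed yet", as above
  for (double atol : {1e-1, 1e-2, 1e-3, 1e-4, 1e-5}) {
    for (double rtol : {1e-1, 1e-2, 1e-3, 1e-4, 1e-5}) {
      if (allclose_ref(ref, oth, rtol, atol)) {
        last_atol = atol;
        last_rtol = rtol;
      }
    }
  }
  std::printf("last passing tolerances: atol=%g rtol=%g\n", last_atol, last_rtol);
  return last_atol == 1.0 ? 1 : 0;  // FAIL vs OK, mirroring NumericalCheck
}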
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h ADDED
@@ -0,0 +1,379 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ #pragma once
5
+
6
+ #include <ATen/cuda/CUDAContext.h>
7
+ #include <ATen/cuda/tunable/TunableOp.h>
8
+ #include <ATen/cuda/tunable/GemmCommon.h>
9
+ #include <c10/cuda/CUDACachingAllocator.h>
10
+ #include <c10/util/StringUtil.h>
11
+
12
+ #include <hipblaslt/hipblaslt.h>
13
+ #include <hipblaslt/hipblaslt-ext.hpp>
14
+
15
+ #define TORCH_HIPBLASLT_CHECK(EXPR) \
16
+ do { \
17
+ hipblasStatus_t __err = EXPR; \
18
+ TORCH_CHECK(__err == HIPBLAS_STATUS_SUCCESS, \
19
+ "hipblaslt error: ", \
20
+ hipblasStatusToString(__err), \
21
+ " when calling `" #EXPR "`"); \
22
+ } while (0)
23
+
24
+ namespace at::cuda::tunable {
25
+
26
+ #ifdef HIPBLASLT_HAS_GETINDEXFROMALGO
27
+ #define GETINDEXFROMALGO(algo) hipblaslt_ext::getIndexFromAlgo(algo)
28
+ #else
29
+ static int getIndexFromAlgo(hipblasLtMatmulAlgo_t& algo) {
30
+ int* algo_ptr = (int*)algo.data;
31
+ if(*algo_ptr < 0) {
32
+ return -1;
33
+ }
34
+ return *algo_ptr;
35
+ }
36
+ #define GETINDEXFROMALGO(algo) getIndexFromAlgo(algo)
37
+ #endif
38
+
39
+ #ifdef HIPBLASLT_CUSTOM_COMPUTE_TYPE
40
+ #define COMPUTE_TYPE_32 HIPBLASLT_COMPUTE_F32
41
+ #else
42
+ #define COMPUTE_TYPE_32 HIPBLAS_COMPUTE_32F
43
+ #endif
44
+
45
+ #ifdef HIPBLASLT_CUSTOM_DATA_TYPE
46
+
47
+ template <typename T>
48
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor();
49
+
50
+ template <>
51
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<float>() {
52
+ return HIPBLASLT_R_32F;
53
+ }
54
+
55
+ template <>
56
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<Half>() {
57
+ return HIPBLASLT_R_16F;
58
+ }
59
+
60
+ template <>
61
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<BFloat16>() {
62
+ return HIPBLASLT_R_16B;
63
+ }
64
+
65
+ template <>
66
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<double>() {
67
+ return HIPBLASLT_R_64F;
68
+ }
69
+
70
+ #define DATA_TYPE_R_32 HIPBLASLT_R_32F
71
+
72
+ #else
73
+
74
+ template <typename T>
75
+ constexpr hipblasDatatype_t HipBlasDataTypeFor();
76
+
77
+ template <>
78
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<float>() {
79
+ return HIPBLAS_R_32F;
80
+ }
81
+
82
+ template <>
83
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<Half>() {
84
+ return HIPBLAS_R_16F;
85
+ }
86
+
87
+ template <>
88
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<BFloat16>() {
89
+ return HIPBLAS_R_16B;
90
+ }
91
+
92
+ template <>
93
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<double>() {
94
+ return HIPBLAS_R_64F;
95
+ }
96
+
97
+ #ifdef HIPBLAS_V2
98
+ #define DATA_TYPE_R_32 HIP_R_32F
99
+ #else
100
+ #define DATA_TYPE_R_32 HIPBLAS_R_32F
101
+ #endif
102
+
103
+ #endif
104
+
105
+ template <typename T, typename ParamsT>
106
+ int GetBatchFromParams(const ParamsT* params) {
107
+ return 1;
108
+ }
109
+
110
+ template <typename T>
111
+ int GetBatchFromParams(const GemmStridedBatchedParams<T>* params) {
112
+ return params->batch;
113
+ }
114
+
115
+ template <typename T, typename ParamsT>
116
+ int GetStrideAFromParams(const ParamsT* params) {
117
+ return 1;
118
+ }
119
+
120
+ template <typename T>
121
+ int GetStrideAFromParams(const GemmStridedBatchedParams<T>* params) {
122
+ return params->stride_a;
123
+ }
124
+
125
+ template <typename T, typename ParamsT>
126
+ int GetStrideBFromParams(const ParamsT* params) {
127
+ return 1;
128
+ }
129
+
130
+ template <typename T>
131
+ int GetStrideBFromParams(const GemmStridedBatchedParams<T>* params) {
132
+ return params->stride_b;
133
+ }
134
+
135
+ template <typename T, typename ParamsT>
136
+ int GetStrideCFromParams(const ParamsT* params) {
137
+ return 1;
138
+ }
139
+
140
+ template <typename T>
141
+ int GetStrideCFromParams(const GemmStridedBatchedParams<T>* params) {
142
+ return params->stride_c;
143
+ }
144
+
145
+ static hipblasOperation_t _hipblasOpFromChar(char op) {
146
+ switch (op) {
147
+ case 'n':
148
+ case 'N':
149
+ return HIPBLAS_OP_N;
150
+ case 't':
151
+ case 'T':
152
+ return HIPBLAS_OP_T;
153
+ case 'c':
154
+ case 'C':
155
+ return HIPBLAS_OP_C;
156
+ }
157
+ AT_ERROR(
158
+ "_hipblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
159
+ }
160
+
161
+ static char _charFromhipblasOp(hipblasOperation_t op) {
162
+ switch (op) {
163
+ case HIPBLAS_OP_N:
164
+ return 'N';
165
+ case HIPBLAS_OP_T:
166
+ return 'T';
167
+ case HIPBLAS_OP_C:
168
+ return 'C';
169
+ }
170
+ AT_ERROR(
171
+ "_charFromhipblasOp input should be HIPBLAS_OP_N/T/C but got `", op, "`");
172
+ }
173
+
174
+ static hipblasOperation_t MapLayoutToHipBlasLt(BlasOp layout) {
175
+ if (layout == BlasOp::N) {
176
+ return HIPBLAS_OP_N;
177
+ }
178
+ return HIPBLAS_OP_T;
179
+ }
180
+
181
+ static size_t GetHipblasltWorkspaceSize() {
182
+ static const char * env = getenv("HIPBLASLT_WORKSPACE_SIZE");
183
+ // 256MB is max workspace size allowed for hipblaslt
184
+ // hipblaslt-bench uses 32MB
185
+ // recommendation from hipblaslt author was 76MB
186
+ size_t workspace_size = 2*128*1024*1024; // default 256MB
187
+ if (env) {
188
+ try {
189
+ workspace_size = std::stoi(env);
190
+ } catch(std::invalid_argument const& e) {
191
+ TORCH_WARN("invalid HIPBLASLT_WORKSPACE_SIZE,",
192
+ " using default workspace size of ", workspace_size, " bytes.");
193
+ } catch(std::out_of_range const& e) {
194
+ TORCH_WARN("HIPBLASLT_WORKSPACE_SIZE out of range,",
195
+ " using default workspace size of ", workspace_size, " bytes.");
196
+ }
197
+ }
198
+ return workspace_size;
199
+ }
200
+
201
+ template <typename T, BlasOp ALayout, BlasOp BLayout, typename ParamsT>
202
+ class HipblasltGemmOp : public Callable<ParamsT> {
203
+ public:
204
+ HipblasltGemmOp(hipblasLtMatmulAlgo_t algo) : algo_{algo} {}
205
+
206
+ TuningStatus Call(const ParamsT* params) override {
207
+ hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout);
208
+ hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout);
209
+ auto in_out_datatype = HipBlasDataTypeFor<T>();
210
+ auto opa = _hipblasOpFromChar(params->transa);
211
+ auto opb = _hipblasOpFromChar(params->transb);
212
+
213
+ TORCH_CHECK(transa_outer == opa && transb_outer == opb, "trans mismatch, shouldn't happen");
214
+
215
+ float alpha = static_cast<float>(params->alpha);
216
+ float beta = static_cast<float>(params->beta);
217
+
218
+ hipblasLtMatrixLayout_t mat_a, mat_b, mat_c;
219
+ hipblasLtMatmulDesc_t matmul;
220
+ if (opa == HIPBLAS_OP_N) {
221
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, in_out_datatype, params->m, params->k, params->lda));
222
+ }
223
+ else {
224
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, in_out_datatype, params->k, params->m, params->lda));
225
+ }
226
+ if (opb == HIPBLAS_OP_N) {
227
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, in_out_datatype, params->k, params->n, params->ldb));
228
+ }
229
+ else {
230
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, in_out_datatype, params->n, params->k, params->ldb));
231
+ }
232
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_c, in_out_datatype, params->m, params->n, params->ldc));
233
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescCreate(&matmul, COMPUTE_TYPE_32, DATA_TYPE_R_32));
234
+
235
+ int batch = GetBatchFromParams<T>(params);
236
+ if (batch > 1) {
237
+ int64_t stride_a = GetStrideAFromParams<T>(params);
238
+ int64_t stride_b = GetStrideBFromParams<T>(params);
239
+ int64_t stride_c = GetStrideCFromParams<T>(params);
240
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
241
+ mat_a, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch)));
242
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
243
+ mat_a, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_a, sizeof(stride_a)));
244
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
245
+ mat_b, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch)));
246
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
247
+ mat_b, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_b, sizeof(stride_b)));
248
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
249
+ mat_c, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch)));
250
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
251
+ mat_c, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_c, sizeof(stride_c)));
252
+ }
253
+
254
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescSetAttribute(
255
+ matmul, HIPBLASLT_MATMUL_DESC_TRANSA, &opa, sizeof(int32_t)));
256
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescSetAttribute(
257
+ matmul, HIPBLASLT_MATMUL_DESC_TRANSB, &opb, sizeof(int32_t)));
258
+
259
+ size_t workspace_size = GetHipblasltWorkspaceSize();
260
+
261
+ auto op_handle = at::cuda::getCurrentCUDABlasLtHandle();
262
+
263
+ size_t ret_workspace_size = 0;
264
+ auto status = hipblaslt_ext::matmulIsAlgoSupported(op_handle,
265
+ matmul,
266
+ &alpha,
267
+ mat_a,
268
+ mat_b,
269
+ &beta,
270
+ mat_c,
271
+ mat_c,
272
+ algo_,
273
+ ret_workspace_size);
274
+
275
+ if (status == HIPBLAS_STATUS_SUCCESS) {
276
+ if (ret_workspace_size >= workspace_size) {
277
+ //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " workspace too large");
278
+ return FAIL;
279
+ }
280
+ }
281
+ else {
282
+ //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " not supported");
283
+ return FAIL;
284
+ }
285
+
286
+ void* workspace_buffer = nullptr;
287
+ if (workspace_size > 0) {
288
+ workspace_buffer = c10::cuda::CUDACachingAllocator::raw_alloc(workspace_size);
289
+ }
290
+
291
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmul(op_handle,
292
+ matmul,
293
+ &alpha,
294
+ params->a,
295
+ mat_a,
296
+ params->b,
297
+ mat_b,
298
+ &beta,
299
+ params->c,
300
+ mat_c,
301
+ params->c,
302
+ mat_c,
303
+ &algo_,
304
+ workspace_buffer,
305
+ workspace_size,
306
+ at::cuda::getCurrentCUDAStream()));
307
+
308
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescDestroy(matmul));
309
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_a));
310
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_b));
311
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_c));
312
+ if (workspace_size > 0) {
313
+ c10::cuda::CUDACachingAllocator::raw_delete(workspace_buffer);
314
+ }
315
+ return OK;
316
+ }
317
+
318
+ private:
319
+ hipblasLtMatmulAlgo_t algo_;
320
+ };
321
+
322
+ template <typename T, BlasOp ALayout, BlasOp BLayout, typename ParamsT>
323
+ auto GetHipBlasLtTypeStringAndOps() {
324
+ hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout);
325
+ hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout);
326
+ auto in_out_datatype = HipBlasDataTypeFor<T>();
327
+ std::vector<hipblasLtMatmulHeuristicResult_t> heuristic_result;
328
+
329
+ hipblasLtHandle_t handle;
330
+ TORCH_HIPBLASLT_CHECK(hipblasLtCreate(&handle));
331
+ TORCH_HIPBLASLT_CHECK(hipblaslt_ext::getAllAlgos(handle,
332
+ hipblaslt_ext::GemmType::HIPBLASLT_GEMM,
333
+ transa_outer,
334
+ transb_outer,
335
+ in_out_datatype,
336
+ in_out_datatype,
337
+ in_out_datatype,
338
+ in_out_datatype,
339
+ COMPUTE_TYPE_32,
340
+ heuristic_result));
341
+ TORCH_HIPBLASLT_CHECK(hipblasLtDestroy(handle));
342
+
343
+ // Sort heuristic_result by algo index to make sure the order of returned algos is deterministic.
344
+ std::sort(heuristic_result.begin(),
345
+ heuristic_result.end(),
346
+ [](hipblasLtMatmulHeuristicResult_t& a, hipblasLtMatmulHeuristicResult_t& b) {
347
+ return GETINDEXFROMALGO(a.algo) < GETINDEXFROMALGO(b.algo);
348
+ });
349
+
350
+ int returned_algo_count = heuristic_result.size();
351
+ std::vector<std::pair<std::string, std::unique_ptr<Callable<ParamsT>>>> ret;
352
+ for (int i = 0; i < returned_algo_count; i++) {
353
+ auto algo = heuristic_result[i].algo;
354
+ int algo_index = GETINDEXFROMALGO(algo);
355
+ auto callable = std::make_unique<HipblasltGemmOp<T, ALayout, BLayout, ParamsT>>(algo);
356
+ std::string type_string = c10::str(
357
+ "Gemm_Hipblaslt_", _charFromhipblasOp(transa_outer), _charFromhipblasOp(transb_outer), "_", algo_index);
358
+ ret.emplace_back(type_string, std::move(callable));
359
+ }
360
+
361
+ return ret;
362
+ }
363
+
364
+ template <typename T, BlasOp ALayout, BlasOp BLayout>
365
+ auto GetHipBlasLtGemmTypeStringAndOps() {
366
+ return GetHipBlasLtTypeStringAndOps<T, ALayout, BLayout, GemmParams<T>>();
367
+ }
368
+
369
+ template <typename T, BlasOp ALayout, BlasOp BLayout>
370
+ auto GetHipBlasLtGemmStridedBatchedTypeStringAndOps() {
371
+ return GetHipBlasLtTypeStringAndOps<T, ALayout, BLayout, GemmStridedBatchedParams<T>>();
372
+ }
373
+
374
+ #undef TORCH_HIPBLASLT_CHECK
375
+ #undef GETINDEXFROMALGO
376
+ #undef COMPUTE_TYPE_32
377
+ #undef DATA_TYPE_R_32
378
+
379
+ } // namespace at::cuda::tunable