applied-ai-018 commited on
Commit
b60b1e0
·
verified ·
1 Parent(s): 1baeaa7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h +48 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h +17 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h +176 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h +99 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h +259 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h +227 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h +27 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +32 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h +39 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h +386 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h +600 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h +124 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h +65 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h +242 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h +773 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h +17 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h +314 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h +30 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h +36 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h +238 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings_class.h +34 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h +83 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h +161 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h +199 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h +596 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h +9 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh +47 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh +149 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh +508 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh +537 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h +334 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h +19 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h +9 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h +86 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h +101 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h +23 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h +208 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h +138 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h +87 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh +57 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h +90 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h +318 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h +261 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh +15 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h +20 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h +37 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh +121 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h +44 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h +165 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh +121 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <ostream>
7
+
8
+ namespace at {
9
+
10
+ enum class NameType: uint8_t { BASIC, WILDCARD };
11
+
12
+ struct TORCH_API Dimname {
13
+ static Dimname fromSymbol(Symbol name);
14
+ static Dimname wildcard();
15
+ static bool isValidName(const std::string& name);
16
+
17
+ NameType type() const { return type_; }
18
+ Symbol symbol() const { return name_; }
19
+
20
+ bool isBasic() const { return type_ == NameType::BASIC; }
21
+ bool isWildcard() const { return type_ == NameType::WILDCARD; }
22
+
23
+ bool matches(Dimname other) const;
24
+ c10::optional<Dimname> unify(Dimname other) const;
25
+
26
+ private:
27
+ Dimname(Symbol name)
28
+ : name_(name), type_(NameType::BASIC) {}
29
+ Dimname(Symbol name, NameType type)
30
+ : name_(name), type_(type) {}
31
+
32
+ Symbol name_;
33
+ NameType type_;
34
+ };
35
+
36
+ using DimnameList = c10::ArrayRef<Dimname>;
37
+
38
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname);
39
+
40
+ inline bool operator==(const Dimname& lhs, const Dimname& rhs) {
41
+ return lhs.symbol() == rhs.symbol();
42
+ }
43
+
44
+ inline bool operator!=(const Dimname& lhs, const Dimname& rhs) {
45
+ return !(lhs == rhs);
46
+ }
47
+
48
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/library.h>
4
+ #include <ATen/core/dispatch/Dispatcher.h>
5
+ #include <c10/util/ArrayRef.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
8
+
9
+ namespace at {
10
+ namespace impl {
11
+
12
+ TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
13
+ TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
14
+ TORCH_API bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li);
15
+ using c10::impl::dispatch_mode_enabled;
16
+
17
+ }}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+
7
+ namespace c10 {
8
+
9
+ struct IValue;
10
+ using Stack = std::vector<IValue>;
11
+
12
+ class OperatorHandle;
13
+ class KernelFunction;
14
+
15
+ // This kernel implements the behavior of falling through to the next available
16
+ // registered dispatch key. The implementation of this function is FAST; it is
17
+ // no overhead to fallthrough to the next key. See cpp file for some more
18
+ // implementation notes; notably, this does NOT actually go through the
19
+ // boxing/unboxing codepath.
20
+ TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
21
+
22
+ // Note [Ambiguity in AutogradOther kernel]
23
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24
+ // This error-reporting kernel is registered to the AutogradOther entry in the
25
+ // dispatch table when there is both a CompositeImplicitAutograd kernel and a
26
+ // backend kernel for ANY backend that maps to AutogradOther. To see why
27
+ // this is necessary in the AutogradOther case, it's helpful to first see
28
+ // why everything works out fine for a backend that has a reserved Autograd
29
+ // entry (see rule 2.2 in [Note] DispatchTable computation):
30
+ //
31
+ // CPU AutogradCPU
32
+ // reg? registers with...
33
+ // -------------------------------------------------
34
+ // y Autograd registration takes precedence
35
+ // over CompositeImplicitAutograd.
36
+ // This is good, because the CPU specific backend
37
+ // implementation is more specialized and typically better;
38
+ // if we used the composite, we would bypass it.
39
+ // (NB: the Autograd key is guaranteed to exist because
40
+ // the autograd codegen requires it!)
41
+ //
42
+ // n CompositeImplicitAutograd takes precedence.
43
+ // This is also good, because the Autograd
44
+ // registration (if it exists) would try to redispatch
45
+ // to the (non-existent) CPU implementation; by
46
+ // using the composite, we ensure the operator
47
+ // actually works.
48
+ //
49
+ // As you can see, when we have a specific Autograd key (AutogradCPU), we can
50
+ // decide whether or not to use the CompositeImplicitAutograd kernel or the
51
+ // Autograd kernel based on whether or not the backend kernel exists.
52
+ //
53
+ // However, for AutogradOther (which is the catchall autograd kernel for
54
+ // everything that doesn't have a specific Autograd key), we can't do this
55
+ // trick because there isn't any unique backend to peek at to disambiguate;
56
+ // if there are some backends that have implementations they prefer Autograd,
57
+ // but unimplemented backends would prefer CompositeImplicitAutograd. Rather
58
+ // than arbitrarily pick one or the other, we just register a kernel that raises
59
+ // an error and let the user decide how to proceed.
60
+ TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
61
+
62
+ // Note [named_not_supported_kernel]
63
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64
+ // This kernel implements reporting an error message saying that named tensor is
65
+ // not supported. This kernel doesn't rely on the Stack, and so it is special
66
+ // cased in the dispatcher to be triggered before we attempt boxing (so we can
67
+ // give a good error message in cases when boxing is not supported). When
68
+ // boxing is universally supported this can be removed.
69
+ [[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
70
+
71
+ /**
72
+ * BoxedKernel is similar to a std::function storing a boxed kernel.
73
+ */
74
+ class TORCH_API BoxedKernel final {
75
+ public:
76
+ // This is how boxed kernels are actually stored
77
+ //
78
+ // Note [Plumbing Keys Through The Dispatcher]
79
+ // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
80
+ // upon every dispatch call into order to compute which kernel to dispatch to.
81
+ //
82
+ // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
83
+ // to have a first argument of type DispatchKeySet.
84
+ //
85
+ // What are the invariants of the DispatchKeySet when it gets passed to a kernel?
86
+ // - All keys to the left of the current dispatch key have been masked out.
87
+ // (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
88
+ // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments
89
+ // are still in the set.
90
+ //
91
+ // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
92
+ // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
93
+ // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
94
+ // upon redispatching.
95
+ //
96
+ // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature
97
+ // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
98
+ //
99
+ // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
100
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
101
+ using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
102
+ // This is the public API for how boxed kernels are defined
103
+ using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
104
+ using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
105
+
106
+ BoxedKernel();
107
+
108
+ // Fast path for dispatch to allow not touching the boxed kernel in
109
+ // the common case where unboxed is available.
110
+ bool isValid() const;
111
+ bool isFallthrough() const;
112
+
113
+ /**
114
+ * Call the function with boxed arguments.
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Create a KernelFunction from a boxed function.
120
+ *
121
+ * Example:
122
+ *
123
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
124
+ * > BoxedFunction func = BoxedKernel::makeFromFunction<&boxed_func>();
125
+ */
126
+ template<BoxedKernelFunction* func>
127
+ static BoxedKernel makeFromFunction();
128
+
129
+ /**
130
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
131
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
132
+ */
133
+ template<BoxedKernelFunction_withDispatchKeys* func>
134
+ static BoxedKernel makeFromFunction();
135
+
136
+ /**
137
+ * Create a KernelFunction from a boxed functor.
138
+ *
139
+ * Example:
140
+ *
141
+ * > class MyFunctor final : public c10::OperatorKernel {
142
+ * > public:
143
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
144
+ * > };
145
+ * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>());
146
+ */
147
+ template<class KernelFunctor>
148
+ static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
149
+
150
+
151
+ static BoxedKernel makeFallthrough();
152
+ static BoxedKernel makeAmbiguousAutogradOther();
153
+ static BoxedKernel makeNamedNotSupported();
154
+
155
+ private:
156
+
157
+ friend class KernelFunction;
158
+
159
+ template<BoxedKernelFunction* func>
160
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
161
+
162
+ template<BoxedKernelFunction_withDispatchKeys* func>
163
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
164
+
165
+ explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func);
166
+
167
+ OperatorKernel* getFunctor() const;
168
+ InternalBoxedKernelFunction* getFnPtr() const;
169
+
170
+ c10::intrusive_ptr<OperatorKernel> functor_;
171
+ InternalBoxedKernelFunction* boxed_kernel_func_;
172
+ };
173
+
174
+ } // namespace c10
175
+
176
+ #include <ATen/core/boxing/BoxedKernel_impl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ namespace c10 {
4
+
5
+ inline BoxedKernel::BoxedKernel()
6
+ : functor_()
7
+ , boxed_kernel_func_(nullptr)
8
+ {}
9
+
10
+ inline BoxedKernel::BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func)
11
+ : functor_(std::move(functor))
12
+ , boxed_kernel_func_(boxed_kernel_func)
13
+ {}
14
+
15
+ template<BoxedKernel::BoxedKernelFunction* func>
16
+ inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) {
17
+ // Note that we're dropping the DispatchKeySet argument.
18
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
19
+ func(opHandle, stack);
20
+ }
21
+
22
+ template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
23
+ inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) {
24
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
25
+ func(opHandle, ks, stack);
26
+ }
27
+
28
+ inline bool BoxedKernel::isValid() const {
29
+ return boxed_kernel_func_ != nullptr;
30
+ }
31
+
32
+ inline bool BoxedKernel::isFallthrough() const {
33
+ return boxed_kernel_func_ == &fallthrough_kernel;
34
+ }
35
+
36
+ inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
37
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
38
+ boxed_kernel_func_ != nullptr,
39
+ "Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel."
40
+ );
41
+ (*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack);
42
+ }
43
+
44
+ template<BoxedKernel::BoxedKernelFunction* func>
45
+ inline BoxedKernel BoxedKernel::makeFromFunction() {
46
+ return BoxedKernel(
47
+ nullptr, // no functor_ object
48
+ &make_boxed_function<func>
49
+ );
50
+ }
51
+
52
+ template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
53
+ inline BoxedKernel BoxedKernel::makeFromFunction() {
54
+ return BoxedKernel(
55
+ nullptr, // no functor_ object
56
+ &make_boxed_function<func>
57
+ );
58
+ }
59
+
60
+ inline BoxedKernel BoxedKernel::makeFallthrough() {
61
+ return BoxedKernel(
62
+ nullptr, // no functor_ object
63
+ &fallthrough_kernel
64
+ );
65
+ }
66
+
67
+ inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() {
68
+ return BoxedKernel(
69
+ nullptr, // no functor_ object
70
+ &ambiguous_autogradother_kernel
71
+ );
72
+ }
73
+
74
+ inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
75
+ return BoxedKernel(
76
+ nullptr, // no functor_ object
77
+ &named_not_supported_kernel
78
+ );
79
+ }
80
+
81
+ template<class KernelFunctor>
82
+ inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
83
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
84
+ return BoxedKernel(
85
+ std::move(kernelFunctor),
86
+ [](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {
87
+ (*static_cast<KernelFunctor*>(kernel))(op, ks, stack);
88
+ }
89
+ );
90
+ }
91
+
92
+ inline OperatorKernel* BoxedKernel::getFunctor() const {
93
+ return functor_.get();
94
+ }
95
+ inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const {
96
+ return boxed_kernel_func_;
97
+ }
98
+
99
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ATen_fwd.h>
4
+ #include <ATen/core/boxing/BoxedKernel.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/core/DispatchKeySet.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+ #include <c10/util/TypeList.h>
9
+
10
+ namespace c10 {
11
+
12
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
13
+
14
+ class OperatorHandle;
15
+ struct OperatorKernel;
16
+ class KernelFunction;
17
+
18
+ template <typename T>
19
+ using has_symint =
20
+ guts::disjunction<
21
+ std::is_same<c10::SymInt, T>,
22
+ std::is_same<c10::SymIntArrayRef, T>,
23
+ std::is_same<at::OptionalSymIntArrayRef, T>,
24
+ std::is_same<c10::optional<c10::SymInt>, T>
25
+ >;
26
+
27
+ template <typename T>
28
+ struct remove_symint {
29
+ using type = T;
30
+ };
31
+
32
+ template <>
33
+ struct remove_symint<c10::SymInt> {
34
+ using type = int64_t;
35
+ };
36
+
37
+ template <>
38
+ struct remove_symint<at::OptionalSymIntArrayRef> {
39
+ using type = OptionalIntArrayRef;
40
+ };
41
+
42
+ template <>
43
+ struct remove_symint<c10::SymIntArrayRef> {
44
+ using type = c10::IntArrayRef;
45
+ };
46
+
47
+ template <>
48
+ struct remove_symint<c10::optional<c10::SymInt>> {
49
+ using type = c10::optional<int64_t>;
50
+ };
51
+
52
+
53
+ template <bool symint, typename T>
54
+ struct maybe_keep_symint final {};
55
+
56
+ template <typename T>
57
+ struct maybe_keep_symint<true, T> { using type = T; };
58
+
59
+ template <typename T>
60
+ struct maybe_keep_symint<false, T> { using type = typename remove_symint<T>::type; };
61
+
62
+ template <typename T>
63
+ using fn_has_symint = typename guts::typelist::true_for_any_type<
64
+ has_symint,
65
+ typename guts::infer_function_traits<T>::type::parameter_types
66
+ >;
67
+
68
+ template <typename T>
69
+ struct fn_remove_symint;
70
+
71
+ template <typename Ret, typename... Args>
72
+ struct fn_remove_symint<Ret(Args...)> {
73
+ using type = Ret(typename remove_symint<Args>::type...);
74
+ };
75
+
76
+ /**
77
+ * KernelFunction is similar to std::function but stores a kernel function.
78
+ * You can create a KernelFunction from a boxed or unboxed function/functor/lambda
79
+ * and call it in a boxed or unboxed way. If the way it was created doesn't
80
+ * match the way it was called, it will do boxing or unboxing as necessary.
81
+ */
82
+ class TORCH_API KernelFunction final {
83
+ public:
84
+ using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction;
85
+ using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction;
86
+ using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys;
87
+
88
+ KernelFunction();
89
+
90
+ // Fast path for dispatch to allow not touching the boxed kernel in
91
+ // the common case where unboxed is available.
92
+ bool isValidUnboxed() const;
93
+ bool isValidSymUnboxed() const;
94
+ bool isValid() const;
95
+ bool isFallthrough() const;
96
+
97
+ /**
98
+ * Call the function in a boxed way.
99
+ * If the kernel function was created with an unboxed function,
100
+ * this will call an unboxing wrapper which then calls into that
101
+ * unboxed function.
102
+ *
103
+ * Example:
104
+ *
105
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
106
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
107
+ * > Tensor result = func.callBoxed(stack);
108
+ *
109
+ * Or, with an unboxed implementation:
110
+ *
111
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
112
+ * > [] (Tensor a, bool b) -> Tensor {...});
113
+ * > Tensor result = func.callBoxed(stack);
114
+ */
115
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
116
+
117
+ /**
118
+ * Call the function in an unboxed way.
119
+ * If the kernel function was created with a boxed function,
120
+ * this will box all inputs and then call into that boxed function.
121
+ *
122
+ * Note that this doesn't work for all types yet.
123
+ *
124
+ * Example:
125
+ *
126
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
127
+ * > [] (Tensor a, bool b) -> Tensor {...});
128
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
129
+ *
130
+ * Or, with a boxed implementation:
131
+ *
132
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
133
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
134
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
135
+ */
136
+ template<class Return, class... Args>
137
+ Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const;
138
+
139
+ /**
140
+ * Create a KernelFunction from a BoxedKernel.
141
+ */
142
+ static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn);
143
+
144
+ /**
145
+ * Create a KernelFunction from a boxed function.
146
+ *
147
+ * Example:
148
+ *
149
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
150
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
151
+ */
152
+ template<BoxedKernelFunction* func>
153
+ static KernelFunction makeFromBoxedFunction();
154
+
155
+ /**
156
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
157
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
158
+ */
159
+ template<BoxedKernelFunction_withDispatchKeys* func>
160
+ static KernelFunction makeFromBoxedFunction();
161
+
162
+ /**
163
+ * Create a KernelFunction from an unboxed functor.
164
+ *
165
+ * Example:
166
+ *
167
+ * > class MyFunctor final : public c10::OperatorKernel {
168
+ * > public:
169
+ * > Tensor operator()(Tensor a, Tensor b) {...}
170
+ * > };
171
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor<MyFunctor>(std::make_unique<MyFunctor>());
172
+ */
173
+ template<bool AllowLegacyTypes = false, class KernelFunctor>
174
+ static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
175
+
176
+ /**
177
+ * Create a KernelFunction from a boxed functor.
178
+ *
179
+ * Example:
180
+ *
181
+ * > class MyFunctor final : public c10::OperatorKernel {
182
+ * > public:
183
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
184
+ * > };
185
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique<MyFunctor>());
186
+ */
187
+ template<class KernelFunctor>
188
+ static KernelFunction makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
189
+
190
+ /**
191
+ * Create a KernelFunction from an unboxed function.
192
+ * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction
193
+ * because knowing the function pointer as a template argument (i.e. at
194
+ * compile time) allows the compiler to inline the function into its
195
+ * unboxing wrapper and yields better performance when calling the function.
196
+ *
197
+ * Example:
198
+ *
199
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
200
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunction<decltype(unboxed_func), &unboxed_func>();
201
+ */
202
+ template<class FuncPtr, bool AllowLegacyTypes = false>
203
+ static KernelFunction makeFromUnboxedFunction(FuncPtr);
204
+
205
+ /**
206
+ * Create a KernelFunction from an unboxed function.
207
+ * KernelFunction::makeFromUnboxedFunction is usually a better choice than
208
+ * this if you know the function pointer at compile time, see doc comment
209
+ * there for an explanation.
210
+ *
211
+ * Example:
212
+ *
213
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
214
+ * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func);
215
+ */
216
+ template<bool AllowLegacyTypes = false, class FuncType>
217
+ static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func);
218
+
219
+ static KernelFunction makeFallthrough();
220
+ static KernelFunction makeAmbiguousAutogradOther();
221
+ static KernelFunction makeNamedNotSupported();
222
+
223
+ /**
224
+ * Create a KernelFunction from an unboxed lambda.
225
+ *
226
+ * Example:
227
+ *
228
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
229
+ * > [] (Tensor a, bool b) -> Tensor {...});
230
+ */
231
+ template<bool AllowLegacyTypes = false, class Lambda>
232
+ static std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
233
+ template<bool AllowLegacyTypes = false, class Lambda>
234
+ static std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
235
+
236
+ std::string dumpState() const;
237
+ // For testing internal invariants only
238
+ bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
239
+
240
+ private:
241
+
242
+ explicit KernelFunction(
243
+ std::unique_ptr<OperatorKernel> functor,
244
+ InternalBoxedKernelFunction* boxed_kernel_func,
245
+ void* unboxed_kernel_func,
246
+ void* sym_unboxed_kernel_func);
247
+ explicit KernelFunction(
248
+ BoxedKernel boxed_fn,
249
+ void* unboxed_kernel_func,
250
+ void* sym_unboxed_kernel_func);
251
+
252
+ BoxedKernel boxed_kernel_func_;
253
+ void* unboxed_kernel_func_;
254
+ void* sym_unboxed_kernel_func_;
255
+ };
256
+
257
+ }
258
+
259
+ #include <ATen/core/boxing/KernelFunction_impl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/core/boxing/impl/boxing.h>
2
+ #include <ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h>
3
+ #include <ATen/core/boxing/impl/WrapFunctionIntoFunctor.h>
4
+ #include <ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h>
5
+
6
+ namespace c10 {
7
+
8
+ inline KernelFunction::KernelFunction()
9
+ : boxed_kernel_func_()
10
+ , unboxed_kernel_func_(nullptr)
11
+ , sym_unboxed_kernel_func_(nullptr)
12
+ {}
13
+
14
+ inline KernelFunction::KernelFunction(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
15
+ : boxed_kernel_func_(std::move(functor), boxed_kernel_func)
16
+ , unboxed_kernel_func_(unboxed_kernel_func)
17
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
18
+ {}
19
+
20
+ inline KernelFunction::KernelFunction(BoxedKernel boxed_fn, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
21
+ : boxed_kernel_func_(std::move(boxed_fn))
22
+ , unboxed_kernel_func_(unboxed_kernel_func)
23
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
24
+ {}
25
+
26
+ inline bool KernelFunction::isValidUnboxed() const {
27
+ return unboxed_kernel_func_ != nullptr;
28
+ }
29
+
30
+ inline bool KernelFunction::isValidSymUnboxed() const {
31
+ return sym_unboxed_kernel_func_ != nullptr;
32
+ }
33
+
34
+ inline bool KernelFunction::isValid() const {
35
+ return boxed_kernel_func_.isValid();
36
+ }
37
+
38
+ inline bool KernelFunction::isFallthrough() const {
39
+ return boxed_kernel_func_.isFallthrough();
40
+ }
41
+
42
+ inline void KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
43
+ boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack);
44
+ }
45
+
46
+ template<class Return, class... Args>
47
+ inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) {
48
+ using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...);
49
+ ActualSignature* func = reinterpret_cast<ActualSignature*>(unboxed_kernel_func);
50
+ return (*func)(functor, dispatchKeySet, std::forward<Args>(args)...);
51
+ }
52
+
53
+ // This template requires you to explicitly specify the argument you want to
54
+ // forward; it doesn't work if you try to deduce it
55
+ // NB: keep this in sync with cloneWithRealTypes in function_schema.cpp
56
+
57
+ template <typename T>
58
+ inline typename remove_symint<T>::type unpackSymInt(T x) { return x; }
59
+
60
+ template <>
61
+ inline typename remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
62
+ return x.guard_int(__FILE__, __LINE__);
63
+ }
64
+
65
+ template <>
66
+ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIntArrayRef x) {
67
+ return C10_AS_INTARRAYREF_SLOW(x);
68
+ }
69
+
70
+ template <>
71
+ inline typename remove_symint<c10::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
72
+ return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt;
73
+ }
74
+
75
+ template <>
76
+ inline typename remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(at::OptionalSymIntArrayRef x) {
77
+ return x.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*x)) : c10::nullopt;
78
+ }
79
+
80
+ template<class Return, class... Args>
81
+ C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const {
82
+ // note: Args above is intentionally not Args&&. We don't want perfect
83
+ // forwarding, which would require Args to be deduced, but instead we
84
+ // want callers to explicitly specify the Args.
85
+
86
+ // This should get inlined by compiler
87
+ if (guts::disjunction<has_symint<Args>...>::value) {
88
+ if (sym_unboxed_kernel_func_ != nullptr) {
89
+ auto *functor = boxed_kernel_func_.getFunctor();
90
+ return callUnboxedKernelFunction<Return, Args...>(
91
+ sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
92
+ }
93
+
94
+ if (unboxed_kernel_func_ != nullptr) {
95
+ auto *functor = boxed_kernel_func_.getFunctor();
96
+ return callUnboxedKernelFunction<Return, typename remove_symint<Args>::type...>(
97
+ unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt<Args>(args)...);
98
+ }
99
+ } else {
100
+ if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) {
101
+ auto *functor = boxed_kernel_func_.getFunctor();
102
+ return callUnboxedKernelFunction<Return, Args...>(
103
+ unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
104
+ }
105
+ }
106
+
107
+ return impl::BoxedKernelWrapper<Return(Args...)>::call(
108
+ boxed_kernel_func_,
109
+ opHandle,
110
+ dispatchKeySet,
111
+ std::forward<Args>(args)...
112
+ );
113
+ }
114
+
115
+ inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) {
116
+ return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer
117
+ }
118
+
119
+ template<KernelFunction::BoxedKernelFunction* func>
120
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
121
+ return KernelFunction::makeFromBoxedKernel(
122
+ BoxedKernel::makeFromFunction<func>());
123
+ }
124
+
125
+ template<KernelFunction::BoxedKernelFunction_withDispatchKeys* func>
126
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
127
+ return KernelFunction::makeFromBoxedKernel(
128
+ BoxedKernel::makeFromFunction<func>());
129
+ }
130
+
131
+ inline KernelFunction KernelFunction::makeFallthrough() {
132
+ return KernelFunction::makeFromBoxedKernel(
133
+ BoxedKernel::makeFallthrough());
134
+ }
135
+
136
+ inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() {
137
+ return KernelFunction::makeFromBoxedKernel(
138
+ BoxedKernel::makeAmbiguousAutogradOther());
139
+ }
140
+
141
+ inline KernelFunction KernelFunction::makeNamedNotSupported() {
142
+ return KernelFunction::makeFromBoxedKernel(
143
+ BoxedKernel::makeNamedNotSupported());
144
+ }
145
+
146
+ template<bool AllowLegacyTypes, class KernelFunctor>
147
+ inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor) {
148
+ #ifndef NDEBUG
149
+ // This assertion is costly for build time so it's debug-gated.
150
+ static_assert(guts::is_functor<KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor> but the argument is not a functor.");
151
+ #endif
152
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
153
+
154
+ auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed<KernelFunctor>::call;
155
+ void* void_unboxed_fn = reinterpret_cast<void*>(unboxed_fn);
156
+ bool is_symint = fn_has_symint<decltype(unboxed_fn)>::value;
157
+ return KernelFunction(
158
+ std::move(kernelFunctor),
159
+ &impl::make_boxed_from_unboxed_functor<KernelFunctor, AllowLegacyTypes>::call,
160
+ is_symint ? nullptr : void_unboxed_fn,
161
+ is_symint ? void_unboxed_fn : nullptr
162
+ );
163
+ }
164
+
165
+ template<class KernelFunctor>
166
+ inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
167
+ return KernelFunction::makeFromBoxedKernel(
168
+ BoxedKernel::makeFromFunctor(std::move(kernelFunctor)));
169
+ }
170
+
171
+ template<class FuncPtr, bool AllowLegacyTypes>
172
+ inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) {
173
+ static_assert(is_compile_time_function_pointer<FuncPtr>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN.");
174
+ static_assert(!std::is_same<typename FuncPtr::FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
175
+ static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr");
176
+
177
+ #if !defined(C10_MOBILE)
178
+ (void)func_ptr; // Suppress unused variable warning
179
+ return makeFromUnboxedFunctor<AllowLegacyTypes, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>(
180
+ guts::make_unique_base<OperatorKernel, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>()
181
+ );
182
+ #else
183
+ // On mobile, we rather want to optimize for binary size than for performance,
184
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
185
+ // instead.
186
+ return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr());
187
+ #endif
188
+ }
189
+
190
+ template<bool AllowLegacyTypes, class FuncType>
191
+ inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) {
192
+ static_assert(guts::is_function_type<FuncType>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type.");
193
+ static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
194
+ TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
195
+
196
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(
197
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(func)
198
+ );
199
+ }
200
+
201
+ template<bool AllowLegacyTypes, class Lambda>
202
+ inline std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
203
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
204
+
205
+ #if !defined(C10_MOBILE)
206
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
207
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
208
+ );
209
+ #else
210
+ // On mobile, we rather want to optimize for binary size than for performance,
211
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
212
+ // instead.
213
+ using FuncType = typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type;
214
+ return makeFromUnboxedRuntimeFunction<AllowLegacyTypes, FuncType>(lambda);
215
+ #endif
216
+ }
217
+
218
+ template<bool AllowLegacyTypes, class Lambda>
219
+ inline std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
220
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
221
+
222
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
223
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
224
+ );
225
+ }
226
+
227
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/util/intrusive_ptr.h>
3
+
4
+ namespace c10 {
5
+
6
+ /**
7
+ * Inherit from OperatorKernel to implement a c10 kernel.
8
+ *
9
+ * Example:
10
+ * > namespace {
11
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
12
+ * > public:
13
+ * > Tensor operator()(Tensor a, Tensor b) {...}
14
+ * > };
15
+ * > }
16
+ *
17
+ * The kernel class is allowed to have members but these are equivalent
18
+ * to global variables. The kernel implementation is responsible for
19
+ * preventing race conditions on them.
20
+ *
21
+ * See below for how to register this kernel with PyTorch.
22
+ */
23
+ struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target {
24
+ ~OperatorKernel() override = default;
25
+ };
26
+
27
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/CompileTimeFunctionPointer.h>
4
+
5
+ namespace c10 {
6
+ namespace impl {
7
+ namespace detail {
8
+ template<class FuncPtr, class ReturnType, class ParameterList> class WrapFunctionIntoFunctor_ {};
9
+ template<class FuncPtr, class ReturnType, class... Parameters>
10
+ class WrapFunctionIntoFunctor_<FuncPtr, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
11
+ public:
12
+ C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) {
13
+ return (*FuncPtr::func_ptr())(std::forward<Parameters>(args)...);
14
+ }
15
+ };
16
+ }
17
+
18
+ // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
19
+ // Since it is a compile time function pointer, many compilers can inline it
20
+ // into the wrapper and you don't get any performance overhead for wrapping.
21
+ template<class FuncPtr>
22
+ struct WrapFunctionIntoFunctor final {
23
+ static_assert(c10::is_compile_time_function_pointer<FuncPtr>::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN.");
24
+ using type = detail::WrapFunctionIntoFunctor_<
25
+ FuncPtr,
26
+ typename guts::function_traits<typename FuncPtr::FuncType>::return_type,
27
+ typename guts::function_traits<typename FuncPtr::FuncType>::parameter_types
28
+ >;
29
+ };
30
+ }
31
+
32
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/TypeTraits.h>
4
+
5
+ namespace c10 {
6
+
7
+ namespace impl {
8
+ namespace detail {
9
+ template<class FuncType, class ReturnType, class ParameterList> class WrapFunctionIntoRuntimeFunctor_ {};
10
+ template<class FuncType, class ReturnType, class... Parameters>
11
+ class WrapFunctionIntoRuntimeFunctor_<FuncType, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
12
+ public:
13
+ template<class FuncType_>
14
+ explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func)
15
+ : kernel_func_(std::forward<FuncType_>(kernel_func)) {}
16
+
17
+ decltype(auto) operator()(Parameters... args) {
18
+ return kernel_func_(std::forward<Parameters>(args)...);
19
+ }
20
+
21
+ private:
22
+ FuncType kernel_func_;
23
+ };
24
+ }
25
+
26
+ // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that
27
+ // inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
28
+ // This can, for example, be used for lambdas, functors or even function pointers.
29
+ // In the case of function pointers, since it is a runtime function pointer,
30
+ // there is an overhead for calling it whenever the kernel is invoked.
31
+ template<class FuncType>
32
+ using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_<
33
+ FuncType,
34
+ typename guts::infer_function_traits_t<FuncType>::return_type,
35
+ typename guts::infer_function_traits_t<FuncType>::parameter_types
36
+ >;
37
+ }
38
+
39
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // This file contains boxing (not unboxing) logic,
4
+ // i.e. how to make a vector<IValue> from a set of concrete arguments.
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <ATen/core/stack.h>
8
+ #include <c10/core/TensorOptions.h>
9
+
10
+ #include <ATen/core/boxing/BoxedKernel.h>
11
+
12
+ #include <c10/util/Metaprogramming.h>
13
+
14
+ namespace c10 {
15
+ namespace impl {
16
+
17
+ //
18
+ // utils
19
+ //
20
+
21
+ // is_mutable_tensor_ref
22
+ template <class T> struct is_mutable_tensor_ref : std::false_type {};
23
+ template <> struct is_mutable_tensor_ref<at::Tensor&> : std::true_type {};
24
+
25
+ // is_tuple_of_mutable_tensor_refs
26
+ //
27
+ template <class T, class Enable = void>
28
+ struct is_tuple_of_mutable_tensor_refs : std::false_type {};
29
+
30
+ template <class T>
31
+ struct is_tuple_of_mutable_tensor_refs<T, std::enable_if_t<guts::is_instantiation_of<std::tuple, T>::value, void>>
32
+ : guts::typelist::all<is_mutable_tensor_ref, guts::typelist::from_tuple_t<T>>
33
+ {};
34
+
35
+ // has_ivalue_to<T> tests the presence/absence of instance method IValue::to<T>()
36
+ //
37
+ template <class T, class Enable = void>
38
+ struct has_ivalue_to : std::false_type {};
39
+
40
+ template <class T>
41
+ struct has_ivalue_to<T, guts::void_t<decltype(std::declval<IValue>().to<T>())>>
42
+ : std::true_type
43
+ {};
44
+
45
+ //
46
+ // boxing predicates
47
+ //
48
+
49
+ // A boxable arg type is one that IValue has a constructor for.
50
+ template <typename T>
51
+ using can_box =
52
+ guts::disjunction<
53
+ std::is_constructible<IValue, std::decay_t<T>>,
54
+ // TensorOptions are not directly constructible into IValue,
55
+ // but torch::jit::push knows how to handle them
56
+ std::is_same<TensorOptions, std::decay_t<T>>
57
+ >;
58
+
59
+ template <typename... Ts>
60
+ using can_box_all = guts::conjunction<can_box<Ts>...>;
61
+
62
+ // an unboxable result is one that can be extracted from an IValue
63
+ template <typename T>
64
+ using can_unbox =
65
+ guts::conjunction<
66
+ guts::disjunction<
67
+ has_ivalue_to<T>,
68
+ // void returns are ok
69
+ std::is_same<void, T>
70
+ >,
71
+ guts::negation<std::is_lvalue_reference<T>>
72
+ >;
73
+
74
+ //
75
+ // boxArgs - utility for pushing unboxed args onto IValue stack
76
+ //
77
+ template <class... Args>
78
+ torch::jit::Stack boxArgs(Args... args) {
79
+ // TODO Reuse stack vector instead of allocating?
80
+ torch::jit::Stack stack;
81
+ stack.reserve(sizeof...(Args));
82
+ torch::jit::push(stack, std::forward<Args>(args)...);
83
+ return stack;
84
+ }
85
+
86
+ template <class T>
87
+ static inline constexpr size_t boxed_size_one() {
88
+ static_assert(!std::is_same<std::decay_t<T>, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference");
89
+ return 1;
90
+ }
91
+
92
+ // torch::jit::push pushes 4 values for a TensorOptions; this needs to
93
+ // be kept in sync.
94
+ template <>
95
+ inline constexpr size_t boxed_size_one<c10::TensorOptions>() {
96
+ return 4;
97
+ }
98
+
99
+ // NOTE: this could probably be simplified with C++17 fold expressions.
100
+ template <typename...>
101
+ struct BoxedSize : std::integral_constant<size_t, 0> {};
102
+ template <class T, class... Args>
103
+ struct BoxedSize<T, Args...> : std::integral_constant<size_t, boxed_size_one<T>() + BoxedSize<Args...>::value> {};
104
+
105
+ template <class... Args>
106
+ static inline constexpr size_t boxed_size() {
107
+ return BoxedSize<Args...>::value;
108
+ }
109
+
110
+ using IValueAlignedStorage = std::aligned_storage_t<sizeof(IValue), alignof(IValue)>;
111
+
112
+ template <typename T>
113
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) {
114
+ new (&dest[lastIdx]) IValue(arg);
115
+ lastIdx++;
116
+ }
117
+
118
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) {
119
+ new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype()));
120
+ new (&dest[lastIdx++]) IValue(options.layout());
121
+ new (&dest[lastIdx++]) IValue(options.device());
122
+ new (&dest[lastIdx++]) IValue(options.pinned_memory());
123
+ }
124
+
125
+ inline void boxArgsToStack(IValueAlignedStorage*, int&) {}
126
+
127
+ template<typename T, typename... Args>
128
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) {
129
+ boxToStack(dest, arg, lastIdx);
130
+ boxArgsToStack(dest, lastIdx, args...);
131
+ }
132
+
133
+ //
134
+ // PopResult is a helper class whose specializations handle popping single and
135
+ // multiple return values, respectively.
136
+ //
137
+ template <class Result>
138
+ struct PopResult final {
139
+ static Result call(Stack& stack) {
140
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
141
+ stack.size() == 1,
142
+ "Boxed kernel was expected to return one value on the stack, ",
143
+ "but instead pushed ", stack.size(), " values."
144
+ );
145
+ return std::move(stack[0]).to<Result>();
146
+ }
147
+ };
148
+
149
+ template <class... Types>
150
+ struct PopResult<std::tuple<Types...>> final {
151
+ using Result = std::tuple<Types...>;
152
+
153
+ static Result call(Stack& stack) {
154
+ // for tuple return types, boxed kernel has pushed multiple values onto the stack
155
+ constexpr int RetCount = sizeof...(Types);
156
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
157
+ stack.size() == RetCount,
158
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
159
+ "but instead pushed ", stack.size(), " values."
160
+ );
161
+ return pop_to_tuple_impl(stack, std::make_index_sequence<RetCount>());
162
+ }
163
+ private:
164
+ // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise.
165
+ // I'm sure there's an incantation that slips it past the parser but eh
166
+ template <size_t... indices>
167
+ static Result pop_to_tuple_impl(Stack& stack, std::index_sequence<indices...>) {
168
+ return std::make_tuple((std::move(stack[indices]).to<Types>())...);
169
+ }
170
+ };
171
+
172
+ //
173
+ // BoxedKernelWrapper
174
+ //
175
+ // For a given function type FT, BoxedKernelWrapper<FT> implements
176
+ // a `call` method that
177
+ // - takes a boxed kernel and unboxed arguments as specified by FT,
178
+ // - calls `boxArgs` to box the arguments
179
+ // - calls the boxed kernel
180
+ // - unboxes and returns the result
181
+ //
182
+ // The partial specializations below handle various cases: in
183
+ // particular, not all types appearing in op signatures are supported,
184
+ // and ops returning references have nonstandard wrapper implementations.
185
+ //
186
+
187
+ // 1. The base specialization of BoxedKernelWrapper should never be instantiated.
188
+ // A "no call method defined on BoxedKernelWrapper" compile error means that
189
+ // an op signature has failed to trigger any of the partial specializations
190
+ // that follow this one.
191
+ //
192
+ template <class FuncType, class Enable = void>
193
+ struct BoxedKernelWrapper {
194
+ // The reason we're not just doing straight up static_assert(false, ...) here:
195
+ // Basically, the way to make sure a static_assert only fires if a template
196
+ // is actually instantiated (rather than every time the file is parsed) is to use
197
+ // template parameters in the expression, e.g. FuncType here. However, since
198
+ // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same
199
+ // effect.
200
+ static_assert(sizeof(FuncType) != sizeof(FuncType),
201
+ "Function signature contains one or more unsupported parameter and/or return types. "
202
+ "Look for a nearby error like "
203
+ "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" "
204
+ "- (your function type) is the unsupported signature.");
205
+ };
206
+
207
+ //
208
+ // 2. Supported signatures, other than those involving non-const Tensor refs -
209
+ // i.e., "functional" ops.
210
+ //
211
+
212
+ template <class Result, class... Args>
213
+ struct BoxedKernelWrapper<
214
+ Result(Args...),
215
+ std::enable_if_t<
216
+ can_box_all<Args...>::value && can_unbox<Result>::value && !is_tuple_of_mutable_tensor_refs<Result>::value,
217
+ void
218
+ >
219
+ > {
220
+ static Result call(
221
+ const BoxedKernel& boxed_kernel_func,
222
+ const OperatorHandle& opHandle,
223
+ DispatchKeySet dispatchKeySet,
224
+ Args... args
225
+ ) {
226
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
227
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
228
+
229
+ if constexpr (!std::is_same_v<void, Result>) {
230
+ // op has pushed one or more values onto the stack.
231
+ return PopResult<Result>::call(stack);
232
+ } else {
233
+ // op returns void, boxed kernel has pushed nothing onto stack.
234
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
235
+ stack.empty(),
236
+ "Boxed kernel was expected to return no values on the stack, ",
237
+ "but instead returned ", stack.size(), " values."
238
+ );
239
+ }
240
+ }
241
+ };
242
+
243
+ //
244
+ // 3. in-place ops take a single non-const Tensor reference
245
+ // as their first argument, and return it.
246
+ //
247
+ // Note: all signatures matching this pattern are assumed to be for such ops.
248
+ // Because of this, the generated BoxedKernelWrapper specializations simply
249
+ // return the in-place argument.
250
+ //
251
+
252
+ template <class... OtherArgs>
253
+ struct BoxedKernelWrapper<
254
+ at::Tensor&(at::Tensor&, OtherArgs...),
255
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
256
+ > {
257
+ static at::Tensor& call(
258
+ const BoxedKernel& boxed_kernel_func,
259
+ const OperatorHandle& opHandle,
260
+ DispatchKeySet dispatchKeySet,
261
+ at::Tensor& outArg, OtherArgs... otherArgs
262
+ ) {
263
+ torch::jit::Stack stack = boxArgs<at::Tensor&, OtherArgs...>(outArg, std::forward<OtherArgs>(otherArgs)...);
264
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
265
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
266
+ stack.size() == 1,
267
+ "Boxed kernel was expected to return a single value on the stack, ",
268
+ "but instead returned ", stack.size(), " values."
269
+ );
270
+
271
+ return outArg;
272
+ }
273
+ };
274
+
275
+ //
276
+ // 3.5. In-process migration to make in-place ops take and return
277
+ // const references instead.
278
+ template <class... OtherArgs>
279
+ struct BoxedKernelWrapper<
280
+ const at::Tensor&(const at::Tensor&, OtherArgs...),
281
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
282
+ > {
283
+ static const at::Tensor& call(
284
+ const BoxedKernel& boxed_kernel_func,
285
+ const OperatorHandle& opHandle,
286
+ DispatchKeySet dispatchKeySet,
287
+ const at::Tensor& outArg, OtherArgs... otherArgs
288
+ ) {
289
+ torch::jit::Stack stack = boxArgs(outArg, otherArgs...);
290
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
291
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
292
+ stack.size() == 1,
293
+ "Boxed kernel was expected to return a single value on the stack, ",
294
+ "but instead returned ", stack.size(), " values."
295
+ );
296
+
297
+ return outArg;
298
+ }
299
+ };
300
+
301
+ //
302
+ // 4. out of place ops that take a single non-const Tensor reference as their
303
+ // final argument, and also return it.
304
+ //
305
+ // Note: all signatures matching this pattern are assumed to be for such ops.
306
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
307
+ // return out arguments.
308
+ //
309
+ template <class FirstArg, class... RestArgs>
310
+ struct BoxedKernelWrapper<
311
+ at::Tensor&(FirstArg, RestArgs...),
312
+ std::enable_if_t<
313
+ can_box_all<FirstArg, RestArgs...>::value
314
+ // this skips over in-place kernels with a non-const Tensor
315
+ // arg at the front, so those can unambiguously trigger the preceding specialization.
316
+ && !is_mutable_tensor_ref<FirstArg>::value,
317
+ void
318
+ >
319
+ > {
320
+ static at::Tensor& call(
321
+ const BoxedKernel& boxed_kernel_func,
322
+ const OperatorHandle& opHandle,
323
+ DispatchKeySet dispatchKeySet,
324
+ FirstArg firstArg, RestArgs... restArgs
325
+ ) {
326
+ torch::jit::Stack stack = boxArgs<FirstArg, RestArgs...>(std::forward<FirstArg>(firstArg), std::forward<RestArgs>(restArgs)...);
327
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
328
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
329
+ stack.size() == 1,
330
+ "Boxed kernel was expected to return a single value on the stack, ",
331
+ "but instead returned ", stack.size(), " values."
332
+ );
333
+
334
+ // reusing restArgs after it has been forwarded here is ok because we know
335
+ // that the last element is of type `Tensor&`.
336
+ return std::get<sizeof...(RestArgs) - 1>(std::tuple<RestArgs...>{restArgs...});
337
+ }
338
+ };
339
+
340
+ //
341
+ // 5. out of place ops that take multiple non-const Tensor references as their
342
+ // final arguments, and return them in a std::tuple.
343
+ //
344
+ // Note: all signatures matching this pattern are assumed to be for such ops.
345
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
346
+ // return the out arguments.
347
+ //
348
+ template <class Result, class... Args>
349
+ struct BoxedKernelWrapper<
350
+ Result(Args...),
351
+ std::enable_if_t<
352
+ can_box_all<Args...>::value && is_tuple_of_mutable_tensor_refs<Result>::value,
353
+ void
354
+ >
355
+ > {
356
+ static Result call(
357
+ const BoxedKernel& boxed_kernel_func,
358
+ const OperatorHandle& opHandle,
359
+ DispatchKeySet dispatchKeySet,
360
+ Args... args
361
+ ) {
362
+ using ArgTuple = std::tuple<Args...>;
363
+ constexpr int RetCount = std::tuple_size<Result>();
364
+
365
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
366
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
367
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
368
+ stack.size() == RetCount,
369
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
370
+ "but instead returned ", stack.size(), " values."
371
+ );
372
+
373
+ // reusing args after it has been forwarded here is ok because we know
374
+ // that the last RetCount elements are of type `Tensor&`.
375
+ auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
376
+ static_assert(
377
+ std::is_same<Result, decltype(result)>::value,
378
+ "The parameter list of an op returning a tuple of Tensor references "
379
+ "must end with an equal number of Tensor reference parameters."
380
+ );
381
+ return result;
382
+ }
383
+ };
384
+
385
+ } // impl
386
+ } // c10
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h ADDED
@@ -0,0 +1,600 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/util/TypeList.h>
7
+ #include <ATen/core/IListRef.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+ #include <c10/util/Metaprogramming.h>
10
+
11
+ #include <utility>
12
+
13
+ namespace c10 {
14
+
15
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
16
+ class OperatorHandle;
17
+
18
+ /*
19
+ * [Note: Argument forwarding in the dispatcher]
20
+ *
21
+ * The dispatcher uses a somewhat unusual way to forward arguments through several layers of
22
+ * wrapper functions. This can be confusing because an experienced C++ programmer would look at this
23
+ * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.".
24
+ * It is not a bug. The common way in C++ to forward arguments is to use universal references:
25
+ *
26
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
27
+ *
28
+ * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument.
29
+ * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a
30
+ * different reference type than was used in the kernel function. The correct reference type
31
+ * is dictated by the kernel signature and must be identical since we cast function pointers
32
+ * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines
33
+ * the reference type to use by looking at the explicitly supplied operator signature, not by looking at
34
+ * the argument we're calling it with.
35
+ *
36
+ * What does std::forward do, exactly?
37
+ * ------------------------------------
38
+ * std::forward<T>(t) is a way to cast t to the reference type supplied in T.
39
+ * Let's assume decay_t<T> == U and T is either U or some reference of U.
40
+ * - std::forward<T&>(t) will return U&, no matter what kind of reference t is.
41
+ * - std::forward<T&&>(t) will return U&&, no matter what kind of reference t is.
42
+ * - std::forward<T>(t) will return U&& (not U!), no matter what kind of reference t is.
43
+ *
44
+ * For universal references, that means that in the following function
45
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
46
+ *
47
+ * - when called with arg being an rvalue reference or non-reference value, T gets inferred to be
48
+ * a non-reference U, and std::forward<T>(t) will return U&&, correctly moving the argument.
49
+ * - when called with arg being an lvalue reference, T gets inferred to be U& because that's the only
50
+ * way to match the signature (in C++, a type that is (T&)&& will collapse to T&).
51
+ * That means std::forward<T>(t) will return U& and the value will not be moved but passed on as
52
+ * an lvalue reference.
53
+ *
54
+ * How do we use that?
55
+ * ------------------------------------
56
+ * But std::forward can also be used outside of the common "universal forwarding" pattern to change
57
+ * reference types. So instead of following the common C++ pattern, we notice what
58
+ * std::forward<T>() actually does, and that is it takes a value and changes its reference to the
59
+ * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this
60
+ * to forward based on an explicitly specified reference type instead of the inferred argument type.
61
+ *
62
+ * This is why many of the dispatcher functions look like
63
+ * > template<class T> func(T t) { func2<T>(std::forward<T>(t)); }
64
+ * instead of the common
65
+ * > template<class T> func(T&& t) { func2(std::forward<T>(t)); }
66
+ *
67
+ * and are expected to be called by explicitly specifying the template parameters in a way that matches
68
+ * the expected operator signature at each call site.
69
+ */
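// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the explicit-template-
// argument forwarding pattern described in the note above, as a standalone
// program. `kernel` and `wrapper` are hypothetical names; the two overloads
// exist only to make the arriving reference category observable.
#include <iostream>
#include <string>
#include <utility>

void kernel(const std::string&) { std::cout << "got an lvalue reference\n"; }
void kernel(std::string&&)      { std::cout << "got an rvalue reference\n"; }

// Dispatcher-style wrapper: T is supplied explicitly by the caller (mirroring
// the operator signature) rather than inferred from the argument, and
// std::forward<T> casts the stored value to exactly that reference type.
template <class T>
void wrapper(T arg) {
  kernel(std::forward<T>(arg));
}

int main() {
  std::string s = "x";
  wrapper<const std::string&>(s);  // forwards as const&  -> "lvalue reference"
  wrapper<std::string>(s);         // forward<T> yields && -> "rvalue reference"
  return 0;
}
// ---------------------------------------------------------------------------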
70
+
71
+ namespace impl {
72
+ // supported_primitive_arg_types defines which primitive types we allow in
73
+ // kernel functions as arguments or returns.
74
+ // Additionally, we support lists, dicts and optionals containing these types.
75
+ using supported_primitive_arg_types = guts::typelist::typelist<
76
+ int64_t,
77
+ double,
78
+ bool,
79
+ c10::string_view,
80
+ at::Tensor,
81
+ at::Scalar,
82
+ c10::QScheme,
83
+ c10::ScalarType,
84
+ c10::Device,
85
+ c10::DeviceIndex,
86
+ c10::Layout,
87
+ c10::MemoryFormat,
88
+ at::Dimname
89
+ >;
90
+
91
+ // We have an unboxed functor in hand that takes C++ arguments, and
92
+ // we're building a boxed functor wrapper for it that takes IValues.
93
+ // So "outside" is boxed and "inside" is unboxed.
94
+ //
95
+ // So a valid input type is one that our boxed functor wrapper can
96
+ // unbox from an IValue into a C++ value.
97
+ //
98
+ // Whereas a valid output type is one that our wrapper can receive
99
+ // as a C++ value from the unboxed functor, and box into an IValue.
100
+
101
+ //
102
+ // assert_is_valid_input_type
103
+ // checks that T can be unboxed from an IValue into a C++ value.
104
+ //
105
+
106
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
107
+ struct assert_is_valid_input_type {
108
+ assert_is_valid_input_type() {
109
+ if constexpr (guts::typelist::contains<supported_primitive_arg_types, T>::value) {
110
+ /* everything is ok, this is a primitive type */
111
+ } else {
112
+ /* otherwise this must be an instance of a valid custom class, since it can only
113
+ have been created via IValue(x), which ensures this. */
114
+ }
115
+ }
116
+ };
117
+
118
+ template<class T, bool AllowDeprecatedTypes>
119
+ struct assert_is_valid_input_type<c10::optional<T>, AllowDeprecatedTypes>
120
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
121
+
122
+ template <bool AllowDeprecatedTypes, class... Args>
123
+ struct TypeCheckHelper;
124
+
125
+ template <bool AllowDeprecatedTypes>
126
+ struct TypeCheckHelper<AllowDeprecatedTypes> {};
127
+
128
+ template <bool AllowDeprecatedTypes, class Head, class... Rest>
129
+ struct TypeCheckHelper<AllowDeprecatedTypes, Head, Rest...>
130
+ : TypeCheckHelper<AllowDeprecatedTypes, Rest...> {
131
+ assert_is_valid_input_type<Head, AllowDeprecatedTypes> check;
132
+ };
133
+
134
+ template<class... Contained, bool AllowDeprecatedTypes>
135
+ struct assert_is_valid_input_type<std::tuple<Contained...>, AllowDeprecatedTypes>
136
+ : TypeCheckHelper<AllowDeprecatedTypes, Contained...> {};
137
+
138
+ template<class Key, class Value, bool AllowDeprecatedTypes>
139
+ struct assert_is_valid_input_type<Dict<Key, Value>, AllowDeprecatedTypes>
140
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
141
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
142
+ "You tried to register a kernel with an unsupported input type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
143
+ };
144
+
145
+ template<class Key, class Value, bool AllowDeprecatedTypes>
146
+ struct assert_is_valid_input_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
147
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
148
+ static_assert(AllowDeprecatedTypes,
149
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
150
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
151
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
152
+ };
153
+
154
+ template<class T, bool AllowDeprecatedTypes>
155
+ struct assert_is_valid_input_type<List<T>, AllowDeprecatedTypes>
156
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
157
+ static_assert(!std::is_same<T, at::Scalar>::value,
158
+ "You tried to register a kernel with an unsupported input type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
159
+ };
160
+
161
+ template<class T, bool AllowDeprecatedTypes>
162
+ struct assert_is_valid_input_type<c10::ArrayRef<T>, AllowDeprecatedTypes>
163
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
164
+ static_assert(!std::is_same<T, at::Scalar>::value,
165
+ "You tried to register a kernel with an unsupported input type: ArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
166
+ };
167
+
168
+ template<class T, bool AllowDeprecatedTypes>
169
+ struct assert_is_valid_input_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
170
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
171
+ static_assert(!std::is_same<T, at::Scalar>::value,
172
+ "You tried to register a kernel with an unsupported input type: OptionalArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
173
+ };
174
+
175
+ template<class T, size_t N, bool AllowDeprecatedTypes>
176
+ struct assert_is_valid_input_type<std::array<T, N>, AllowDeprecatedTypes>
177
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
178
+ static_assert(!std::is_same<T, at::Scalar>::value,
179
+ "You tried to register a kernel with an unsupported input type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
180
+ };
181
+
182
+ template<class T, bool AllowDeprecatedTypes>
183
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
184
+ // There is no reason to support float when we have double. Keep the API lean.
185
+ static_assert(guts::false_t<T>::value,
186
+ "You tried to register a kernel with an unsupported input type: float. Please use double instead.");
187
+ };
188
+ template<class T, bool AllowDeprecatedTypes>
189
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
190
+ static_assert(guts::false_t<T>::value,
191
+ "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead.");
192
+ };
193
+ template<class T, bool AllowDeprecatedTypes>
194
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
195
+ static_assert(guts::false_t<T>::value,
196
+ "You tried to register a kernel with an unsupported input type: vector<bool>. Please use List<bool> instead.");
197
+ };
198
+ template<class T, bool AllowDeprecatedTypes>
199
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
200
+ static_assert(guts::false_t<T>::value,
201
+ "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead.");
202
+ };
203
+ template<class T, bool AllowDeprecatedTypes>
204
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const c10::SymInt&, T>::value>> {
205
+ static_assert(guts::false_t<T>::value,
206
+ "You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead.");
207
+ };
208
+
209
+ // TODO: it probably would be good to tighten this up quite a bit more with
210
+ // an explicit list for everything
211
+
212
+ //
213
+ // assert_is_valid_output_type
214
+ //
215
+
216
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
217
+ struct assert_is_valid_output_type {
218
+ assert_is_valid_output_type() {
219
+ if constexpr(guts::typelist::contains<supported_primitive_arg_types, T>::value) {
220
+ /* everything is ok, this is a primitive type */
221
+ } else {
222
+ /* otherwise T is verified to be a registered custom class in the IValue
223
+ constructor, so no benefit in double-checking here */
224
+ }
225
+ }
226
+ };
227
+
228
+ template<class T, bool AllowDeprecatedTypes>
229
+ struct assert_is_valid_output_type<c10::optional<T>, AllowDeprecatedTypes>
230
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
231
+
232
+ template<class T, bool AllowDeprecatedTypes>
233
+ struct assert_is_valid_output_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
234
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
235
+
236
+ template<class Key, class Value, bool AllowDeprecatedTypes>
237
+ struct assert_is_valid_output_type<Dict<Key, Value>, AllowDeprecatedTypes>
238
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
239
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
240
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
241
+ static_assert(!std::is_same<Value, at::Scalar>::value,
242
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
243
+ };
244
+
245
+ template<class Key, class Value, bool AllowDeprecatedTypes>
246
+ struct assert_is_valid_output_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
247
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
248
+ static_assert(AllowDeprecatedTypes,
249
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
250
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
251
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
252
+ static_assert(!std::is_same<Value, at::Scalar>::value,
253
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
254
+ };
255
+
256
+ template<class T, bool AllowDeprecatedTypes>
257
+ struct assert_is_valid_output_type<List<T>, AllowDeprecatedTypes>
258
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
259
+ static_assert(!std::is_same<T, at::Scalar>::value,
260
+ "You tried to register a kernel with an unsupported output type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
261
+ };
262
+
263
+ template<class T, bool AllowDeprecatedTypes>
264
+ struct assert_is_valid_output_type<std::vector<T>, AllowDeprecatedTypes>
265
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
266
+ static_assert(!std::is_same<T, at::Scalar>::value,
267
+ "You tried to register a kernel with an unsupported output type: std::vector<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
268
+ // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector<T>. Please use List<T> instead.");
269
+ };
270
+
271
+ template<class T, size_t N, bool AllowDeprecatedTypes>
272
+ struct assert_is_valid_output_type<std::array<T, N>, AllowDeprecatedTypes>
273
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
274
+ static_assert(!std::is_same<T, at::Scalar>::value,
275
+ "You tried to register a kernel with an unsupported output type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
276
+ };
277
+
278
+ // The following specialisations of assert_is_valid_output_type are technically not
279
+ // necessary since we would hit the base case and show an error message
280
+ // there if they didn't exist, but we can show a better error message
281
+ // in some common error scenarios.
282
+ template<class T, bool AllowDeprecatedTypes>
283
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
284
+ // There is no reason to support float when we have double. Keep the API lean.
285
+ static_assert(guts::false_t<T>::value,
286
+ "You tried to register a kernel with an unsupported output type: float. Please use double instead.");
287
+ };
288
+ template<class T, bool AllowDeprecatedTypes>
289
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
290
+ static_assert(guts::false_t<T>::value,
291
+ "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead.");
292
+ };
293
+ template<class T, bool AllowDeprecatedTypes>
294
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
295
+ static_assert(guts::false_t<T>::value,
296
+ "You tried to register a kernel with an unsupported output type: vector<bool>. Please use List<bool> instead.");
297
+ };
298
+ template<class T, bool AllowDeprecatedTypes>
299
+ struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
300
+ static_assert(guts::false_t<T>::value,
301
+ "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead.");
302
+ };
303
+
304
+ // ivalue_to_arg
305
+
306
+ template<class T>
307
+ struct decay_if_not_tensor final {
308
+ using type = std::decay_t<T>;
309
+ };
310
+
311
+ template<>
312
+ struct decay_if_not_tensor<at::Tensor&> final {
313
+ using type = at::Tensor&;
314
+ };
315
+
316
+ template<>
317
+ struct decay_if_not_tensor<const at::Tensor&> final {
318
+ using type = const at::Tensor&;
319
+ };
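// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the decay_if_not_tensor
// idea as a standalone trait. Everything decays to a value type except the two
// Tensor reference types, so references survive exactly where the kernel
// signature asked for them. `FakeTensor` is a hypothetical stand-in.
#include <string>
#include <type_traits>

struct FakeTensor {};

template <class T> struct toy_decay_if_not_tensor { using type = std::decay_t<T>; };
template <> struct toy_decay_if_not_tensor<FakeTensor&> { using type = FakeTensor&; };
template <> struct toy_decay_if_not_tensor<const FakeTensor&> { using type = const FakeTensor&; };

static_assert(std::is_same_v<toy_decay_if_not_tensor<const std::string&>::type, std::string>);
static_assert(std::is_same_v<toy_decay_if_not_tensor<const FakeTensor&>::type, const FakeTensor&>);

int main() { return 0; }
// ---------------------------------------------------------------------------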
320
+
321
+ template<class T, bool AllowDeprecatedTypes>
322
+ struct ivalue_to_arg final {
323
+ static decltype(auto) call(IValue& v) {
324
+ assert_is_valid_input_type<T, AllowDeprecatedTypes>();
325
+ return std::move(v).to<T>();
326
+ }
327
+ };
328
+
329
+ // The following two specializations take advantage of specialized
330
+ // `toTensor()` overloads on IValue to avoid copying.
331
+ template<bool AllowDeprecatedTypes>
332
+ struct ivalue_to_arg<at::Tensor&, AllowDeprecatedTypes> final {
333
+ // We cannot use the default implementation if they asked for a
334
+ // `at::Tensor&` because it moves from the IValue, so it can't get
335
+ // an lvalue reference.
336
+ static at::Tensor& call(IValue& v) {
337
+ // Tensor& is valid, don't bother asserting
338
+ return v.toTensor();
339
+ }
340
+ };
341
+
342
+ template<bool AllowDeprecatedTypes>
343
+ struct ivalue_to_arg<const at::Tensor&, AllowDeprecatedTypes> final {
344
+ // We should not use the default implementation if they asked for
345
+ // a `const at::Tensor&` because it moves from the IValue and they
346
+ // didn't ask for that.
347
+ static const at::Tensor& call(IValue& v) {
348
+ // const Tensor& is valid, don't bother asserting
349
+ return v.toTensor();
350
+ }
351
+ };
352
+
353
+ template<bool AllowDeprecatedTypes>
354
+ struct ivalue_to_arg<at::ITensorListRef, AllowDeprecatedTypes> final {
355
+ static List<at::Tensor> call(IValue& v) {
356
+ return v.toTensorList();
357
+ }
358
+ };
359
+
360
+ template<class T, bool AllowDeprecatedTypes>
361
+ struct ivalue_to_arg<ArrayRef<T>, AllowDeprecatedTypes> final {
362
+ // If an argument is ArrayRef<T>, convert the IValue to a std::vector<T> and pass that
363
+ // to the operator. std::vector<T> is implicitly convertible to ArrayRef<T>.
364
+ static std::vector<T> call(IValue& v) {
365
+ return ivalue_to_arg<std::vector<T>, AllowDeprecatedTypes>::call(v);
366
+ }
367
+ };
368
+ template<bool AllowDeprecatedTypes>
369
+ struct ivalue_to_arg<c10::SymIntArrayRef, AllowDeprecatedTypes> final {
370
+ static std::vector<c10::SymInt> call(IValue& v) {
371
+ if (v.isIntList()) {
372
+ std::vector<c10::SymInt> r;
373
+ auto src = v.toIntList();
374
+ std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
375
+ return r;
376
+ } else {
377
+ return ivalue_to_arg<std::vector<c10::SymInt>, AllowDeprecatedTypes>::call(v);
378
+ }
379
+ }
380
+ };
381
+ template<bool AllowDeprecatedTypes>
382
+ struct ivalue_to_arg<c10::OptionalArray<c10::SymInt>, AllowDeprecatedTypes> final {
383
+ static OptionalArray<c10::SymInt> call(IValue& v) {
384
+ if (v.isIntList()) {
385
+ std::vector<c10::SymInt> r;
386
+ auto src = v.toIntList();
387
+ std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
388
+ return OptionalArray<c10::SymInt>(std::move(r));
389
+ } else {
390
+ return std::move(v).to<OptionalArray<c10::SymInt>>();
391
+ }
392
+ }
393
+ };
394
+ template<class T, bool AllowDeprecatedTypes>
395
+ struct ivalue_to_arg<optional<ArrayRef<T>>, AllowDeprecatedTypes> final {
396
+ // If an argument is optional<ArrayRef<T>>, convert the IValue to an optional<std::vector<T>> and pass that
397
+ // to the operator. OptionalArray<T> is basically an optional<std::vector<T>> but implicitly convertible
398
+ // to optional<ArrayRef<T>>.
399
+ static OptionalArray<T> call(IValue& v) {
400
+ return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
401
+ }
402
+ };
403
+
404
+ template<class T, bool AllowDeprecatedTypes>
405
+ struct ivalue_to_arg<OptionalArrayRef<T>, AllowDeprecatedTypes> final {
406
+ // If an argument is OptionalArrayRef<T>, convert the IValue to an
407
+ // optional<std::vector<T>> and pass that to the operator. OptionalArray<T>
408
+ // is basically an optional<std::vector<T>> but implicitly convertible to
409
+ // OptionalArrayRef<T>
410
+ static OptionalArray<T> call(IValue& v) {
411
+ return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
412
+ }
413
+ };
414
+
415
+ // return_to_ivalue
416
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
417
+ struct return_to_ivalue final {};
418
+
419
+ template<class T, bool AllowDeprecatedTypes>
420
+ struct return_to_ivalue<T, AllowDeprecatedTypes, std::enable_if_t<!std::is_same<at::Tensor&, T>::value>> final {
421
+ static IValue call(T&& v) {
422
+ assert_is_valid_output_type<T, AllowDeprecatedTypes>();
423
+ return c10::ivalue::from(std::move(v));
424
+ }
425
+ static IValue copy(const T& v) {
426
+ assert_is_valid_output_type<T, AllowDeprecatedTypes>();
427
+ return IValue(v);
428
+ }
429
+ };
430
+
431
+ // Special case to allow kernels to return `Tensor&`.
432
+ // TODO Delete this once kernels don't do that anymore
433
+ template<bool AllowDeprecatedTypes>
434
+ struct return_to_ivalue<at::Tensor&, AllowDeprecatedTypes, void> final {
435
+ static IValue call(at::Tensor& v) {
436
+ return c10::ivalue::from(v);
437
+ }
438
+ static IValue copy(at::Tensor& v) {
439
+ return IValue(v);
440
+ }
441
+ };
442
+
443
+ // wrap_kernel_functor_unboxed_
444
+
445
+ template<class KernelFunctor, class OpSignature>
446
+ struct wrap_kernel_functor_unboxed_ final {};
447
+
448
+ // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet
449
+ // This includes kernels with 0 arguments.
450
+ template<class KernelFunctor, class ReturnType, class... ParameterTypes>
451
+ struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(ParameterTypes...)> final {
452
+ static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
453
+ "Return type mismatch");
454
+ static_assert(std::is_same<guts::typelist::typelist<ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
455
+ "Parameter types mismatch");
456
+
457
+ // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
458
+ static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) {
459
+ KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
460
+ // Note [Plumbing Keys Through The Dispatcher 2]
461
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
462
+ // This functor explicitly takes in a dispatchKeySet and drops it on the floor- it does not forward it to the registered kernel.
463
+ //
464
+ // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type
465
+ // DispatchKeySet.
466
+ // This is not the case for pretty much all manually written kernels, however- this functor serves to separate the calling convention
467
+ // of the dispatcher from the calling convention of manually written kernels.
468
+ return (*functor_)(std::forward<ParameterTypes>(args)...);
469
+ }
470
+ };
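// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header), in the spirit of the
// specialization above: an adapter that accepts a leading key-set argument
// purely to satisfy a uniform calling convention and drops it before invoking
// a kernel that never asked for it. All names are hypothetical.
#include <cassert>
#include <cstdint>
#include <utility>

using ToyKeySet = uint64_t;

template <class Kernel, class Ret, class... Params>
struct drop_keyset_adapter {
  static Ret call(Kernel& kernel, ToyKeySet /*dropped on the floor*/, Params... args) {
    // The uniform convention passes a ToyKeySet to every entry point, but this
    // kernel's own signature does not take one, so it is discarded here.
    return kernel(std::forward<Params>(args)...);
  }
};

int main() {
  auto add = [](int a, int b) { return a + b; };
  using Adapter = drop_keyset_adapter<decltype(add), int, int, int>;
  ToyKeySet ks = 0b101;  // whatever keys the dispatcher computed
  assert(Adapter::call(add, ks, 2, 3) == 5);
  return 0;
}
// ---------------------------------------------------------------------------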
471
+
472
+ // This specialization is for kernels with a first argument of type DispatchKeySet
473
+ template<class KernelFunctor, class ReturnType, class... ParameterTypes>
474
+ struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(DispatchKeySet, ParameterTypes...)> final {
475
+ static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
476
+ "Return type mismatch");
477
+ static_assert(std::is_same<guts::typelist::typelist<DispatchKeySet, ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
478
+ "Parameter types mismatch");
479
+
480
+ // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
481
+ static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) {
482
+ KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
483
+ // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel.
484
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
485
+ return (*functor_)(dispatchKeySet, std::forward<ParameterTypes>(args)...);
486
+ }
487
+ };
488
+
489
+ template<class KernelFunctor>
490
+ using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_<KernelFunctor, typename guts::infer_function_traits_t<KernelFunctor>::func_type>;
491
+
492
+ // call_functor_with_args_from_stack
493
+
494
+ template<class Functor, bool AllowDeprecatedTypes, size_t... ivalue_arg_indices, typename... ArgTypes>
495
+ std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
496
+ call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence<ivalue_arg_indices...>, guts::typelist::typelist<ArgTypes...>*) {
497
+ (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning.
498
+
499
+ // We're explicitly filtering out DispatchKeySet from the argument list.
500
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
501
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
502
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
503
+ return wrap_kernel_functor_unboxed<Functor>::call(functor, dispatchKeySet,
504
+ ivalue_to_arg<typename decay_if_not_tensor<ArgTypes>::type, AllowDeprecatedTypes>::call(
505
+ torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices))
506
+ )...);
507
+ }
508
+
509
+ template<class Functor, bool AllowDeprecatedTypes>
510
+ std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
511
+ call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) {
512
+ // We're explicitly filtering out DispatchKeySet from the argument list.
513
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
514
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
515
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
516
+ using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<Functor>::parameter_types;
517
+ constexpr size_t num_ivalue_args = guts::typelist::size<ArgTypes>::value;
518
+ return call_functor_with_args_from_stack_<Functor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack, std::make_index_sequence<num_ivalue_args>(), static_cast<ArgTypes*>(nullptr));
519
+ }
520
+
521
+ // push_outputs
522
+
523
+ template<class OutputType, bool AllowDeprecatedTypes>
524
+ struct push_outputs final {
525
+ // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here
526
+ // to avoid one extra call to the move constructor in this case. This is still not a
527
+ // universal reference though because OutputType is an explicitly specified class
528
+ // template parameter.
529
+ static void call(OutputType&& output, Stack* stack) {
530
+ torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::call(std::forward<OutputType>(output)));
531
+ }
532
+ static void copy(const OutputType& output, Stack* stack) {
533
+ torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::copy(output));
534
+ }
535
+ };
536
+ template<class... OutputTypes, bool AllowDeprecatedTypes>
537
+ struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
538
+ static void call(std::tuple<OutputTypes...>&& output, Stack* stack) {
539
+ call_(std::move(output), stack, std::make_index_sequence<sizeof...(OutputTypes)>());
540
+ }
541
+ static void copy(const std::tuple<OutputTypes...>& output, Stack* stack) {
542
+ copy_(output, stack, std::make_index_sequence<sizeof...(OutputTypes)>());
543
+ }
544
+
545
+ private:
546
+ template<size_t... indices>
547
+ static void call_(std::tuple<OutputTypes...>&& output, Stack* stack, std::index_sequence<indices...>) {
548
+ torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::call(std::forward<OutputTypes>(std::get<indices>(output)))...);
549
+ }
550
+ template<size_t... indices>
551
+ static void copy_(const std::tuple<OutputTypes...>& output, Stack* stack, std::index_sequence<indices...>) {
552
+ torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::copy(std::get<indices>(output))...);
553
+ }
554
+ };
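// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): expanding a tuple of
// outputs onto a "stack" with std::index_sequence, mirroring the structure of
// push_outputs<std::tuple<...>> above. std::vector<std::string> stands in for
// the IValue stack; names are hypothetical.
#include <cassert>
#include <cstddef>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

template <class... Ts, std::size_t... Is>
void toy_push_all_(std::vector<std::string>& stack, std::tuple<Ts...>&& outputs,
                   std::index_sequence<Is...>) {
  // Each index moves a different tuple element, so moving out of `outputs`
  // once per index is fine; the comma fold keeps left-to-right order.
  (stack.push_back(std::get<Is>(std::move(outputs))), ...);
}

template <class... Ts>
void toy_push_all(std::vector<std::string>& stack, std::tuple<Ts...>&& outputs) {
  toy_push_all_(stack, std::move(outputs), std::make_index_sequence<sizeof...(Ts)>());
}

int main() {
  std::vector<std::string> stack;
  toy_push_all(stack, std::make_tuple(std::string("out0"), std::string("out1")));
  assert(stack.size() == 2 && stack[0] == "out0" && stack[1] == "out1");
  return 0;
}
// ---------------------------------------------------------------------------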
555
+ template<bool AllowDeprecatedTypes>
556
+ struct push_outputs<void, AllowDeprecatedTypes> final {
557
+ static void call(int /*dummy*/, Stack* /*stack*/) {
558
+ }
559
+ static void copy(int /*dummy*/, Stack* /*stack*/) {
560
+ }
561
+ };
562
+
563
+ // make_boxed_from_unboxed_functor
564
+
565
+ template<class KernelFunctor, bool AllowDeprecatedTypes>
566
+ struct make_boxed_from_unboxed_functor final {
567
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value,
568
+ "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
569
+
570
+ static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) {
571
+ using ReturnType = typename guts::infer_function_traits_t<KernelFunctor>::return_type;
572
+ // We're explicitly filtering out DispatchKeySet from the argument list.
573
+ // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
574
+ // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
575
+ // See Note [Plumbing Keys Through The Dispatcher] for the background.
576
+ using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::parameter_types;
577
+ constexpr bool has_outputs = !std::is_same<void, ReturnType>::value;
578
+ constexpr size_t num_inputs = guts::typelist::size<ArgTypes>::value;
579
+ if constexpr (has_outputs) {
580
+ // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value
581
+ // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`.
582
+ // [Note: VC++ and 'std': ambiguous symbol]
583
+ using ReturnType_ = ::std::decay_t<ReturnType>;
584
+ ReturnType_ output = call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
585
+ torch::jit::drop(*stack, num_inputs);
586
+ // See note [ VC++ and 'std': ambiguous symbol]
587
+ push_outputs<ReturnType_, AllowDeprecatedTypes>::call(::std::move(output), stack);
588
+ } else {
589
+ call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
590
+ torch::jit::drop(*stack, num_inputs);
591
+ }
592
+ }
593
+ };
594
+ } // namespace impl
595
+
596
+ } // namespace c10
597
+
598
+ namespace torch {
599
+ using OperatorKernel = c10::OperatorKernel;
600
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <gtest/gtest.h>
4
+ #include <gmock/gmock.h>
5
+
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/core/dispatch/Dispatcher.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <c10/core/CPUAllocator.h>
10
+ #include <c10/util/irange.h>
11
+
12
+ template<class... Inputs>
13
+ inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) {
14
+ return {std::forward<Inputs>(inputs)...};
15
+ }
16
+
17
+ inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) {
18
+ auto* allocator = c10::GetCPUAllocator();
19
+ int64_t nelements = 1;
20
+ auto dtype = caffe2::TypeMeta::Make<float>();
21
+ int64_t size_bytes = nelements * dtype.itemsize();
22
+ auto storage_impl = c10::make_intrusive<c10::StorageImpl>(
23
+ c10::StorageImpl::use_byte_size_t(),
24
+ size_bytes,
25
+ allocator->allocate(size_bytes),
26
+ allocator,
27
+ /*resizable=*/true);
28
+ at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype);
29
+ // TODO: We add this to simulate the ideal case where we only have Autograd backend keys
30
+ // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl
31
+ // constructor by default.
32
+ if (!requires_grad) {
33
+ t.unsafeGetTensorImpl()->remove_autograd_key();
34
+ }
35
+ return t;
36
+ }
37
+
38
+ inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) {
39
+ return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad);
40
+ }
41
+
42
+ template<class... Args>
43
+ inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) {
44
+ auto stack = makeStack(std::forward<Args>(args)...);
45
+ op.callBoxed(&stack);
46
+ return stack;
47
+ }
48
+
49
+ template<class Result, class... Args>
50
+ inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
51
+ return op.typed<Result(Args...)>().call(std::forward<Args>(args)...);
52
+ }
53
+
54
+ template<class Result, class... Args>
55
+ inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
56
+ return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...);
57
+ }
58
+
59
+ template<class Result, class... Args>
60
+ inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) {
61
+ return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...);
62
+ }
63
+
64
+ inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
65
+ auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
66
+ EXPECT_ANY_THROW(
67
+ callOp(*op, dummyTensor(dispatch_key), 5);
68
+ );
69
+ }
70
+
71
+ inline void expectDoesntFindOperator(const char* op_name) {
72
+ auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
73
+ EXPECT_FALSE(op.has_value());
74
+ }
75
+
76
+ template<class Exception, class Functor>
77
+ inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
78
+ try {
79
+ std::forward<Functor>(functor)();
80
+ } catch (const Exception& e) {
81
+ EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains));
82
+ return;
83
+ }
84
+ ADD_FAILURE() << "Expected to throw exception containing \""
85
+ << expectMessageContains << "\" but didn't throw";
86
+ }
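// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). In a gtest test
// body, and assuming c10::Error is the exception type thrown by TORCH_CHECK:
//
//   expectThrows<c10::Error>([] {
//     TORCH_CHECK(false, "expected shape [2, 3]");
//   }, "expected shape");
//
// The helper passes only if the callable throws a c10::Error whose what()
// message contains the given substring; otherwise it reports a test failure.
// ---------------------------------------------------------------------------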
87
+
88
+ template<class T, size_t N>
89
+ void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) {
90
+ EXPECT_EQ(expected.size(), actual.size());
91
+ for (const auto i : c10::irange(expected.size())) {
92
+ EXPECT_EQ(expected[i], actual[i]);
93
+ }
94
+ }
95
+
96
+ template<class T>
97
+ void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) {
98
+ EXPECT_EQ(expected.size(), actual.size());
99
+ for (const auto i : c10::irange(expected.size())) {
100
+ EXPECT_EQ(expected[i], actual[i]);
101
+ }
102
+ }
103
+
104
+ template<class T>
105
+ void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) {
106
+ EXPECT_EQ(expected.size(), actual.size());
107
+ for (const auto i : c10::irange(expected.size())) {
108
+ EXPECT_EQ(expected[i], actual.get(i));
109
+ }
110
+ }
111
+
112
+ template<class T>
113
+ void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) {
114
+ EXPECT_EQ(expected.size(), actual.size());
115
+ for (const auto i : c10::irange(expected.size())) {
116
+ EXPECT_EQ(expected[i], actual[i]);
117
+ }
118
+ }
119
+
120
+ // NB: This is not really sound, but all of the type sets constructed here
121
+ // are singletons so it's fine
122
+ static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) {
123
+ return legacyExtractDispatchKey(t.key_set());
124
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Metaprogramming.h>
7
+ #include <c10/util/Type.h>
8
+
9
+ namespace c10 {
10
+ namespace impl {
11
+
12
+ // A CppSignature object holds RTTI information about a C++ function signature at runtime
13
+ // and can compare them or get a debug-printable name.
14
+ class TORCH_API CppSignature final {
15
+ public:
16
+ CppSignature(const CppSignature&) = default;
17
+ CppSignature(CppSignature&&) noexcept = default;
18
+ CppSignature& operator=(const CppSignature&) = default;
19
+ CppSignature& operator=(CppSignature&&) noexcept = default;
20
+
21
+ template<class FuncType>
22
+ static CppSignature make() {
23
+ // Normalize functors, lambdas, function pointers, etc. into the plain function type
24
+ // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
25
+ // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered
26
+ // with different calling conventions.
27
+ // See Note [Plumbing Keys Through The Dispatcher]
28
+ using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type;
29
+
30
+ return CppSignature(std::type_index(typeid(decayed_function_type)));
31
+ }
32
+
33
+ std::string name() const {
34
+ return c10::demangle(signature_.name());
35
+ }
36
+
37
+ friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) {
38
+ if (lhs.signature_ == rhs.signature_) {
39
+ return true;
40
+ }
41
+ // Without RTLD_GLOBAL, the type_index comparison could yield false because
42
+ // they point to different instances of the RTTI data, but the types would
43
+ // still be the same. Let's check for that case too.
44
+ // Note that there still is a case where this might not work, i.e. when
45
+ // linking libraries of different compilers together, they might have
46
+ // different ways to serialize a type name. That, together with a missing
47
+ // RTLD_GLOBAL, would still fail this.
48
+ if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) {
49
+ return true;
50
+ }
51
+
52
+ return false;
53
+ }
54
+
55
+ private:
56
+ explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {}
57
+ std::type_index signature_;
58
+ };
59
+
60
+ inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) {
61
+ return !(lhs == rhs );
62
+ }
63
+
64
+ }
65
+ }
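// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the core of what
// CppSignature stores, namely a std::type_index over the plain function type,
// so that two registrations with the same signature compare equal. `add` and
// `mul` are hypothetical declarations used only for their types.
#include <cassert>
#include <typeindex>

int add(int, int);
double mul(double, double);

int main() {
  std::type_index a(typeid(int(int, int)));
  std::type_index b(typeid(decltype(add)));  // same plain function type as `a`
  std::type_index c(typeid(decltype(mul)));
  assert(a == b);
  assert(a != c);
  return 0;
}
// ---------------------------------------------------------------------------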
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h ADDED
@@ -0,0 +1,242 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <ATen/core/function_schema.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <c10/util/Bitset.h>
7
+ #include <c10/core/DispatchKeySet.h>
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/core/Variadic.h>
10
+ #include <ATen/core/stack.h>
11
+
12
+ namespace c10 {
13
+
14
+ namespace impl {
15
+
16
+ // Take a DispatchKeySet for a Tensor and determine what the actual dispatch
17
+ // DispatchKey should be, taking into account TLS, and skipping backends which
18
+ // fall through.
19
+ //
20
+ // Unlike Tensor::key_set(), the value of this on a tensor can change depending
21
+ // on TLS.
22
+ //
23
+ // NB: If there is no valid dispatch key, this will return Undefined
24
+ static inline DispatchKeySet computeDispatchKeySet(
25
+ DispatchKeySet ks,
26
+ // The key mask lets us eliminate (by zero entries) keys which should not
27
+ // be considered for dispatch. There are two cases when we use this:
28
+ //
29
+ // - If an operator's dispatch table contains a fallthrough entry, we
30
+ // should bypass it entirely when finding the key
31
+ // - If a user invokes with redispatch, the mask lets us
32
+ // zero out the key the user asked us to stop.
33
+ //
34
+ // These excluded backends are NOT tracked in the TLS, but must be applied
35
+ // AFTER TLS (since the backend may have been introduced for consideration
36
+ // by the included TLS), which is why you have to pass them in to this
37
+ // function (as opposed to just applying it to the input 'ks').
38
+ DispatchKeySet key_mask
39
+ ) {
40
+ c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
41
+ // TODO: It's a bit irritating that we have to do logical ORs here, it would
42
+ // be nice to only do one. Can always_included be folded into the TLS? Well,
43
+ // it's a bit troublesome, because fastpath TLS access requires the type of
44
+ // the TLS in question to be zero-initialized, so you don't actually win
45
+ // anything in that case.
46
+ return (((ks | local.included_) - local.excluded_) & key_mask);
47
+ }
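// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the same composition
// with plain bitmasks, to make the order of operations in the return statement
// above concrete. The key names are hypothetical stand-ins for DispatchKeys.
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kCPU      = 1u << 0;
  constexpr uint64_t kAutograd = 1u << 1;
  constexpr uint64_t kTracer   = 1u << 2;

  uint64_t ks           = kCPU | kAutograd;  // keys carried by the tensor(s)
  uint64_t tls_included = kTracer;           // local.included_
  uint64_t tls_excluded = kAutograd;         // local.excluded_
  uint64_t key_mask     = ~uint64_t{0};      // no fallthrough / redispatch masking

  // ((ks | included) - excluded) & mask, where "-" is set difference:
  uint64_t result = ((ks | tls_included) & ~tls_excluded) & key_mask;
  assert(result == (kCPU | kTracer));  // Autograd excluded by TLS, Tracer added
  return 0;
}
// ---------------------------------------------------------------------------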
48
+
49
+ }
50
+
51
+ namespace detail {
52
+ // A small gadget to extract the DispatchKeySet from types which are known
53
+ // to have it. Used to extract dispatch keys from unboxed calls.
54
+ struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
55
+ DispatchKeySet ts;
56
+ void operator()(const at::Tensor& x) {
57
+ ts = ts | x.key_set();
58
+ }
59
+ void operator()(const c10::optional<at::Tensor>& x) {
60
+ if (x.has_value()) {
61
+ ts = ts | x->key_set();
62
+ }
63
+ }
64
+ void operator()(at::ArrayRef<at::Tensor> xs) {
65
+ for (const auto& x : xs) {
66
+ ts = ts | x.key_set();
67
+ }
68
+ }
69
+ // Tensor?[] translates to this case.
70
+ void operator()(const c10::List<c10::optional<at::Tensor>>& xs) {
71
+ for (c10::optional<at::Tensor> x : xs) {
72
+ if (x.has_value()) {
73
+ ts = ts | x.value().key_set();
74
+ }
75
+ }
76
+ }
77
+ // Structured Tensor[] translates to this case
78
+ void operator()(const at::ITensorListRef& xs) {
79
+ for (const auto& x : xs) {
80
+ ts = ts | x.key_set();
81
+ }
82
+ }
83
+ [[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) {
84
+ // Just checking that the handling of Tensor?[] didn't change.
85
+ TORCH_INTERNAL_ASSERT(false);
86
+ }
87
+ void operator()(const at::Generator& gen) {
88
+ if (gen.defined()) {
89
+ ts = ts | gen.key_set();
90
+ }
91
+ }
92
+ void operator()(const c10::optional<at::Generator>& gen) {
93
+ if (gen.has_value() && gen->defined()) {
94
+ ts = ts | gen->key_set();
95
+ }
96
+ }
97
+ template <typename T>
98
+ void operator()(const T&) {
99
+ // do nothing
100
+ }
101
+ };
102
+
103
+ // NB: take by const reference (Don't do universal forwarding here! You
104
+ // don't want to move into this function!)
105
+ template <typename... Args>
106
+ DispatchKeySet multi_dispatch_key_set(const Args&... args) {
107
+ return MultiDispatchKeySet().apply(args...).ts;
108
+ }
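// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): the
// multi_dispatch_key_set idea as a standalone fold that ORs together a key
// mask from every argument that carries one and ignores everything else.
// Types and names are hypothetical.
#include <cassert>
#include <cstdint>

struct ToyTensor { uint64_t key_set; };

inline uint64_t keys_of(const ToyTensor& t) { return t.key_set; }
template <class T> uint64_t keys_of(const T&) { return 0; }  // non-tensor args contribute nothing

template <class... Args>
uint64_t toy_multi_key_set(const Args&... args) {
  return (uint64_t{0} | ... | keys_of(args));
}

int main() {
  ToyTensor a{0b001}, b{0b100};
  assert(toy_multi_key_set(a, 3, b, "hello") == 0b101);
  return 0;
}
// ---------------------------------------------------------------------------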
109
+ }
110
+
111
+ /**
112
+ * An instance of DispatchKeyExtractor knows how to get a dispatch key given
113
+ * a list of arguments for an operator call.
114
+ *
115
+ * The instance is specific for a certain operator as:
116
+ * - In boxed dispatch, different operators have different ways to extract
117
+ * the dispatch key (e.g. different numbers of arguments), and we precompute
118
+ * the stack locations we should look at; and
119
+ * - In all dispatch, some backends should be excluded from dispatch because
120
+ * they have been registered as fallthrough. The set of excluded backends
121
+ * varies from operator, as some operators may have overridden the
122
+ * fallthrough with custom behavior.
123
+ *
124
+ * Note - this should maintain identical impl to the py dispatcher key extraction logic
125
+ * at pytorch/torch/dispatcher.py
126
+ */
127
+ struct TORCH_API DispatchKeyExtractor final {
128
+ public:
129
+ static DispatchKeyExtractor make(const FunctionSchema& schema) {
130
+ return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
131
+ }
132
+
133
+ static DispatchKeyExtractor makeUninitialized() {
134
+ return DispatchKeyExtractor(c10::utils::bitset());
135
+ }
136
+
137
+ void registerSchema(const FunctionSchema& schema) {
138
+ TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
139
+ dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
140
+ }
141
+ void deregisterSchema() {
142
+ dispatch_arg_indices_reverse_ = c10::utils::bitset();
143
+ }
144
+
145
+ DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
146
+ DispatchKeySet ks;
147
+ dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
148
+ const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
149
+ if (C10_LIKELY(ivalue.isTensor())) {
150
+ // NB: Take care not to introduce a refcount bump (there's
151
+ // no safe toTensorRef method, alas)
152
+ ks = ks | ivalue.unsafeToTensorImpl()->key_set();
153
+ } else if (C10_UNLIKELY(ivalue.isTensorList())) {
154
+ for (const at::Tensor& tensor : ivalue.toTensorList()) {
155
+ ks = ks | tensor.key_set();
156
+ }
157
+ }
158
+ // Tensor?[] translates to a c10::List<IValue> so we need to peek inside
159
+ else if (C10_UNLIKELY(ivalue.isList())) {
160
+ for (const auto& elt : ivalue.toListRef()) {
161
+ if (elt.isTensor()) {
162
+ ks = ks | elt.toTensor().key_set();
163
+ }
164
+ }
165
+ }
166
+ });
167
+ // Keys that are fallthrough should be skipped
168
+ if (requiresBitsetPerBackend_) {
169
+ auto backend_idx = ks.getBackendIndex();
170
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
171
+ } else {
172
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
173
+ }
174
+ }
175
+
176
+ template<class... Args>
177
+ DispatchKeySet getDispatchKeySetUnboxed(const Args&... args) const {
178
+ auto ks = detail::multi_dispatch_key_set(args...);
179
+ // Keys that are fallthrough should be skipped
180
+ if (requiresBitsetPerBackend_) {
181
+ auto backend_idx = ks.getBackendIndex();
182
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
183
+ } else {
184
+ return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
185
+ }
186
+ }
187
+
188
+ void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);
189
+
190
+ std::string dumpState() const;
191
+ void checkInvariants(const FunctionSchema& schema) const;
192
+
193
+ private:
194
+ static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
195
+ TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
196
+ "The function schema has ", schema.arguments().size(),
197
+ " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
198
+ c10::utils::bitset dispatch_arg_indices_reverse;
199
+ for (const auto index : c10::irange(schema.arguments().size())) {
200
+ if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
201
+ schema.arguments()[index].type()->isSubtypeOf(
202
+ *ListType::ofTensors()) ||
203
+ schema.arguments()[index].type()->isSubtypeOf(
204
+ *ListType::ofOptionalTensors()) ||
205
+ schema.arguments()[index].type()->isSubtypeOf(
206
+ *OptionalType::ofTensor())) {
207
+ dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
208
+ }
209
+ }
210
+ return dispatch_arg_indices_reverse;
211
+ }
212
+
213
+ explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
214
+ : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
215
+ , nonFallthroughKeys_(DispatchKeySet::FULL)
216
+ , requiresBitsetPerBackend_(false) {
217
+ for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
218
+ nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
219
+ }
220
+ }
221
+
222
+ // this is a bitset that has ones for each argument index which has to be
223
+ // considered for dispatch. This avoids having to iterate over the stack
224
+ // to find all the tensors. The bits are stored in reverse order, i.e.
225
+ // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
226
+ // the top of the stack (i.e. the i-th last argument of the function)
227
+ // is relevant for dispatch.
228
+ // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
229
+ // fallthrough
230
+ c10::utils::bitset dispatch_arg_indices_reverse_;
231
+
232
+ // Set of functionality keys for which the operator does NOT have fallthrough kernel.
233
+ DispatchKeySet nonFallthroughKeys_;
234
+ // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND.
235
+ // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
236
+ std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_;
237
+ // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path),
238
+ // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_
239
+ bool requiresBitsetPerBackend_;
240
+ };
241
+
242
+ }
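The reverse-index convention above is easy to misread, so here is a self-contained toy sketch (plain std::bitset rather than the internal c10::utils::bitset, and a hypothetical schema) of how a set bit at position i selects the i-th entry from the top of the stack, i.e. the i-th last argument:

// Toy sketch only, not PyTorch code. Illustrates the reverse-index
// convention for a hypothetical 3-argument schema
//   op(Tensor self, int dim, Tensor other)
// where arguments 0 and 2 are dispatch-relevant.
#include <bitset>
#include <cstddef>
#include <iostream>

int main() {
  constexpr std::size_t kNumArgs = 3;
  std::bitset<64> dispatch_arg_indices_reverse;
  dispatch_arg_indices_reverse.set(kNumArgs - 1 - 0);  // argument 0 -> bit 2
  dispatch_arg_indices_reverse.set(kNumArgs - 1 - 2);  // argument 2 -> bit 0
  // Walking from the top of the stack: bit i set means the i-th entry from
  // the top (the i-th last argument) must be inspected for its key set.
  for (std::size_t i = 0; i < kNumArgs; ++i) {
    if (dispatch_arg_indices_reverse[i]) {
      std::cout << "inspect stack[top - " << i << "], i.e. argument "
                << (kNumArgs - 1 - i) << "\n";
    }
  }
  return 0;
}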
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h ADDED
@@ -0,0 +1,773 @@
1
+ #pragma once
2
+
3
+ #include <ATen/SequenceNumber.h>
4
+ #include <ATen/core/boxing/KernelFunction.h>
5
+ #include <ATen/core/boxing/impl/boxing.h>
6
+ #include <ATen/core/dispatch/OperatorEntry.h>
7
+ #include <ATen/core/dispatch/CppSignature.h>
8
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
9
+ #include <ATen/record_function.h>
10
+ #include <c10/util/Exception.h>
11
+ #include <c10/util/LeftRight.h>
12
+ #include <list>
13
+ #include <mutex>
14
+ #include <condition_variable>
15
+ #include <type_traits>
16
+ #include <c10/core/SafePyObject.h>
17
+
18
+ #include <ATen/core/grad_mode.h>
19
+ #include <ATen/core/enum_tag.h>
20
+
21
+ #ifndef NDEBUG
22
+ #include <iostream>
23
+ #endif
24
+
25
+ namespace c10 {
26
+
27
+ TORCH_API bool show_dispatch_trace();
28
+ TORCH_API void dispatch_trace_nesting_incr();
29
+ TORCH_API void dispatch_trace_nesting_decr();
30
+ TORCH_API int64_t dispatch_trace_nesting_value();
31
+
32
+ struct DispatchTraceNestingGuard {
33
+ DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); }
34
+ ~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); }
35
+ };
36
+
37
+ class TORCH_API OperatorHandle;
38
+ template<class FuncType> class TypedOperatorHandle;
39
+
40
+ /**
41
+ * Implement this interface and register your instance with the dispatcher
42
+ * to get notified when operators are registered or deregistered with
43
+ * the dispatcher.
44
+ *
45
+ * NB: registration events only occur when a 'def' occurs; we don't trigger
46
+ * on 'impl' or 'fallback' calls.
47
+ */
48
+ class TORCH_API OpRegistrationListener {
49
+ public:
50
+ virtual ~OpRegistrationListener();
51
+
52
+ virtual void onOperatorRegistered(const OperatorHandle& op) = 0;
53
+ virtual void onOperatorDeregistered(const OperatorHandle& op) = 0;
54
+ };
55
+
56
+ namespace detail {
57
+ class RegistrationListenerList;
58
+ }
59
+ class SchemaRegistrationHandleRAII;
60
+
61
+ /**
62
+ * Top-level dispatch interface for dispatching via the dynamic dispatcher.
63
+ * Most end users shouldn't use this directly; if you're trying to register
64
+ * ops look in op_registration
65
+ */
66
+ class TORCH_API Dispatcher final {
67
+ private:
68
+ // For direct access to backend fallback information
69
+ friend class impl::OperatorEntry;
70
+
71
+ struct OperatorDef final {
72
+ explicit OperatorDef(OperatorName&& op_name)
73
+ : op(std::move(op_name)) {}
74
+
75
+ impl::OperatorEntry op;
76
+
77
+ // These refer to the number of outstanding RegistrationHandleRAII
78
+ // for this operator. def_count reflects only def() registrations
79
+ // (in the new world, this should only ever be 1, but old style
80
+ // registrations may register the schema multiple times, which
81
+ // will increase this count). def_and_impl_count reflects the number
82
+ // of combined def() and impl() registrations. When the last def() gets
83
+ // unregistered, we must immediately call the Deregistered listeners, but we
84
+ // must not actually delete the handle as there are other outstanding RAII
85
+ // destructors which will try to destruct and they had better still have a
86
+ // working operator handle in this case
87
+ size_t def_count = 0;
88
+ size_t def_and_impl_count = 0;
89
+ };
90
+ friend class OperatorHandle;
91
+ template<class> friend class TypedOperatorHandle;
92
+
93
+ struct Guard final {
94
+ Guard() : alive(true), mutex() {}
95
+ std::atomic<bool> alive;
96
+ std::mutex mutex;
97
+ };
98
+
99
+ public:
100
+ ~Dispatcher();
101
+
102
+ // Implementation note: this class abstracts over the fact that we have per-operator
103
+ // dispatch tables. This could be easily adjusted to have a single global hash
104
+ // table.
105
+ static Dispatcher& realSingleton();
106
+
107
+ C10_ALWAYS_INLINE static Dispatcher& singleton() {
108
+ #if !defined C10_MOBILE
109
+ // Implemented inline so that steady-state code needn't incur
110
+ // function-call overhead. We can't just inline `realSingleton`
111
+ // because the function-local static would get duplicated across
112
+ // all DSOs that include & use this header, leading to multiple
113
+ // singleton instances.
114
+ static Dispatcher& s = realSingleton();
115
+ return s;
116
+ #else
117
+ // For C10_MOBILE, we should never inline a static function that
118
+ // has a static member, since the generated code calls
119
+ // __cxa_guard_acquire and __cxa_guard_release which help
120
+ // implement exactly once semantics for the initialization of the
121
+ // static Dispatcher& s above (for the non-mobile case). That
122
+ // additional code when duplicated across all operator stubs
123
+ // for every backend results in a lot of additional code
124
+ // being generated by the compiler.
125
+ return realSingleton();
126
+ #endif
127
+ }
128
+
129
+ // ------------------------------------------------------------------------
130
+ //
131
+ // Accessing operators by schema
132
+ //
133
+ // ------------------------------------------------------------------------
134
+
135
+ /**
136
+ * Looks for an operator schema with the given name and overload name
137
+ * and returns it if it is registered WITH A SCHEMA.
138
+ * Returns nullopt otherwise.
139
+ */
140
+ c10::optional<OperatorHandle> findSchema(const OperatorName& operator_name);
141
+
142
+ /**
143
+ * Variant of findSchema that results in less code generated at the call site.
144
+ * It (1) takes const char* pointer rather than OperatorName (so we skip
145
+ * generating std::string constructor calls at the call site), and (2)
146
+ * it raises an exception if the operator is not found (so we skip
147
+ * generating exception raising code at the call site)
148
+ *
149
+ * Irritatingly, we still have to generate the handful of instructions
150
+ * for dealing with an exception being thrown during static initialization
151
+ * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
152
+ * could avoid this code too, but as the name of the function suggests,
153
+ * it does throw exceptions.
154
+ */
155
+ OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);
156
+
157
+ // Like findSchema, but also returns OperatorHandle even if there is no schema
158
+ c10::optional<OperatorHandle> findOp(const OperatorName& operator_name);
159
+
160
+ // Returns a list of all operator names present in the operatorLookupTable_
161
+ const std::vector<OperatorName> getAllOpNames();
162
+
163
+ // ------------------------------------------------------------------------
164
+ //
165
+ // Invoking operators
166
+ //
167
+ // ------------------------------------------------------------------------
168
+
169
+ template<class Return, class... Args>
170
+ Return call(const TypedOperatorHandle<Return (Args...)>& op, Args... args) const;
171
+
172
+
173
+ template<class Return, class... Args>
174
+ static Return callWithDispatchKeySlowPath(const TypedOperatorHandle<Return (Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);
175
+
176
+ // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculation.
177
+ // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set.
178
+ // Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key.
179
+ // See Note [Plumbing Keys Through The Dispatcher]
180
+ template<class Return, class... Args>
181
+ Return redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const;
182
+
183
+ // Invoke an operator via the boxed calling convention using an IValue stack
184
+ void callBoxed(const OperatorHandle& op, Stack* stack) const;
185
+ void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const;
186
+
187
+ // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
188
+ // See Note [Plumbing Keys Through The Dispatcher]
189
+ void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const;
190
+
191
+ bool hasBackendFallbackForDispatchKey(DispatchKey dk) {
192
+ auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk);
193
+ if (dispatch_ix < 0) return false;
194
+ return backendFallbackKernels_[dispatch_ix].kernel.isValid();
195
+ }
196
+
197
+ // Used by torchdeploy/multipy for multiple interpreters racing.
198
+ void waitForDef(const FunctionSchema& schema);
199
+ void waitForImpl(const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key);
200
+
201
+ // ------------------------------------------------------------------------
202
+ //
203
+ // Performing registrations (NON user public; use op_registration)
204
+ //
205
+ // ------------------------------------------------------------------------
206
+
207
+ /**
208
+ * Register a new operator schema.
209
+ *
210
+ * If a schema with the same operator name and overload name already exists,
211
+ * this function will check that both schemas are exactly identical.
212
+ */
213
+ RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector<at::Tag> tags = {});
214
+
215
+ /**
216
+ * Register a kernel to the dispatch table for an operator.
217
+ * If dispatch_key is nullopt, then this registers a fallback kernel.
218
+ *
219
+ * @return A RAII object that manages the lifetime of the registration.
220
+ * Once that object is destructed, the kernel will be deregistered.
221
+ */
222
+ // NB: steals the inferred function schema, as we may need to hold on to
223
+ // it for a bit until the real schema turns up
224
+ RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
225
+
226
+ /**
227
+ * Given an operator, tells the Dispatcher that we have implemented an abstract impl
228
+ * for this op in the given Python module. Call this a "pystub".
229
+ */
230
+ RegistrationHandleRAII registerAbstractImplPyStub(const OperatorName& op_name, const char* pymodule, const char* context);
231
+
232
+ /**
233
+ * Given an operator, throws if we have an abstract impl pystub.
234
+ */
235
+ void throwIfHasAbstractImplPyStub(OperatorName op_name);
236
+
237
+ c10::optional<std::pair<const char*, const char*>> getAbstractImplPyStub(OperatorName op_name);
238
+
239
+ /**
240
+ * Register a new operator by name.
241
+ */
242
+ RegistrationHandleRAII registerName(OperatorName op_name);
243
+
244
+ /**
245
+ * Register a fallback kernel for a backend.
246
+ * If an operator is called but there is no concrete kernel for the dispatch
247
+ * key of the given operator arguments, it will check if there is such a
248
+ * fallback kernel for the given dispatch key and, if yes, call that one.
249
+ */
250
+ RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug);
251
+
252
+ /**
253
+ * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend
254
+ * API. These invocations are only permitted once per program, so we raise
255
+ * an error if this is called again for the same namespace.
256
+ */
257
+ RegistrationHandleRAII registerLibrary(std::string ns, std::string debug);
258
+
259
+ // ------------------------------------------------------------------------
260
+ //
261
+ // Listeners on registrations
262
+ //
263
+ // ------------------------------------------------------------------------
264
+
265
+ /**
266
+ * Add a listener that gets called whenever a new op is registered or an existing
267
+ * op is deregistered. Immediately after registering, this listener gets called
268
+ * for all previously registered ops, so it can be used to keep track of ops
269
+ * registered with this dispatcher.
270
+ */
271
+ RegistrationHandleRAII addRegistrationListener(std::unique_ptr<OpRegistrationListener> listener);
272
+
273
+ void checkInvariants() const;
274
+
275
+ //
276
+ // ------------------------------------------------------------------------
277
+ //
278
+ // Assertions
279
+ //
280
+ // ------------------------------------------------------------------------
281
+
282
+ /**
283
+ * For testing purposes.
284
+ * Returns a list of all operators that were created through calls to registerImpl(),
285
+ * without any corresponding calls to registerDef(). After static initialization
286
+ * is done this is almost certainly a bug, as the created OperatorHandle won't have
287
+ * any schema associated with it and users calling the op through the dispatcher
288
+ * won't be able to access it.
289
+ *
290
+ * Note that we cannot enforce this invariant "as we go" during static initialization,
291
+ * due to undefined static initialization order; we have no guarantees over the order
292
+ * in which .def() and .impl() calls are registered in the dispatcher at static
293
+ * initialization time. So this function should only be called after static initialization.
294
+ */
295
+ std::vector<OperatorHandle> findDanglingImpls() const;
296
+
297
+ /**
298
+ * Useful for inspecting global Dispatcher registration state.
299
+ * Returns the names of all operators with a kernel registered for the specified DispatchKey.
300
+ * If no DispatchKey is specified, it returns all registered operators.
301
+ */
302
+ std::vector<OperatorName> getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const;
303
+
304
+ private:
305
+ Dispatcher();
306
+
307
+ static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey);
308
+ static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey);
309
+ static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, c10::ArrayRef<const c10::IValue> args);
310
+
311
+ OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema);
312
+ OperatorHandle findOrRegisterName_(const OperatorName& op_name);
313
+
314
+ void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name);
315
+ void deregisterImpl_(
316
+ const OperatorHandle& op,
317
+ const OperatorName& op_name,
318
+ c10::optional<DispatchKey> dispatch_key,
319
+ impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
320
+ void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
321
+ void deregisterFallback_(DispatchKey dispatchKey);
322
+ void deregisterLibrary_(const std::string& ns);
323
+ void cleanup(const OperatorHandle& op, const OperatorName& op_name);
324
+ void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug);
325
+
326
+ std::list<OperatorDef> operators_;
327
+ #if !defined(C10_MOBILE)
328
+ LeftRight<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
329
+ #else
330
+ RWSafeLeftRightWrapper<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
331
+ #endif
332
+ // Map from namespace to debug string (saying, e.g., where the library was defined)
333
+ ska::flat_hash_map<std::string, std::string> libraries_;
334
+
335
+ std::array<impl::AnnotatedKernel, num_runtime_entries> backendFallbackKernels_;
336
+
337
+ std::unique_ptr<detail::RegistrationListenerList> listeners_;
338
+
339
+ // This condition variable gets notified whenever we add a new def/impl to the
340
+ // dispatch table. This is primarily used by multipy/torchdeploy, when
341
+ // we have multiple interpreters trying to register to the dispatch table.
342
+ // In this situation, whenever the non-primary interpreter would have tried
343
+ // to register to the dispatch table, instead it will check to see if the
344
+ // expected registration has already been made, and if it hasn't, wait on
345
+ // this condition variable to see if it was just racing with the primary
346
+ // interpreter.
347
+ //
348
+ // We expect it to be rare for there to be any waiters on this condition
349
+ // variable. This is mostly just to help give better diagnostics if
350
+ // something goes horribly wrong
351
+ std::condition_variable cond_var_;
352
+
353
+ // Protect concurrent access to the dispatcher. We store this in a
354
+ // `shared_ptr` as we return callbacks that call back into dispatcher methods,
355
+ // and we need to be able to handle and guard against the event when the
356
+ // `Dispatcher` has been destroyed before the callbacks fire.
357
+ std::shared_ptr<Guard> guard_;
358
+ };
359
+
360
+ /**
361
+ * This is a handle to an operator schema registered with the dispatcher.
362
+ * This handle can be used to register kernels with the dispatcher or
363
+ * to lookup a kernel for a certain set of arguments.
364
+ */
365
+ class TORCH_API OperatorHandle {
366
+ template <typename T> friend struct std::hash;
367
+
368
+ public:
369
+ OperatorHandle(OperatorHandle&&) noexcept = default;
370
+ OperatorHandle& operator=(OperatorHandle&&) noexcept = default;
371
+ OperatorHandle(const OperatorHandle&) = default;
372
+ OperatorHandle& operator=(const OperatorHandle&) = default;
373
+ // NOLINTNEXTLINE(performance-trivially-destructible)
374
+ ~OperatorHandle();
375
+
376
+ const OperatorName& operator_name() const {
377
+ return operatorDef_->op.operator_name();
378
+ }
379
+
380
+ bool hasSchema() const {
381
+ return operatorDef_->op.hasSchema();
382
+ }
383
+
384
+ const FunctionSchema& schema() const {
385
+ return operatorDef_->op.schema();
386
+ }
387
+
388
+ const std::string& debug() const {
389
+ return operatorDef_->op.debug();
390
+ }
391
+
392
+ std::string dumpState() const {
393
+ return operatorDef_->op.dumpState();
394
+ }
395
+
396
+ bool hasKernelForDispatchKey(DispatchKey k) const {
397
+ return operatorDef_->op.hasKernelForDispatchKey(k);
398
+ }
399
+
400
+ bool hasKernelForAnyDispatchKey(DispatchKeySet k) const {
401
+ return operatorDef_->op.hasKernelForAnyDispatchKey(k);
402
+ }
403
+
404
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const {
405
+ return operatorDef_->op.hasComputedKernelForDispatchKey(k);
406
+ }
407
+
408
+ std::string dumpComputedTable() const {
409
+ return operatorDef_->op.dumpComputedTable();
410
+ }
411
+
412
+ void checkInvariants() const {
413
+ return operatorDef_->op.checkInvariants();
414
+ }
415
+
416
+ c10::ArrayRef<at::Tag> getTags() const {
417
+ return operatorDef_->op.getTags();
418
+ }
419
+
420
+ void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback) {
421
+ operatorDef_->op.setReportErrorCallback_(std::move(callback));
422
+ }
423
+
424
+ bool hasTag(const at::Tag& tag) const {
425
+ for(const auto& tag_: getTags()) {
426
+ if (tag == tag_) {
427
+ return true;
428
+ }
429
+ }
430
+ return false;
431
+ }
432
+
433
+ template<class FuncType>
434
+ TypedOperatorHandle<FuncType> typed() const {
435
+ // NB: This assert is not 100% sound: you can retrieve a typed() operator
436
+ // handle prior to ANY C++ signature being registered on the operator
437
+ // and the check will say everything is OK (at which point you can then
438
+ // smuggle in a kernel that is typed incorrectly). For everything
439
+ // in core library this won't happen, because all the static registrations
440
+ // will be done by the time a typed() handle is acquired.
441
+ #if !defined C10_MOBILE
442
+ operatorDef_->op.assertSignatureIsCorrect<FuncType>();
443
+ if (fn_has_symint<FuncType>::value) {
444
+ operatorDef_->op.assertSignatureIsCorrect<typename fn_remove_symint<FuncType>::type>();
445
+ }
446
+ #endif
447
+ return TypedOperatorHandle<FuncType>(operatorIterator_);
448
+ }
449
+
450
+ void callBoxed(Stack* stack) const {
451
+ c10::Dispatcher::singleton().callBoxed(*this, stack);
452
+ }
453
+
454
+ void callBoxed(Stack& stack) const {
455
+ callBoxed(&stack);
456
+ }
457
+
458
+ void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const {
459
+ c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack);
460
+ }
461
+
462
+ void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
463
+ c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
464
+ }
465
+
466
+ template <typename F>
467
+ PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const {
468
+ return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor);
469
+ }
470
+
471
+ bool operator==(const OperatorHandle& other) const {
472
+ return operatorDef_ == other.operatorDef_;
473
+ }
474
+
475
+ bool operator!=(const OperatorHandle& other) const {
476
+ return operatorDef_ != other.operatorDef_;
477
+ }
478
+
479
+ private:
480
+ explicit OperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
481
+ : operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {}
482
+ friend class Dispatcher;
483
+ template<class> friend class TypedOperatorHandle;
484
+
485
+ // Storing a direct pointer to the OperatorDef even though we
486
+ // already have the iterator saves an instruction in the critical
487
+ // dispatch path. The iterator is effectively a
488
+ // pointer-to-std::list-node, and (at least in libstdc++'s
489
+ // implementation) the element is at an offset 16 bytes from that,
490
+ // because the prev/next pointers come first in the list node
491
+ // struct. So, an add instruction would be necessary to convert from the
492
+ // iterator to an OperatorDef*.
493
+ Dispatcher::OperatorDef* operatorDef_;
494
+
495
+ // We need to store this iterator in order to make
496
+ // Dispatcher::cleanup() fast -- it runs a lot on program
497
+ // termination (and presumably library unloading).
498
+ std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
499
+ };
500
+
501
+ /**
502
+ * This is a handle to an operator schema registered with the dispatcher.
503
+ * It holds the same information as an OperatorHandle, but it is templated
504
+ * on the operator arguments and allows calling the operator in an
505
+ * unboxed way.
506
+ */
507
+ template<class FuncType>
508
+ class TypedOperatorHandle final {
509
+ static_assert(guts::false_t<FuncType>(), "FuncType in OperatorHandle::typed<FuncType> was not a valid function type");
510
+ };
511
+ template<class Return, class... Args>
512
+ class TypedOperatorHandle<Return (Args...)> final : public OperatorHandle {
513
+ public:
514
+ TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
515
+ TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
516
+ TypedOperatorHandle(const TypedOperatorHandle&) = default;
517
+ TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
518
+
519
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
520
+ C10_ALWAYS_INLINE Return call(Args... args) const {
521
+ return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
522
+ }
523
+
524
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
525
+ C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
526
+ return c10::Dispatcher::singleton().redispatch<Return, Args...>(*this, currentDispatchKeySet, std::forward<Args>(args)...);
527
+ }
528
+
529
+ private:
530
+ explicit TypedOperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
531
+ : OperatorHandle(operatorIterator) {}
532
+ friend class OperatorHandle;
533
+ };
534
+
535
+ namespace detail {
536
+ template <class... Args> inline void unused_arg_(const Args&...) {}
537
+
538
+ // CaptureKernelCall is intended to capture return values from Dispatcher
539
+ // unboxed kernel calls. A record function may request to get outputs from the
540
+ // kernel calls. For boxed kernels, it's straightforward, the returned values
541
+ // are in the stack object. The stack can be passed to record functions. For
542
+ // unboxed kernels, we need to handle different kinds of return values, cache
543
+ // them temporarily, then release the values for the actual function call
544
+ // return.
545
+ template <typename ReturnType>
546
+ struct CaptureKernelCall {
547
+ template <typename F, typename... Args>
548
+ CaptureKernelCall(
549
+ const F& kernel,
550
+ const TypedOperatorHandle<ReturnType(Args...)>& op,
551
+ const DispatchKeySet& dispatchKeySet,
552
+ Args&&... args)
553
+ // Calls the kernel and capture the result in output_.
554
+ : output_{kernel.template call<ReturnType, Args...>(
555
+ op,
556
+ dispatchKeySet,
557
+ std::forward<Args>(args)...)} {}
558
+ // Wraps the return values in a Stack.
559
+ Stack getOutputs() {
560
+ Stack stack;
561
+ impl::push_outputs<ReturnType, false>::copy(output_, &stack);
562
+ return stack;
563
+ }
564
+ // Since we are returning the output_, we don't expect the output_ to be used
565
+ // afterward. Copy elision and RVO do not apply to class data members. Using
566
+ // move semantics to avoid copies when possible.
567
+ ReturnType release() && {
568
+ return std::move(output_);
569
+ }
570
+
571
+ private:
572
+ ReturnType output_;
573
+ };
574
+
575
+ // Handle the lvalue reference differently since it should not be moved.
576
+ template <>
577
+ inline at::Tensor& CaptureKernelCall<at::Tensor&>::release() && {
578
+ return output_;
579
+ }
580
+
581
+ // Handle case where the kernel returns void.
582
+ template <>
583
+ struct CaptureKernelCall<void> {
584
+ template <typename F, typename... Args>
585
+ CaptureKernelCall(
586
+ const F& kernel,
587
+ const TypedOperatorHandle<void(Args...)>& op,
588
+ const DispatchKeySet& dispatchKeySet,
589
+ Args&&... args) {
590
+ // Calling the kernel and no need to capture void.
591
+ kernel.template call<void, Args...>(
592
+ op, dispatchKeySet, std::forward<Args>(args)...);
593
+ }
594
+ Stack getOutputs() {
595
+ return Stack();
596
+ }
597
+ void release() && {}
598
+ };
599
+
600
+ } // namespace detail
601
+
602
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
603
+ template<class Return, class... Args>
604
+ inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle<Return(Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) {
605
+ // If callbacks need inputs, we box the arguments and pass them to the guard.
606
+ // Note: For perf reasons we wouldn't want to prematurely box the arguments.
607
+ at::RecordFunction guard(std::move(stepCallbacks));
608
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved());
609
+ auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
610
+ auto& schema = op.schema();
611
+ auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
612
+ constexpr auto num_boxed_args = impl::boxed_size<Args...>();
613
+ if constexpr (num_boxed_args != 0) {
614
+ if (guard.needsInputs()) {
615
+ // If we used std::array<IValue, num_boxed_args> here, we would
616
+ // have to spend time default constructing the IValues in
617
+ // boxedArgs. aligned_storage has no such requirement.
618
+ impl::IValueAlignedStorage boxedArgs[num_boxed_args];
619
+ // For debugging only; could be removed (but the compiler will do
620
+ // that for us and it's nice to have the extra assurance of
621
+ // correctness from our debug builds).
622
+ int lastArgIdx = 0;
623
+ impl::boxArgsToStack(boxedArgs, lastArgIdx, args...);
624
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args);
625
+ // I don't *think* we need std::launder here, because IValue has
626
+ // no subclasses and no const or reference fields.
627
+ runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
628
+ for (size_t ii = 0; ii < num_boxed_args; ++ii) {
629
+ reinterpret_cast<IValue *>(&boxedArgs[ii])->~IValue();
630
+ }
631
+ } else {
632
+ runRecordFunction(guard, schema_ref, dispatchKey);
633
+ }
634
+ } else {
635
+ runRecordFunction(guard, schema_ref, dispatchKey);
636
+ }
637
+
638
+ if (C10_UNLIKELY(guard.needsOutputs())) {
639
+ // Calls the kernel and capture the output temporarily to pass to
640
+ // RecordFunction.
641
+ detail::CaptureKernelCall<Return> captureKernelCall(
642
+ kernel, op, dispatchKeySet, std::forward<Args>(args)...);
643
+ guard.setOutputs(captureKernelCall.getOutputs());
644
+ // Releases the captured output to return to caller.
645
+ return std::move(captureKernelCall).release();
646
+ }
647
+
648
+ // keeping the guard alive while executing the kernel
649
+ return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
650
+ }
651
+
652
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
653
+ template<class Return, class... Args>
654
+ C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle<Return(Args...)>& op, Args... args) const {
655
+ detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
656
+ auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor()
657
+ .template getDispatchKeySetUnboxed<Args...>(args...);
658
+ #ifndef NDEBUG
659
+ DispatchTraceNestingGuard debug_guard;
660
+ if (show_dispatch_trace()) {
661
+ auto nesting_value = dispatch_trace_nesting_value();
662
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
663
+ std::cerr << "[call] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
664
+ }
665
+ #endif
666
+ const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet);
667
+ #ifndef PYTORCH_DISABLE_PER_OP_PROFILING
668
+ auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
669
+ if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) {
670
+ return callWithDispatchKeySlowPath<Return, Args...>(op, *step_callbacks, dispatchKeySet, kernel, std::forward<Args>(args)...);
671
+ }
672
+ #endif // PYTORCH_DISABLE_PER_OP_PROFILING
673
+ return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
674
+ }
675
+
676
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
677
+ template<class Return, class... Args>
678
+ inline Return Dispatcher::redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const {
679
+ detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
680
+ // do not use RecordFunction on redispatch
681
+ #ifndef NDEBUG
682
+ DispatchTraceNestingGuard debug_guard;
683
+ if (show_dispatch_trace()) {
684
+ auto nesting_value = dispatch_trace_nesting_value();
685
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
686
+ std::cerr << "[redispatch] op=[" << op.operator_name() << "], key=[" << toString(currentDispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
687
+ }
688
+ #endif
689
+ const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet);
690
+ return kernel.template call<Return, Args...>(op, currentDispatchKeySet, std::forward<Args>(args)...);
691
+ }
692
+
693
+ inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
694
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
695
+ const auto& entry = op.operatorDef_->op;
696
+ auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
697
+ #ifndef NDEBUG
698
+ DispatchTraceNestingGuard debug_guard;
699
+ if (show_dispatch_trace()) {
700
+ auto nesting_value = dispatch_trace_nesting_value();
701
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
702
+ std::cerr << "[callBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
703
+ }
704
+ #endif
705
+ const auto& kernel = entry.lookup(dispatchKeySet);
706
+ #ifndef PYTORCH_DISABLE_PER_OP_PROFILING
707
+ auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
708
+ if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) {
709
+ at::RecordFunction guard(std::move(*step_callbacks));
710
+ auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
711
+ auto& schema = op.schema();
712
+ auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
713
+ guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
714
+ : runRecordFunction(guard, schema_ref, dispatchKey);
715
+
716
+ // keeping the guard alive while executing the kernel
717
+ kernel.callBoxed(op, dispatchKeySet, stack);
718
+
719
+ if (C10_UNLIKELY(guard.needsOutputs())) {
720
+ guard.setOutputs(*stack);
721
+ }
722
+ return;
723
+ }
724
+ #endif // PYTORCH_DISABLE_PER_OP_PROFILING
725
+ kernel.callBoxed(op, dispatchKeySet, stack);
726
+ }
727
+
728
+ // NB: this doesn't count as a "true" dispatcher jump, so no instrumentation
729
+ inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const {
730
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
731
+ const auto& entry = op.operatorDef_->op;
732
+ // We still compute this as we're obligated to pass it on to the internal
733
+ // kernel, if it is a boxed fallback
734
+ auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
735
+ const auto& kernel = ([&]() {
736
+ if (op.hasKernelForDispatchKey(dk)) {
737
+ return entry.kernelForDispatchKey(dk);
738
+ } else {
739
+ auto idx = getDispatchTableIndexForDispatchKey(dk);
740
+ TORCH_INTERNAL_ASSERT(idx >= 0);
741
+ return backendFallbackKernels_[idx].kernel;
742
+ }
743
+ })();
744
+ kernel.callBoxed(op, dispatchKeySet, stack);
745
+ }
746
+
747
+ inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const {
748
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
749
+ const auto& entry = op.operatorDef_->op;
750
+ #ifndef NDEBUG
751
+ DispatchTraceNestingGuard debug_guard;
752
+ if (show_dispatch_trace()) {
753
+ auto nesting_value = dispatch_trace_nesting_value();
754
+ for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
755
+ std::cerr << "[redispatchBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
756
+ }
757
+ #endif
758
+ const auto& kernel = entry.lookup(dispatchKeySet);
759
+ return kernel.callBoxed(op, dispatchKeySet, stack);
760
+ }
761
+
762
+ } // namespace c10
763
+
764
+ namespace std {
765
+
766
+ template <>
767
+ struct hash<c10::OperatorHandle> {
768
+ size_t operator()(const c10::OperatorHandle& op) const noexcept {
769
+ return std::hash<void*>{}(static_cast<void*>(op.operatorDef_));
770
+ }
771
+ };
772
+
773
+ } // namespace std
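For orientation, here is a minimal usage sketch of the dispatcher API declared above, showing both the typed (unboxed) path and the boxed IValue-stack path. It assumes the standard "aten::add.Tensor" schema is registered and that its C++ signature matches the one written here; treat the exact signature as an assumption, not a guarantee.

// Sketch, not authoritative: invoking an operator via c10::Dispatcher.
#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>

at::Tensor add_unboxed(const at::Tensor& a, const at::Tensor& b) {
  // findSchemaOrThrow + typed<> is the usual pattern for unboxed calls;
  // typed<> checks the signature against what was registered.
  // NOTE: the signature below is assumed to match aten::add.Tensor.
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::add", "Tensor")
      .typed<at::Tensor(const at::Tensor&, const at::Tensor&, const at::Scalar&)>();
  return op.call(a, b, /*alpha=*/1);
}

at::Tensor add_boxed(const at::Tensor& a, const at::Tensor& b) {
  auto handle = c10::Dispatcher::singleton().findSchemaOrThrow("aten::add", "Tensor");
  // Boxed path: arguments travel through an IValue stack and the kernel
  // leaves its outputs on the same stack.
  c10::Stack stack;
  stack.emplace_back(a);
  stack.emplace_back(b);
  stack.emplace_back(at::Scalar(1));  // alpha
  handle.callBoxed(&stack);
  return stack.back().toTensor();
}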
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/operator_name.h>
4
+ #include <string>
5
+ #include <unordered_set>
6
+
7
+ namespace c10 {
8
+
9
+ struct TORCH_API ObservedOperators {
10
+ ObservedOperators() = delete;
11
+
12
+ static bool isObserved(const OperatorName& name);
13
+
14
+ static std::unordered_set<std::string>& getUnobservedOperatorList();
15
+ };
16
+
17
+ } // namespace c10
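A small usage sketch of the interface above; whether a given name is observed depends on the build's unobserved-operator list, so the printed value is illustrative only ("aten::add"/"Tensor" is just a familiar example name):

// Sketch: querying whether an operator participates in RecordFunction
// observation.
#include <ATen/core/dispatch/ObservedOperators.h>
#include <ATen/core/operator_name.h>
#include <iostream>

int main() {
  c10::OperatorName name("aten::add", "Tensor");
  std::cout << std::boolalpha
            << c10::ObservedOperators::isObserved(name) << "\n";
  return 0;
}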
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h ADDED
@@ -0,0 +1,314 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/function_schema.h>
4
+ #include <c10/util/Metaprogramming.h>
5
+ #include <c10/util/flat_hash_map.h>
6
+ #include <c10/util/either.h>
7
+ #include <c10/util/Optional.h>
8
+ #include <c10/core/DispatchKey.h>
9
+ #include <c10/core/PyHandleCache.h>
10
+ #include <c10/core/SafePyObject.h>
11
+ #include <ATen/core/ivalue.h>
12
+ #include <ATen/core/boxing/KernelFunction.h>
13
+ #include <ATen/core/dispatch/DispatchKeyExtractor.h>
14
+
15
+ #include <ATen/core/dispatch/OperatorOptions.h>
16
+ #include <ATen/core/dispatch/CppSignature.h>
17
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
18
+ #include <ATen/core/enum_tag.h>
19
+
20
+ #include <list>
21
+ #include <array>
22
+
23
+ #ifdef C10_MOBILE
24
+ #define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
25
+ #endif
26
+
27
+ namespace c10 {
28
+
29
+ class Dispatcher;
30
+
31
+ namespace impl {
32
+
33
+ // This data structure represents a kernel that was registered to us from a
34
+ // user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
35
+ // about the kernel that isn't necessary for actual dispatching (this is why
36
+ // we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
37
+ // giving good error messages.
38
+ struct AnnotatedKernel final {
39
+ AnnotatedKernel(KernelFunction k, std::unique_ptr<FunctionSchema> s, std::string d)
40
+ : kernel(std::move(k))
41
+ , inferred_function_schema(std::move(s))
42
+ , debug(std::move(d))
43
+ {}
44
+ AnnotatedKernel() = default;
45
+ KernelFunction kernel;
46
+ std::unique_ptr<FunctionSchema> inferred_function_schema;
47
+ // A little debug string to help us identify the kernel in question.
48
+ // Most importantly it records the TORCH_LIBRARY block that did the
49
+ // registration.
50
+ std::string debug;
51
+ };
52
+
53
+ // This data structure represents operator schema, with metadata specifying
54
+ // where the registration of this schema occurred
55
+ struct AnnotatedSchema final {
56
+ AnnotatedSchema(FunctionSchema s, std::string d)
57
+ : schema(std::move(s))
58
+ , debug(std::move(d))
59
+ {}
60
+ FunctionSchema schema;
61
+ std::string debug;
62
+ };
63
+
64
+ // Internal data structure that records information about a specific operator.
65
+ // It's not part of the public API; typically, users will interact with
66
+ // OperatorHandle instead.
67
+ //
68
+ // Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
69
+ // lock (this is important because some methods in OperatorEntry access
70
+ // dispatcher state)
71
+ class TORCH_API OperatorEntry final {
72
+ public:
73
+ explicit OperatorEntry(OperatorName&& operator_name);
74
+
75
+ OperatorEntry(const OperatorEntry&) = delete;
76
+ OperatorEntry(OperatorEntry&&) noexcept = delete;
77
+ OperatorEntry& operator=(const OperatorEntry&) = delete;
78
+ OperatorEntry& operator=(OperatorEntry&&) noexcept = delete;
79
+
80
+ const FunctionSchema& schema() const {
81
+ TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet");
82
+ return schema_->schema;
83
+ }
84
+ const std::string& debug() const {
85
+ TORCH_INTERNAL_ASSERT(schema_.has_value());
86
+ return schema_->debug;
87
+ }
88
+ bool hasSchema() const {
89
+ return schema_.has_value();
90
+ }
91
+
92
+ bool isObserved() const {
93
+ return is_observed_;
94
+ }
95
+
96
+ // We may allocate an OperatorEntry for an operator even when we don't
97
+ // have a schema. When we receive the schema registration, we post
98
+ // facto register a schema.
99
+ //
100
+ // NB: registerSchema/deregisterSchema are not idempotent; if you
101
+ // attempt to register a schema when one is already present or vice
102
+ // versa that is an error. (Refcounting for the registrations is
103
+ // handled in the OperatorHandle in Dispatcher)
104
+ void registerSchema(FunctionSchema&&, std::string&& debug, std::vector<at::Tag> tags = {});
105
+ void deregisterSchema();
106
+
107
+ const OperatorName& operator_name() const {
108
+ return name_;
109
+ }
110
+
111
+ #ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
112
+ using AnnotatedKernelContainer = std::array<AnnotatedKernel, 1>;
113
+ #else
114
+ using AnnotatedKernelContainer = std::list<AnnotatedKernel>;
115
+ #endif
116
+ using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator;
117
+
118
+ // Why are kernels and fallback asymmetric? It has to do with ownership.
119
+ // Kernels and the computed dispatch tables for them are canonically
120
+ // owned by OperatorEntry, but backend fallbacks are specified once
121
+ // and apply for all operators, so they should be owned by Dispatcher.
122
+ // However, the registration of a backend fallback affects the
123
+ // state of the computed dispatch table, so when a backend fallback
124
+ // is updated, we need to update the operator tables too. Thus,
125
+ // registerKernel is the mechanism by which we give kernels to
126
+ // operator entry to own (and update dispatch table), but we only
127
+ // need a non-owning mechanism to update fallback.
128
+
129
+ // Precondition: Dispatcher::mutex_ is held
130
+ // Postcondition: caller is responsible for disposing of the kernel
131
+ AnnotatedKernelContainerIterator registerKernel(
132
+ const Dispatcher& dispatcher,
133
+ c10::optional<DispatchKey> dispatch_key,
134
+ KernelFunction kernel,
135
+ c10::optional<CppSignature> cpp_signature,
136
+ std::unique_ptr<FunctionSchema> inferred_function_schema,
137
+ std::string debug
138
+ );
139
+
140
+ // Precondition: Dispatcher::mutex_ is held
141
+ void deregisterKernel_(
142
+ const Dispatcher& dispatcher,
143
+ c10::optional<DispatchKey> dispatch_key,
144
+ AnnotatedKernelContainerIterator kernel
145
+ );
146
+
147
+ // Precondition: Dispatcher::mutex_ is held
148
+ void updateFallback(
149
+ const Dispatcher& dispatcher,
150
+ DispatchKey dispatch_key
151
+ );
152
+
153
+ // Precondition: Dispatcher::mutex_ is held
154
+ void updateSchemaAliasAnalysis(AliasAnalysisKind a) {
155
+ TORCH_INTERNAL_ASSERT(schema_.has_value());
156
+ schema_->schema.setAliasAnalysis(a);
157
+ }
158
+
159
+ std::string dumpComputedTable() const;
160
+ std::string dumpState() const;
161
+ void checkInvariants() const;
162
+
163
+ const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; }
164
+
165
+ // Asserts that the given FuncType is correct for calling this operator in an unboxed way.
166
+ template<class FuncType>
167
+ inline void assertSignatureIsCorrect() {
168
+ assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
169
+ }
170
+
171
+ void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;
172
+
173
+ [[noreturn]] void reportError(DispatchKey dispatchKey) const;
174
+
175
+ const KernelFunction& lookup(DispatchKeySet ks) const {
176
+ const auto idx = ks.getDispatchTableIndexForDispatchKeySet();
177
+ if (C10_UNLIKELY(idx == -1)) {
178
+ reportError(ks.highestPriorityTypeId());
179
+ }
180
+ const auto& kernel = dispatchTable_[idx];
181
+ // A valid kernel *always* has a boxed kernel and *may* have an
182
+ // unboxed kernel. However, we typically do unboxed calls in at::
183
+ // APIs, where the kernel 1) will very likely be valid and 2)
184
+ // should have an unboxed kernel. Checking the unboxed kernel
185
+ // first will allow us to avoid touching the boxed kernel at all
186
+ // in the common case.
187
+ if (C10_UNLIKELY(!kernel.isValidUnboxed())) {
188
+ if (!kernel.isValid()) {
189
+ reportError(ks.highestPriorityTypeId());
190
+ }
191
+ }
192
+ return kernel;
193
+ }
194
+
195
+ std::string listAllDispatchKeys() const;
196
+
197
+ // Returns true if kernel_ has entry for any key in ks.
198
+ //
199
+ // Invariant: There are no alias keys in the passed-in dispatch key set.
200
+ // Note [No Alias Keys in DispatchKeySet]
201
+ // Alias keys should be checked using `hasKernelForDispatchKey`
202
+ // Alias keys shouldn't go inside of a DispatchKeySet, since they can technically
203
+ // have a value > 63 (causing overflow).
204
+ bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const;
205
+ // Returns true if kernel_ has entry for a particular key.
206
+ bool hasKernelForDispatchKey(DispatchKey k) const;
207
+ // Retrieves the kernel entry at a particular key. Symmetric with
208
+ // hasKernelForDispatchKey. To get the AnnotatedKernel, see
209
+ // getKernelForDispatchKey (private)
210
+ const KernelFunction& kernelForDispatchKey(DispatchKey k) const;
211
+ // Returns true if the "computed table" has an entry for a particular key.
212
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const;
213
+ // Returns all the operator tags added at the time of registration
214
+ const std::vector<at::Tag>& getTags() const;
215
+ void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback);
216
+
217
+ template <typename F>
218
+ PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const {
219
+ return py_cache_.ptr_or(self_interpreter, slow_accessor);
220
+ }
221
+
222
+ private:
223
+
224
+ OperatorName name_;
225
+ c10::optional<AnnotatedSchema> schema_;
226
+ #ifndef C10_MOBILE
227
+ std::vector<at::Tag> tags_;
228
+ #endif
229
+ std::array<KernelFunction, c10::num_runtime_entries> dispatchTable_;
230
+ DispatchKeyExtractor dispatchKeyExtractor_;
231
+ // Pointer to the torch.ops.ns.op.overload object for speed
232
+ c10::PyHandleCache py_cache_;
233
+
234
+ // kernels_ stores all registered kernels for the corresponding dispatch key
235
+ // and catchAllKernels_ stores the catch-all kernels.
236
+ // If an operator library gets loaded that overwrites an already existing kernel,
237
+ // both kernels will be in that list but only the newer one will be in
238
+ // dispatchTable. If any of the kernels go away (say the library gets
239
+ // unloaded), we remove the kernel from this list and update the
240
+ // dispatchTable if necessary.
241
+ // Kernels in the list are ordered by registration time descendingly,
242
+ // newer registrations are before older registrations.
243
+ // We do not combine dispatchTable and kernels into one hash map because
244
+ // kernels is a larger data structure and accessed quite infrequently
245
+ // while dispatchTable is accessed often and should be kept small to fit
246
+ // into CPU caches.
247
+ // Invariants:
248
+ // - dispatchTable[dispatch_key] == kernels_[dispatch_key].front()
249
+ // - dispatchTable[dispatch_key] does not exist if and only if
250
+ // kernels_[dispatch_key] does not exist
251
+ // - If kernels_[dispatch_key] exists, then it has elements.
252
+ // It is never an empty list.
253
+ //
254
+ // Why do we do that?
255
+ // -----
256
+ // We mostly do this to enable Jupyter notebooks where a cell registering
257
+ // a kernel could be executed multiple times and the later execution
258
+ // should overwrite the earlier one. Note that this still fails when the
259
+ // function schema changed between the executions, but it works as long
260
+ // as the function schema didn't change. A better solution would be to
261
+ // unload the old extension library from the Jupyter cell when the cell is
262
+ // re-executed and then only allow one kernel here, i.e. error if a kernel
263
+ // is already registered, but that's a lot of effort to implement and
264
+ // currently not high-pri.
265
+ ska::flat_hash_map<DispatchKey,
266
+ #ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
267
+ // On mobile, we needn't worry about Jupyter notebooks.
268
+ std::array<AnnotatedKernel, 1>
269
+ #else
270
+ std::list<AnnotatedKernel>
271
+ #endif
272
+ > kernels_;
273
+
274
+ const AnnotatedKernel& missingKernel() const;
275
+ const AnnotatedKernel& ambiguousAutogradOtherKernel() const;
276
+
277
+ // cpp_signature_ stores function signature if any of
278
+ // the kernels was created in a way that allowed us to know the function
279
+ // signature (i.e. by supplying an unboxed C++ kernel function).
280
+ // If this is set, it will be used to check that future kernel
281
+ // registrations match and it will be used in unboxed function calls
282
+ // to verify their arguments against the known function signature.
283
+ struct CppSignatureWithDebug {
284
+ CppSignature signature;
285
+ std::string debug;
286
+ c10::optional<DispatchKey> dispatch_key;
287
+ };
288
+ c10::optional<CppSignatureWithDebug> cpp_signature_;
289
+ c10::optional<CppSignatureWithDebug> sym_cpp_signature_;
290
+
291
+ // A Python custom error handler for OperatorEntry::reportError
292
+ std::unique_ptr<c10::SafePyObject> report_error_callback_;
293
+
294
+ // Whether this operator needs to be observed with RecordFunction
295
+ const bool is_observed_;
296
+
297
+ [[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const;
298
+ const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const;
299
+ std::pair<const AnnotatedKernel&, const char*> computeDispatchTableEntryWithDebug(
300
+ const c10::Dispatcher& dispatcher, DispatchKey dispatch_key
301
+ ) const;
302
+ // This function re-establishes the invariant that dispatchTable
303
+ // contains the front element from the kernels list for a given runtime dispatch key.
304
+ void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
305
+ // Like above, but also handles alias dispatch keys.
306
+ void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
307
+ // Like above, but for ALL entries in the dispatch table.
308
+ void updateDispatchTableFull_(const c10::Dispatcher& dispatcher);
309
+ // Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front().
310
+ const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const;
311
+ };
312
+
313
+ } // namespace impl
314
+ } // namespace c10
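The key invariant documented above -- dispatchTable_ always mirrors the front of the per-key kernels_ list, with the newest registration first -- can be captured in a few lines. The following is a self-contained toy model using plain STL containers, not the real OperatorEntry:

// Toy model only: newest registration wins; deregistering it restores the
// previous one, mirroring the kernels_/dispatchTable_ invariant above.
#include <functional>
#include <iostream>
#include <list>
#include <string>
#include <unordered_map>

using Kernel = std::function<void()>;

struct ToyOperatorEntry {
  std::unordered_map<std::string, std::list<Kernel>> kernels;  // like kernels_
  std::unordered_map<std::string, Kernel> table;               // like dispatchTable_

  std::list<Kernel>::iterator registerKernel(const std::string& key, Kernel k) {
    auto& lst = kernels[key];
    lst.push_front(std::move(k));  // newer registrations go to the front
    table[key] = lst.front();      // table always tracks the front element
    return lst.begin();
  }

  void deregisterKernel(const std::string& key, std::list<Kernel>::iterator it) {
    auto& lst = kernels[key];
    lst.erase(it);
    if (lst.empty()) {
      kernels.erase(key);
      table.erase(key);            // no kernel left for this key
    } else {
      table[key] = lst.front();    // an older registration becomes active again
    }
  }
};

int main() {
  ToyOperatorEntry entry;
  auto first = entry.registerKernel("CPU", [] { std::cout << "kernel v1\n"; });
  auto second = entry.registerKernel("CPU", [] { std::cout << "kernel v2\n"; });
  entry.table["CPU"]();                    // prints "kernel v2" (newest wins)
  entry.deregisterKernel("CPU", second);
  entry.table["CPU"]();                    // prints "kernel v1" again
  (void)first;
  return 0;
}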
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
+ namespace c10 {
6
+
7
+ enum class AliasAnalysisKind : uint8_t {
8
+ INTERNAL_SPECIAL_CASE,
9
+ CONSERVATIVE, // The most conservative alias analysis type, assumes
10
+ // side-effects. This is the default analysis.
11
+ FROM_SCHEMA,
12
+ PURE_FUNCTION
13
+ };
14
+
15
+ #if !defined(_MSC_VER)
16
+ constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
17
+ #endif
18
+ inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
19
+ return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE)
20
+ ? "CONSERVATIVE"
21
+ : (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA)
22
+ ? "FROM_SCHEMA"
23
+ : (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION)
24
+ ? "PURE_FUNCTION"
25
+ : (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE)
26
+ ? "INTERNAL_SPECIAL_CASE"
27
+ : "UNKNOWN";
28
+ }
29
+
30
+ } // namespace c10
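A trivial usage sketch of the helper above (CONSERVATIVE is the default analysis for registrations that don't specify one, per the enum comment):

#include <ATen/core/dispatch/OperatorOptions.h>
#include <iostream>

int main() {
  // Prints "FROM_SCHEMA" followed by "CONSERVATIVE".
  std::cout << c10::toString(c10::AliasAnalysisKind::FROM_SCHEMA) << "\n"
            << c10::toString(c10::AliasAnalysisKind::CONSERVATIVE) << "\n";
  return 0;
}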
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ #include <functional>
4
+
5
+ namespace c10 {
6
+
7
+ class RegistrationHandleRAII final {
8
+ public:
9
+ explicit RegistrationHandleRAII(std::function<void()> onDestruction)
10
+ : onDestruction_(std::move(onDestruction)) {}
11
+
12
+ ~RegistrationHandleRAII() {
13
+ if (onDestruction_) {
14
+ onDestruction_();
15
+ }
16
+ }
17
+
18
+ RegistrationHandleRAII(const RegistrationHandleRAII&) = delete;
19
+ RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete;
20
+
21
+ RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept
22
+ : onDestruction_(std::move(rhs.onDestruction_)) {
23
+ rhs.onDestruction_ = nullptr;
24
+ }
25
+
26
+ RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept {
27
+ onDestruction_ = std::move(rhs.onDestruction_);
28
+ rhs.onDestruction_ = nullptr;
29
+ return *this;
30
+ }
31
+
32
+ private:
33
+ std::function<void()> onDestruction_;
34
+ };
35
+
36
+ }
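A minimal usage sketch of the handle above: the callback fires exactly once, when the last handle in a move chain is destroyed. The print statements stand in for a real deregistration:

#include <ATen/core/dispatch/RegistrationHandleRAII.h>
#include <iostream>

c10::RegistrationHandleRAII fakeRegister() {
  // In real code the callback would undo a dispatcher registration.
  return c10::RegistrationHandleRAII([] { std::cout << "deregistered\n"; });
}

int main() {
  {
    c10::RegistrationHandleRAII handle = fakeRegister();  // callback moved into `handle`
    std::cout << "registered\n";
  }  // `handle` destroyed here -> "deregistered" printed exactly once
  return 0;
}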
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h ADDED
@@ -0,0 +1,238 @@
1
+ #pragma once
2
+
3
+ #include <memory>
4
+ #include <type_traits>
5
+
6
+ #include <ATen/core/jit_type_base.h>
7
+ #include <c10/util/Optional.h>
8
+
9
+ namespace c10 {
10
+
11
+ using DynamicTypeBits = std::uint32_t;
12
+ #define DYNAMIC_TYPE_BIT(x) (1u << x)
13
+
14
+ constexpr DynamicTypeBits kDynamicCovariantTypeBit = DYNAMIC_TYPE_BIT(31);
15
+ constexpr DynamicTypeBits kDynamicAnyTypeBit = DYNAMIC_TYPE_BIT(30);
16
+
17
+ constexpr DynamicTypeBits kDynamicNoneTypeBit = DYNAMIC_TYPE_BIT(1);
18
+ constexpr DynamicTypeBits kDynamicIntTypeBit = DYNAMIC_TYPE_BIT(3);
19
+ constexpr DynamicTypeBits kDynamicFloatTypeBit = DYNAMIC_TYPE_BIT(4);
20
+ constexpr DynamicTypeBits kDynamicComplexTypeBit = DYNAMIC_TYPE_BIT(5);
21
+ constexpr DynamicTypeBits kDynamicListTypeBit = DYNAMIC_TYPE_BIT(7);
22
+ constexpr DynamicTypeBits kDynamicTupleTypeBit = DYNAMIC_TYPE_BIT(8);
23
+ constexpr DynamicTypeBits kDynamicClassTypeBit = DYNAMIC_TYPE_BIT(10);
24
+
25
+ #define FORALL_DYNAMIC_TYPES(_) \
26
+ _(Tensor, DYNAMIC_TYPE_BIT(0), 1) \
27
+ _(None, kDynamicNoneTypeBit, 1) \
28
+ _(Bool, DYNAMIC_TYPE_BIT(2), 1) \
29
+ _(Int, kDynamicIntTypeBit, 1) \
30
+ _(Float, kDynamicFloatTypeBit, 1) \
31
+ _(Complex, kDynamicComplexTypeBit, 1) \
32
+ _(Number, \
33
+ (kDynamicIntTypeBit | kDynamicFloatTypeBit | kDynamicComplexTypeBit), \
34
+ 1) \
35
+ _(String, DYNAMIC_TYPE_BIT(6), 1) \
36
+ _(List, kDynamicListTypeBit, 0) \
37
+ _(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0) \
38
+ _(Dict, DYNAMIC_TYPE_BIT(9), 0) \
39
+ _(Class, kDynamicClassTypeBit, 0) \
40
+ _(Optional, \
41
+ (DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), \
42
+ 0) \
43
+ _(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1) \
44
+ _(AnyTuple, \
45
+ (kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit), \
46
+ 1) \
47
+ _(DeviceObj, DYNAMIC_TYPE_BIT(12), 1) \
48
+ _(StreamObj, DYNAMIC_TYPE_BIT(13), 1) \
49
+ _(Capsule, DYNAMIC_TYPE_BIT(14), 1) \
50
+ _(Generator, DYNAMIC_TYPE_BIT(15), 1) \
51
+ _(Storage, DYNAMIC_TYPE_BIT(16), 1) \
52
+ _(Var, DYNAMIC_TYPE_BIT(17), 0) \
53
+ _(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1) \
54
+ _(QScheme, DYNAMIC_TYPE_BIT(18), 1) \
55
+ _(Quantizer, DYNAMIC_TYPE_BIT(19), 1) \
56
+ _(AnyEnum, DYNAMIC_TYPE_BIT(20), 1) \
57
+ _(RRef, DYNAMIC_TYPE_BIT(21), 0) \
58
+ _(Future, DYNAMIC_TYPE_BIT(22), 0) \
59
+ _(Await, DYNAMIC_TYPE_BIT(23), 0) \
60
+ _(Any, 0xffffffff, 1)
61
+
62
+ #define FORALL_DYNAMIC_TYPES_FAKE(_) \
63
+ _(ScalarType, kDynamicIntTypeBit, 1) \
64
+ _(Layout, kDynamicIntTypeBit, 1) \
65
+ _(SymInt, kDynamicIntTypeBit, 1) \
66
+ _(MemoryFormat, kDynamicIntTypeBit, 1)
67
+
68
+ #define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type;
69
+ FORALL_DYNAMIC_TYPES(FORWARD_DECL_TYPE)
70
+ FORALL_DYNAMIC_TYPES_FAKE(FORWARD_DECL_TYPE)
71
+ #undef FORWARD_DECL_TYPE
72
+
73
+ class DynamicType;
74
+ using DynamicTypePtr = std::shared_ptr<DynamicType>;
75
+
76
+ /**
77
+ * DynamicType is designed as a low dependency type system for TorchScript. The
78
+ * existing JIT types are used for both compilation and runtime, which makes
79
+ * sense for server contexts because we often compile and run the model in
80
+ * the same process. However, this doesn't hold for mobile devices, where we
81
+ * always compile a model ahead of time, so there will be dependencies
82
+ * that are not needed but still get built into the mobile runtime, causing binary size
83
+ * bloat, by design. Every basic type like Int, Bool or String will bring their
84
+ * vtable, typeinfo, constructor, destructor and even more data from their
85
+ * specializations for STL types to the binary causing a long tail bloat.
86
+ *
87
+ * The core problem is about the complexity to implement and maintain a single
88
+ * type system for both analysis and execution purposes. Although they should
89
+ * have exactly the same semantics, in practice implementing a unified abstraction
90
+ * adds conceptual and representational overhead for both sides of the world.
91
+ *
92
+ * To address the issues, DynamicType implements a minimal subset of JIT types
93
+ * and uses a generic algorithm to test all subtyping relations. To achieve
94
+ * this, we assign each dynamic type a single integer tag to represent its
95
+ * semantics. More specifically, a dynamic type is defined as a set of "control
96
+ * bits" and "data bits", where control bits describe the special behavior when
97
+ * testing a type and data bits map to the identity of each nominal type. We use bit
98
+ * operations to perform all the tests.
99
+ *
100
+ * For example, a "covariant bit" is a control bit used to describe if a type
101
+ * is covariant, right now the most used one is tuple type, and in addition to
102
+ * the control bit, tuple type's data bit is the 8th bit from the LSB. Control
103
+ * bits start from MSB and data bits start from LSB.
104
+ *
105
+ * If two types are equal, then they are subtype of each other, also if the bits
106
+ * from one type tag is subset of the other tag, it automatically becomes a
107
+ * subtype of the other. This simplifies the subtyping logic a lot, and over the
108
+ * long term it is possible to adopt this scheme on the server side as well.
109
+ * Special cases can be added but they generally should not take too much code
110
+ * size.
111
+ *
112
+ * DynamicType may or may not inherit from c10::Type because it's not the core
113
+ * requirement of DynamicType to interface with existing JIT types, but we might
114
+ * want to inherit from c10::Type to reduce the migration cost.
115
+ */
116
+ class DynamicType : public SharedType {
117
+ using ClassTypePtr = std::shared_ptr<const c10::ClassType>;
118
+
119
+ /**
120
+ * An implementation detail to support NamedTuple.
121
+ */
122
+ struct LabeledDynamicType {
123
+ c10::optional<std::string> label;
124
+ DynamicTypePtr ty;
125
+ explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {}
126
+
127
+ bool equals(const LabeledDynamicType& other) const;
128
+ bool isSubtypeOf(const LabeledDynamicType& other) const;
129
+ };
130
+
131
+ public:
132
+ // TODO Change Ptr to DynamicTypePtr when all migrations are done.
133
+ using Ptr = TypePtr;
134
+ using ElementType = DynamicType;
135
+ ~DynamicType() override;
136
+
137
+ struct Arguments {
138
+ Arguments() = default;
139
+ Arguments(c10::ArrayRef<TypePtr>);
140
+ Arguments(const std::vector<c10::string_view>&, c10::ArrayRef<TypePtr>);
141
+ std::vector<LabeledDynamicType> elems;
142
+ };
143
+
144
+ enum class Tag : DynamicTypeBits {
145
+ #define DYNAMIC_TYPE_ITEM(NAME, VAL, _) NAME = VAL,
146
+ FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_ITEM)
147
+ FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_ITEM)
148
+ #undef DYNAMIC_TYPE_ITEM
149
+ };
150
+
151
+ bool equals(const Type& rhs) const override;
152
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
153
+ std::string str() const override;
154
+ static const TypeKind Kind = TypeKind::DynamicType;
155
+ static TORCH_API DynamicTypePtr create(Type& ty);
156
+
157
+ explicit DynamicType(Tag, Arguments);
158
+ explicit DynamicType(Tag, c10::string_view, Arguments);
159
+
160
+ TypePtr containedType(size_t) const override;
161
+ size_t containedTypeSize() const override;
162
+ Tag tag() const {
163
+ return tag_;
164
+ }
165
+ const c10::optional<std::string>& name() const {
166
+ return name_;
167
+ }
168
+ const Arguments& arguments() const {
169
+ return arguments_;
170
+ }
171
+ TORCH_API TypeKind dynamicKind() const;
172
+
173
+ // Should be used only on the server side to restore static type information.
174
+ #ifndef C10_MOBILE
175
+ TORCH_API
176
+ #endif
177
+ TypePtr fallback() const;
178
+
179
+ private:
180
+ bool symmetric() const override {
181
+ return false;
182
+ }
183
+ friend struct Type;
184
+ static std::shared_ptr<const DynamicType> create(const Type& ty);
185
+ DynamicType(const Type& other);
186
+ bool equals(const DynamicType& other) const;
187
+
188
+ template <typename F>
189
+ bool compareArguments(const DynamicType& other, F&& f) const {
190
+ if (arguments_.elems.size() != other.arguments_.elems.size()) {
191
+ return false;
192
+ }
193
+ for (size_t i = 0; i < arguments_.elems.size(); i++) {
194
+ if (!f(arguments_.elems[i], other.arguments_.elems[i])) {
195
+ return false;
196
+ }
197
+ }
198
+ return true;
199
+ }
200
+
201
+ Tag tag_;
202
+ c10::optional<std::string> name_;
203
+ union {
204
+ Arguments arguments_;
205
+ ClassTypePtr class_;
206
+ };
207
+ };
208
+
209
+ template <typename T>
210
+ struct DynamicTypeTrait {
211
+ C10_NOINLINE static auto tagValue() {
212
+ TORCH_CHECK(false);
213
+ return DynamicType::Tag::Any;
214
+ }
215
+ };
216
+
217
+ namespace detail {
218
+ C10_NOINLINE DynamicTypePtr makeBaseType(DynamicType::Tag tag);
219
+ }
220
+
221
+ #define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE) \
222
+ template <> \
223
+ struct TORCH_API DynamicTypeTrait<NAME##Type> { \
224
+ C10_ERASE static auto tagValue() { \
225
+ return DynamicType::Tag::NAME; \
226
+ } \
227
+ static constexpr bool isBaseType = IS_BASE_TYPE; \
228
+ template <typename T = const DynamicTypePtr&> \
229
+ static std::enable_if_t<isBaseType, T> getBaseType() { \
230
+ static auto type = detail::makeBaseType(tagValue()); \
231
+ return type; \
232
+ } \
233
+ }; // namespace c10
234
+ FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_TAG_VALUE)
235
+ FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_TAG_VALUE)
236
+ #undef DYNAMIC_TYPE_TAG_VALUE
237
+
238
+ } // namespace c10
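
The subset test described in the comment block can be illustrated directly on raw tag bits. The sketch below is illustrative only and is not the real DynamicType implementation, which additionally handles covariance, class types, and contained-type arguments.

#include <cassert>
#include <cstdint>

// Subset-of-bits test: `a` is a subtype of `b` if every bit set in `a` is also set in `b`.
constexpr bool tag_is_subtype_of(std::uint32_t a, std::uint32_t b) {
  return (a & b) == a;
}

int main() {
  constexpr std::uint32_t kInt     = 1u << 3;  // kDynamicIntTypeBit
  constexpr std::uint32_t kFloat   = 1u << 4;  // kDynamicFloatTypeBit
  constexpr std::uint32_t kComplex = 1u << 5;  // kDynamicComplexTypeBit
  constexpr std::uint32_t kNumber  = kInt | kFloat | kComplex;
  static_assert(tag_is_subtype_of(kInt, kNumber), "Int <: Number");
  static_assert(!tag_is_subtype_of(kNumber, kInt), "Number is not <: Int");
  assert(tag_is_subtype_of(kFloat, kFloat));   // equal types are subtypes of each other
}
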
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings_class.h ADDED
@@ -0,0 +1,34 @@
1
+ #include <cstdint>
2
+ #include <cstring>
3
+ #include <mutex>
4
+ #include <string>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+ #include <ATen/core/symbol.h>
8
+ #include <c10/util/Exception.h>
9
+
10
+ namespace c10 {
11
+
12
+ struct TORCH_API InternedStrings {
13
+ InternedStrings();
14
+ Symbol symbol(const std::string& s);
15
+ std::pair<const char*, const char*> string(Symbol sym);
16
+ Symbol ns(Symbol sym);
17
+
18
+ private:
19
+ // prereq - holding mutex_
20
+ Symbol _symbol(const std::string& s);
21
+ std::pair<const char*, const char*> customString(Symbol sym);
22
+ std::unordered_map<std::string, Symbol> string_to_sym_;
23
+
24
+ struct SymbolInfo {
25
+ Symbol ns;
26
+ std::string qual_name;
27
+ std::string unqual_name;
28
+ };
29
+ std::vector<SymbolInfo> sym_to_info_;
30
+
31
+ std::mutex mutex_;
32
+ };
33
+
34
+ } // namespace c10
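
InternedStrings is the table that backs c10::Symbol; callers normally go through the Symbol API rather than this class. A small usage sketch, assuming the usual helpers declared in ATen/core/symbol.h:

#include <ATen/core/symbol.h>
#include <iostream>

int main() {
  // Interning the same qualified string twice yields the same Symbol value.
  c10::Symbol a = c10::Symbol::fromQualString("aten::add");
  c10::Symbol b = c10::Symbol::fromQualString("aten::add");
  std::cout << (a == b) << '\n';          // 1
  std::cout << a.toQualString() << '\n';  // aten::add
}
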
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h ADDED
@@ -0,0 +1,83 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <ATen/TensorUtils.h>
5
+ #include <ATen/core/List.h>
6
+ #include <c10/core/TensorOptions.h>
7
+
8
+ /*
9
+ * [Note: hacky wrapper removal for optional tensor]
10
+ *
11
+ * The kernel implementation takes an optional tensor marked in the schema as
12
+ * Tensor? but the C++ function takes Tensor instead of the optional<Tensor>
13
+ * expected by the dispatcher.
14
+ *
15
+ * To remove the hacky wrapper, the C++ function is changed to take
16
+ * optional<Tensor> and unwrap the Tensor value at the beginning of
17
+ * the function, e.g.:
18
+ * > c10::MaybeOwned<Tensor> weight_maybe_owned =
19
+ * > at::borrow_from_optional_tensor(weight_opt);
20
+ * > const Tensor& weight = *weight_maybe_owned;
21
+ *
22
+ * We may want to make the kernel handle optional directly without
23
+ * going through the creation of a default-constructed Tensor in
24
+ * at::borrow_from_optional_tensor.
25
+ */
26
+
27
+ /*
28
+ * [Note: hacky wrapper removal for TensorOptions]
29
+ *
30
+ * The kernel implementation takes a TensorOptions argument but the dispatcher
31
+ * expects separate arguments for dtype, layout, device, pin_memory.
32
+ *
33
+ * To remove the hacky wrapper, the kernel implementation is changed to take
34
+ * the 4 arguments (dtype, layout, device, pin_memory), and assemble the
35
+ * TensorOptions value at the beginning of the function, e.g.:
36
+ * > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
37
+ * > .device(device).pinned_memory(pin_memory);
38
+ *
39
+ * We may want make the kernel handle these parameters directly without going
40
+ * through the creation of a TensorOptions value.
41
+ */
42
+
43
+ namespace c10 {
44
+ namespace impl {
45
+
46
+ TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
47
+
48
+ inline void check_and_update_common_device(optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
49
+ // TODO: Remove this once the following issue is addressed:
50
+ // https://github.com/pytorch/pytorch/issues/57380
51
+ if (!tensor.defined()) {
52
+ return;
53
+ }
54
+
55
+ if (!common_device.has_value()) {
56
+ common_device = tensor.device();
57
+ return;
58
+ }
59
+
60
+ if (C10_UNLIKELY(common_device != tensor.device())) {
61
+ common_device_check_failure(*common_device, tensor, methodName, argName);
62
+ }
63
+ }
64
+
65
+ inline void check_and_update_common_device(optional<Device>& common_device, const optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
66
+ if (tensor.has_value()) {
67
+ check_and_update_common_device(common_device, tensor.value(), methodName, argName);
68
+ }
69
+ }
70
+
71
+ inline void check_and_update_common_device(optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
72
+ for (const auto& tensor : tensors) {
73
+ check_and_update_common_device(common_device, tensor, methodName, argName);
74
+ }
75
+ }
76
+
77
+ inline void check_and_update_common_device(optional<Device>& common_device, const List<optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
78
+ for (const auto& tensor : tensors) {
79
+ check_and_update_common_device(common_device, tensor, methodName, argName);
80
+ }
81
+ }
82
+ } // namespace impl
83
+ } // namespace c10
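
The check_and_update_common_device() overloads above are what generated wrappers call once per tensor argument to enforce a single common device. Conceptually the pattern looks like the sketch below; my_add is a hypothetical kernel, not an operator from this commit.

#include <ATen/ATen.h>
#include <ATen/core/op_registration/adaption.h>
#include <c10/util/Optional.h>

at::Tensor my_add(const at::Tensor& self, const at::Tensor& other) {
  // Record the first defined tensor's device, then fail with a descriptive
  // error if any later tensor argument disagrees.
  c10::optional<c10::Device> common_device = c10::nullopt;
  c10::impl::check_and_update_common_device(common_device, self, "my_add", "self");
  c10::impl::check_and_update_common_device(common_device, other, "my_add", "other");
  return self + other;
}
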
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h ADDED
@@ -0,0 +1,161 @@
1
+ #pragma once
2
+
3
+ /**
4
+ * This file contains functionality to take a C++ function and infer its
5
+ * c10::FunctionSchema.
6
+ */
7
+
8
+ #include <ATen/core/function_schema.h>
9
+ #include <c10/util/C++17.h>
10
+ #include <c10/util/Metaprogramming.h>
11
+
12
+ namespace c10 {
13
+ namespace detail {
14
+
15
+ namespace infer_schema {
16
+
17
+ /// The templated inference code creates `ArgumentDef` instead of `Argument`,
18
+ /// because that can be constructed at compile time and has a much smaller
19
+ /// binary size than having calls to `Argument` constructors in the template.
20
+ /// Creating `Argument` objects from `ArgumentDef` can then be done at
21
+ /// runtime in a non-templated way.
22
+ struct ArgumentDef final {
23
+ using GetTypeFn = TypePtr();
24
+ GetTypeFn* getTypeFn;
25
+ GetTypeFn* getFakeTypeFn;
26
+ constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
27
+ explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
28
+ };
29
+
30
+ template<bool V>
31
+ struct bool_t {};
32
+ template<> struct bool_t<true> : std::true_type {};
33
+ template<> struct bool_t<false> : std::false_type {};
34
+
35
+ /// Checks the static C++ types `Types` for correctness to catch common error cases.
36
+ template <class... Types>
37
+ constexpr int checkStaticTypes() {
38
+ // Give nice error messages for some of the common error cases.
39
+ // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
40
+ static_assert(guts::conjunction<
41
+ bool_t<!std::is_integral<Types>::value || std::is_same<Types, int8_t>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
42
+ >::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
43
+ static_assert(guts::conjunction<
44
+ bool_t<!std::is_same<Types, float>::value>...
45
+ >::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
46
+ return 0;
47
+ }
48
+
49
+ template <typename... Ts, size_t... Is>
50
+ constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
51
+ return (
52
+ // Check types for common errors
53
+ checkStaticTypes<Ts...>(),
54
+
55
+ // Create the return value
56
+ std::array<ArgumentDef, sizeof...(Ts)>{
57
+ ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
58
+ );
59
+ }
60
+
61
+ /// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
62
+ /// as template arguments.
63
+ template<class ParameterTypes> struct createArguments final {};
64
+ template<class... ParameterTypes>
65
+ struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
66
+ static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
67
+ return createArgumentVectorFromTypes<ParameterTypes...>(
68
+ std::make_index_sequence<sizeof...(ParameterTypes)>()
69
+ );
70
+ }
71
+ };
72
+
73
+ /// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
74
+ /// as a tuple (i.e. in the way c10 kernels return values).
75
+ /// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
76
+ /// It can be an empty tuple<>, or void for kernels that don't return anything.
77
+ /// It can be a single type A (i.e. no tuple) for the case where a kernel just
78
+ /// returns one value.
79
+ template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};
80
+
81
+ template<class... ReturnTypes>
82
+ struct createReturns<std::tuple<ReturnTypes...>, void> final {
83
+ static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
84
+ return createArgumentVectorFromTypes<ReturnTypes...>(
85
+ std::make_index_sequence<sizeof...(ReturnTypes)>()
86
+ );
87
+ }
88
+ };
89
+
90
+ template<class ReturnType>
91
+ struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
92
+ static constexpr std::array<ArgumentDef, 1> call() {
93
+ return createReturns<std::tuple<ReturnType>>::call();
94
+ }
95
+ };
96
+
97
+ template<>
98
+ struct createReturns<void, void> final {
99
+ static constexpr std::array<ArgumentDef, 0> call() {
100
+ return createReturns<std::tuple<>>::call();
101
+ }
102
+ };
103
+
104
+ template <typename ReturnType>
105
+ struct createSingleReturn {
106
+ static constexpr std::array<ArgumentDef, 1> call() {
107
+ return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
108
+ }
109
+ };
110
+
111
+ TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
112
+ TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
113
+
114
+ /// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
115
+ /// function. Flattens std::tuple returns into multiple return types
116
+ template <typename FunctionTraits>
117
+ FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() {
118
+ using ReturnType = typename FunctionTraits::return_type;
119
+ using ParameterTypes = typename FunctionTraits::parameter_types;
120
+
121
+ // arguments and returns are computed into a std::array at compile time and embedded into the binary.
122
+ // The only code executed at runtime here is the one that creates a std::vector
123
+ // of the arguments/returns from the std::array.
124
+ constexpr auto arguments = createArguments<ParameterTypes>::call();
125
+ constexpr auto returns = createReturns<ReturnType>::call();
126
+
127
+ return make_function_schema(arguments, returns);
128
+ }
129
+
130
+ /// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
131
+ /// function. Preserves std::tuple returns as a Tuple return type
132
+ template <typename FunctionTraits>
133
+ FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) {
134
+ using ReturnType = typename FunctionTraits::return_type;
135
+ using ParameterTypes = typename FunctionTraits::parameter_types;
136
+
137
+ // arguments and returns are computed into a std::array at compile time and embedded into the binary.
138
+ // The only code executed at runtime here is the one that creates a std::vector
139
+ // of the arguments/returns from the std::array.
140
+ constexpr auto arguments = createArguments<ParameterTypes>::call();
141
+ constexpr auto returns = createSingleReturn<ReturnType>::call();
142
+
143
+ return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
144
+ }
145
+
146
+ }
147
+ }
148
+
149
+ template<class FuncType>
150
+ FunctionSchema inferFunctionSchemaFlattenedReturns() {
151
+ return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns<guts::infer_function_traits_t<FuncType>>();
152
+ }
153
+
154
+ template<class FuncType>
155
+ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) {
156
+ return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
157
+ }
158
+
159
+ TORCH_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
160
+
161
+ }
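
A short sketch of what these entry points produce for a plain C++ function; my_op and the "myns::my_op" name are made up, and the exact argument names in the printed schema are whatever the inference assigns (illustrative only).

#include <ATen/ATen.h>
#include <ATen/core/op_registration/infer_schema.h>
#include <iostream>

at::Tensor my_op(const at::Tensor& a, int64_t n) { return a * n; }

int main() {
  // Infers a "(Tensor, int) -> Tensor"-shaped schema from the C++ signature.
  c10::FunctionSchema schema =
      c10::inferFunctionSchemaSingleReturn<decltype(my_op)>("myns::my_op", "");
  std::cout << schema << '\n';
}
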
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h ADDED
@@ -0,0 +1,199 @@
1
+ #pragma once
2
+
3
+ // TODO: unify to C10_MOBILE. In theory this header could be used in OSS.
4
+ #ifdef TEMPLATE_SELECTIVE_BUILD
5
+ #include <ATen/selected_mobile_ops.h>
6
+ #endif
7
+
8
+ /**
9
+ * This header implements functionality to build PyTorch with only a certain
10
+ * set of operators (+ dependencies) included.
11
+ *
12
+ * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these
13
+ * two ops will be included in your build. The allowlist records operators
14
+ * only, no overloads; if you include aten::add, all overloads of aten::add
15
+ * will be included.
16
+ *
17
+ * Internally, this is done by removing the operator registration calls
18
+ * using compile time programming, and the linker will then prune all
19
+ * operator functions that weren't registered.
20
+ * See Note [Selective build] for more details
21
+ *
22
+ * WARNING: The allowlist mechanism doesn't work for all ways you could go about
23
+ * registering an operator. If the dispatch key / operator name is not
24
+ * sufficiently obvious at compile time, then the allowlisting mechanism
25
+ * will fail (and the operator will be included in the binary anyway).
26
+ */
27
+
28
+ #include <c10/util/string_view.h>
29
+ #include <c10/core/DispatchKey.h>
30
+ #include <c10/macros/Macros.h>
31
+
32
+
33
+ #if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
34
+ #include <ATen/record_function.h>
35
+ #endif
36
+
37
+ namespace c10 {
38
+
39
+ namespace impl {
40
+
41
+ constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare
42
+
43
+ /**
44
+ * In selective build mode returns true/false depending on whether a build
45
+ * feature is available or not.
46
+ *
47
+ * In instrumenting mode (tracing mode), always returns true, and doesn't
48
+ * trigger any side effects.
49
+ */
50
+ constexpr bool is_build_feature_available(const char* name) {
51
+ #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
52
+ // Selective Build mode.
53
+ #if !defined(TORCH_BUILD_FEATURE_ALLOWLIST)
54
+ (void)name;
55
+ return true;
56
+ #else
57
+ return allowlist_contains(
58
+ C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST),
59
+ name);
60
+ #endif
61
+
62
+ #else
63
+ // Instrumenting mode.
64
+ (void)name;
65
+ return true;
66
+ #endif
67
+ }
68
+
69
+ [[noreturn]] void build_feature_required_feature_not_available(const char* feature);
70
+
71
+ /**
72
+ * Use BUILD_FEATURE_REQUIRED macro in user-code.
73
+ *
74
+ * In selective build mode becomes a no-op if the build feature passed
75
+ * in is available. If not available, throws an exception (c10::Error).
76
+ * The compiler is able to perform dead code elimination for code
77
+ * following this method if the build feature is not available.
78
+ *
79
+ * In instrumenting mode (tracing mode), registers (as a side effect)
80
+ * the presence of this specific build feature being triggered.
81
+ */
82
+ #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode
83
+
84
+ #if defined(TORCH_BUILD_FEATURE_ALLOWLIST)
85
+ #define BUILD_FEATURE_REQUIRED(NAME) \
86
+ if (!c10::impl::is_build_feature_available(NAME)) { \
87
+ ::c10::impl::build_feature_required_feature_not_available(NAME); \
88
+ }
89
+ #else // Everything trivially selected
90
+ #define BUILD_FEATURE_REQUIRED(NAME)
91
+
92
+ #endif
93
+
94
+ #else // trace mode
95
+ #define BUILD_FEATURE_REQUIRED(NAME) \
96
+ RECORD_FUNCTION_WITH_SCOPE( \
97
+ at::RecordScope::BUILD_FEATURE, \
98
+ std::string(NAME), \
99
+ {});
100
+ #endif
101
+
102
+ // Use this macro, and not is_build_feature_available
103
+ #define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME)
104
+
105
+ // returns true iff allowlist contains item
106
+ // allowlist_contains("a;bc;d", "bc") == true
107
+ constexpr bool allowlist_contains(string_view allowlist, string_view item) {
108
+ // Choose a really big value for next so that if something goes wrong
109
+ // this code will blow up in a hopefully detectable way.
110
+ size_t next = std::numeric_limits<size_t>::max();
111
+ for (size_t cur = 0; cur <= allowlist.size(); cur = next) {
112
+ next = allowlist.find(';', cur);
113
+ if (next != string_view::npos) {
114
+ if (allowlist.substr(cur, next - cur).compare(item) == 0) {
115
+ return true;
116
+ }
117
+ next++;
118
+ } else {
119
+ if (allowlist.substr(cur).compare(item) == 0) {
120
+ return true;
121
+ }
122
+ break;
123
+ }
124
+ }
125
+ return false;
126
+ }
127
+
128
+ // Returns true iff the given op name is on the allowlist
129
+ // and should be registered
130
+ constexpr bool op_allowlist_check(string_view op_name) {
131
+ assert(op_name.find("::") != string_view::npos);
132
+ // Use assert() instead of throw() due to a gcc bug. See:
133
+ // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
134
+ // https://github.com/fmtlib/fmt/issues/682
135
+ assert(op_name.find("(") == string_view::npos);
136
+ #if !defined(TORCH_OPERATOR_WHITELIST)
137
+ // If the TORCH_OPERATOR_WHITELIST parameter is not defined,
138
+ // all ops are to be registered
139
+ return true;
140
+ #else
141
+ return allowlist_contains(
142
+ C10_STRINGIZE(TORCH_OPERATOR_WHITELIST),
143
+ // This function is majorly used for mobile selective build with
144
+ // root operators, where the overload is included in the allowlist.
145
+ op_name);
146
+ // // Strip overload name (as allowlist doesn't contain overloads)
147
+ // // Another function based on this may be added when there's usage
148
+ // // on op names without overload.
149
+ // OperatorNameView::parse(op_name).name);
150
+ #endif
151
+ }
152
+
153
+ // Returns true iff the given schema string is on the allowlist
154
+ // and should be registered
155
+ constexpr bool schema_allowlist_check(string_view schema) {
156
+ #if defined(TORCH_FORCE_SCHEMA_REGISTRATION)
157
+ return true;
158
+ #else
159
+ return op_allowlist_check(schema.substr(0, schema.find("(")));
160
+ #endif
161
+ }
162
+
163
+ // Returns true iff the given custom class name is on the allowlist
164
+ // and should be registered
165
+ constexpr bool custom_class_allowlist_check(string_view custom_class_name) {
166
+ #if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST)
167
+ // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined,
168
+ // all custom classes are to be registered
169
+ (void)custom_class_name;
170
+ return true;
171
+ #else
172
+ return allowlist_contains(
173
+ C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST),
174
+ custom_class_name);
175
+ #endif
176
+ }
177
+
178
+ // schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST.
179
+ // Add this API to pass arbitrary allowlist.
180
+ constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) {
181
+ return allowlist_contains(allowlist, schema.substr(0, schema.find("(")));
182
+ }
183
+
184
+ // Returns true iff the given dispatch key is on the allowlist
185
+ // and should be registered. When we turn this on, the list of valid
186
+ // mobile dispatch keys is hard coded (but you need to make sure
187
+ // that you have the correct set of dispatch keys for this).
188
+ constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) {
189
+ #ifdef C10_MOBILE
190
+ return true;
191
+ // Disabled for now: to be enabled later!
192
+ // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
193
+ #else
194
+ return true;
195
+ #endif
196
+ }
197
+
198
+ } // namespace impl
199
+ } // namespace c10
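
Because the checks above are constexpr, the matching semantics can be exercised at compile time. A minimal illustrative sketch:

#include <ATen/core/op_registration/op_allowlist.h>

// Matching is on whole ';'-separated entries, not on substrings.
static_assert(c10::impl::allowlist_contains("aten::add;aten::sub", "aten::sub"), "");
static_assert(!c10::impl::allowlist_contains("aten::add;aten::sub", "aten::s"), "");
// op_allowlist_check("aten::add") consults TORCH_OPERATOR_WHITELIST the same way
// (and returns true for every operator when that macro is not defined).

int main() {}
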
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h ADDED
@@ -0,0 +1,596 @@
1
+ #pragma once
2
+
3
+ /**
4
+ * Include this file if you want to register operators. It includes all
5
+ * functionality needed to do so for you.
6
+ */
7
+
8
+ #include <c10/core/DispatchKey.h>
9
+ #include <c10/core/DispatchKeySet.h>
10
+ #include <c10/core/CompileTimeFunctionPointer.h>
11
+ #include <ATen/core/boxing/KernelFunction.h>
12
+ #include <ATen/core/dispatch/CppSignature.h>
13
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
14
+ #include <ATen/core/op_registration/infer_schema.h>
15
+ #if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD)
16
+ #include <torch/csrc/jit/frontend/function_schema_parser.h>
17
+ #endif
18
+ #include <ATen/core/ATenOpList.h>
19
+
20
+ namespace c10 {
21
+
22
+ namespace detail {
23
+ // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
24
+ // We do this because every argument in a function schema is expected to be convertible
25
+ // to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
26
+ // See Note [Plumbing Keys Through The Dispatcher]
27
+ template<class KernelFunctor>
28
+ std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
29
+ using func_type = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::func_type;
30
+ return std::make_unique<FunctionSchema>(inferFunctionSchemaFlattenedReturns<func_type>());
31
+ }
32
+ }
33
+
34
+ /**
35
+ * An instance of this class handles the registration for one or more operators.
36
+ * Make sure you keep the RegisterOperators instance around since it will
37
+ * deregister the operator it's responsible for in its destructor.
38
+ *
39
+ * Example:
40
+ *
41
+ * > namespace {
42
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
43
+ * > public:
44
+ * > Tensor operator()(Tensor a, Tensor b) {...}
45
+ * > };
46
+ * > }
47
+ * >
48
+ * > static auto registry = c10::RegisterOperators()
49
+ * > .op(c10::RegisterOperators::options()
50
+ * > .schema("my_op")
51
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
52
+ */
53
+ class TORCH_API RegisterOperators final {
54
+ public:
55
+ RegisterOperators() = default;
56
+ ~RegisterOperators() = default;
57
+
58
+ RegisterOperators(const RegisterOperators&) = delete;
59
+ RegisterOperators& operator=(const RegisterOperators&) = delete;
60
+ RegisterOperators(RegisterOperators&&) noexcept = default;
61
+ RegisterOperators& operator=(RegisterOperators&&) noexcept = default;
62
+
63
+ class TORCH_API Options final {
64
+ public:
65
+ Options(const Options&) = delete;
66
+ Options(Options&&) noexcept = delete;
67
+ Options& operator=(const Options&) = delete;
68
+ Options& operator=(Options&&) noexcept = delete;
69
+
70
+ // internal-only for registering stack based kernels
71
+ template<KernelFunction::BoxedKernelFunction* kernel_func>
72
+ Options&& kernel(DispatchKey dispatch_key) && {
73
+ return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
74
+ }
75
+
76
+ // internal-only for registering stack based catch-all kernels
77
+ template<KernelFunction::BoxedKernelFunction* kernel_func>
78
+ Options&& catchAllKernel() && {
79
+ return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
80
+ }
81
+
82
+ // internal only for registering caffe2 ops
83
+ Options&& schema(FunctionSchema&& schema) {
84
+ TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration.");
85
+ schemaOrName_ = FunctionSchema(std::move(schema));
86
+ return std::move(*this);
87
+ }
88
+
89
+ /**
90
+ * Use this to specify the schema for an operator. You can also specify
91
+ * the operator name only to have the function signature part of the
92
+ * schema be inferred from the kernel function.
93
+ *
94
+ * Example:
95
+ *
96
+ * > // Infer function signature from my_kernel_cpu
97
+ * > static auto registry = c10::RegisterOperators()
98
+ * > .op(c10::RegisterOperators::options()
99
+ * > .schema("my_op")
100
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
101
+ * >
102
+ * >
103
+ * > // Explicitly specify full schema
104
+ * > static auto registry = c10::RegisterOperators()
105
+ * > .op(c10::RegisterOperators::options()
106
+ * > .schema("my_op(Tensor a) -> Tensor")
107
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
108
+ */
109
+ Options&& schema(const std::string& schemaOrName) {
110
+ TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration.");
111
+
112
+ #if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD)
113
+ throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
114
+ #else
115
+ schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName);
116
+ #endif
117
+
118
+ return std::move(*this);
119
+ }
120
+
121
+ /**
122
+ * Use this to register an operator whose kernel is implemented as a functor.
123
+ * The kernel is only called for inputs matching the given dispatch key.
124
+ * You can register multiple kernels for different dispatch keys.
125
+ *
126
+ * Example:
127
+ *
128
+ * > namespace {
129
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
130
+ * > public:
131
+ * > Tensor operator()(Tensor a, Tensor b) {...}
132
+ * > };
133
+ * > }
134
+ * >
135
+ * > static auto registry = c10::RegisterOperators()
136
+ * > .op(c10::RegisterOperators::options()
137
+ * > .schema("my_op")
138
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU));
139
+ *
140
+ * The functor constructor can take arguments to configure the kernel.
141
+ * The arguments are defined in the kernel registration.
142
+ * Example:
143
+ *
144
+ * > namespace {
145
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
146
+ * > public:
147
+ * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
148
+ * > : ... {...}
149
+ * >
150
+ * > Tensor operator()(Tensor a, Tensor b) {...}
151
+ * > };
152
+ * > }
153
+ * >
154
+ * > static auto registry = c10::RegisterOperators()
155
+ * > .op(c10::RegisterOperators::options()
156
+ * > .schema("my_op")
157
+ * > .kernel<my_kernel_cpu>(DispatchKey::CPU, "some_configuration", 3, true));
158
+ */
159
+ template<class KernelFunctor, class... ConstructorParameters>
160
+ // enable_if: only enable it if KernelFunctor is actually a functor
161
+ std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
162
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
163
+ static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
164
+
165
+ return std::move(*this).kernel(
166
+ dispatch_key,
167
+ KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
168
+ impl::CppSignature::make<KernelFunctor>(),
169
+ detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
170
+ );
171
+ }
172
+
173
+ /**
174
+ * Use this to register an operator whose kernel is implemented as a functor.
175
+ * The kernel is a catch-all kernel, meaning it's called independent from
176
+ * the input. Dispatch is disabled for this operator.
177
+ *
178
+ * Example:
179
+ *
180
+ * > namespace {
181
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
182
+ * > public:
183
+ * > Tensor operator()(Tensor a, Tensor b) {...}
184
+ * > };
185
+ * > }
186
+ * >
187
+ * > static auto registry = c10::RegisterOperators()
188
+ * > .op(c10::RegisterOperators::options()
189
+ * > .schema("my_op")
190
+ * > .catchAllKernel<my_kernel_cpu>());
191
+ *
192
+ * The functor constructor can take arguments to configure the kernel.
193
+ * The arguments are defined in the kernel registration.
194
+ * Example:
195
+ *
196
+ * > namespace {
197
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
198
+ * > public:
199
+ * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
200
+ * > : ... {...}
201
+ * >
202
+ * > Tensor operator()(Tensor a, Tensor b) {...}
203
+ * > };
204
+ * > }
205
+ * >
206
+ * > static auto registry = c10::RegisterOperators()
207
+ * > .op(c10::RegisterOperators::options()
208
+ * > .schema("my_op")
209
+ * > .catchAllKernel<my_kernel_cpu>("some_configuration", 3, true));
210
+ */
211
+ template<class KernelFunctor, class... ConstructorParameters>
212
+ // enable_if: only enable it if KernelFunctor is actually a functor
213
+ std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
214
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
215
+ static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
216
+
217
+ return std::move(*this).kernel(
218
+ c10::nullopt,
219
+ KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
220
+ impl::CppSignature::make<KernelFunctor>(),
221
+ detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
222
+ );
223
+ }
224
+
225
+ /**
226
+ * Use this to register an operator whose kernel is implemented by a function.
227
+ * The kernel is only called for inputs matching the given dispatch key.
228
+ * You can register multiple kernels for different dispatch keys.
229
+ *
230
+ * Example:
231
+ *
232
+ * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
233
+ * >
234
+ * > static auto registry = c10::RegisterOperators()
235
+ * > .op(c10::RegisterOperators::options()
236
+ * > .schema("my_op")
237
+ * > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(DispatchKey::CPU));
238
+ */
239
+ template<class FuncType, FuncType* kernel_func>
240
+ // enable_if: only enable it if FuncType is actually a function
241
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
242
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
243
+ static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
244
+
245
+ return std::move(*this).kernel(
246
+ dispatch_key,
247
+ KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
248
+ impl::CppSignature::make<FuncType>(),
249
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
250
+ detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
251
+ );
252
+ }
253
+
254
+ /**
255
+ * Use this to register an operator whose kernel is implemented by a function.
256
+ * The kernel is a catch-all kernel, meaning it's called independent from
257
+ * the input. Dispatch is disabled for this operator.
258
+ *
259
+ * Example:
260
+ *
261
+ * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
262
+ * >
263
+ * > static auto registry = c10::RegisterOperators()
264
+ * > .op(c10::RegisterOperators::options()
265
+ * > .schema("my_op")
266
+ * > .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
267
+ */
268
+ template<class FuncType, FuncType* kernel_func>
269
+ // enable_if: only enable it if FuncType is actually a function
270
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
271
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
272
+ static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
273
+
274
+ return std::move(*this).kernel(
275
+ c10::nullopt,
276
+ KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
277
+ impl::CppSignature::make<FuncType>(),
278
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
279
+ detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
280
+ );
281
+ }
282
+
283
+ template<class FuncType>
284
+ // enable_if: only enable it if FuncType is actually a function
285
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
286
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
287
+ TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
288
+
289
+ return std::move(*this).kernel(
290
+ dispatch_key,
291
+ KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
292
+ impl::CppSignature::make<FuncType>(),
293
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
294
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
295
+ );
296
+ }
297
+
298
+ template<class FuncType>
299
+ // enable_if: only enable it if FuncType is actually a function
300
+ std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
301
+ static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
302
+ TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
303
+
304
+ return std::move(*this).kernel(
305
+ c10::nullopt,
306
+ KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
307
+ impl::CppSignature::make<FuncType>(),
308
+ // TODO Do schema inference without relying on WrapFunctionIntoFunctor
309
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
310
+ );
311
+ }
312
+
313
+ /**
314
+ * Use this to register an operator whose kernel is implemented as a lambda.
315
+ * The kernel is only called for inputs matching the given dispatch key.
316
+ * You can register multiple kernels for different dispatch keys.
317
+ *
318
+ * The lambda must be stateless, i.e. not have a capture. If your kernel
319
+ * needs to store some configuration parameters, write the kernel as a
320
+ * functor instead.
321
+ *
322
+ * Example:
323
+ *
324
+ * > static auto registry = c10::RegisterOperators()
325
+ * > .op(c10::RegisterOperators::options()
326
+ * > .schema("my_op")
327
+ * > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...}));
328
+ */
329
+ template<class Lambda>
330
+ // enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
331
+ std::enable_if_t<
332
+ guts::is_functor<std::decay_t<Lambda>>::value
333
+ && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
334
+ Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && {
335
+ static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
336
+
337
+ // We don't support stateful lambdas (i.e. lambdas with a capture), because their
338
+ // behavior would be nonobvious. A functor kernel with cache gets a new instance of
339
+ // its cache each time the kernel is looked up from the dispatch table.
340
+ // A lambda with a capture would be global and share its capture between all kernel lookups.
341
+ // So, instead of making users having to think about it (including the thread-safety
342
+ // issues this causes), let's just forbid stateful lambdas altogether.
343
+ static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
344
+
345
+ return std::move(*this).kernel(
346
+ dispatch_key,
347
+ KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
348
+ impl::CppSignature::make<Lambda>(),
349
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
350
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
351
+ );
352
+ }
353
+
354
+ /**
355
+ * Use this to register an operator whose kernel is implemented as a lambda.
356
+ * The kernel is a catch-all kernel, meaning it's called independent from
357
+ * the input. Dispatch is disabled for this operator.
358
+ *
359
+ * The lambda must be stateless, i.e. not have a capture. If your kernel
360
+ * needs to store some configuration parameters, write the kernel as a
361
+ * functor instead.
362
+ *
363
+ * Example:
364
+ *
365
+ * > static auto registry = c10::RegisterOperators()
366
+ * > .op(c10::RegisterOperators::options()
367
+ * > .schema("my_op")
368
+ * > .catchAllKernel([] (Tensor a) -> Tensor {...}));
369
+ */
370
+ template<class Lambda>
371
+ // enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
372
+ std::enable_if_t<
373
+ guts::is_functor<std::decay_t<Lambda>>::value
374
+ && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
375
+ Options&&> catchAllKernel(Lambda&& lambda) && {
376
+ static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
377
+
378
+ // We don't support stateful lambdas (i.e. lambdas with a capture), because their
379
+ // behavior would be nonobvious.
380
+ // A lambda with a capture would be global and share its capture between all kernel lookups.
381
+ // This would be a likely source for unexpected race conditions, so we forbid it.
382
+ // If a kernel really needs global state, they can just have regular global state
383
+ // in their .cpp file next to the kernel lambda.
384
+ static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
385
+
386
+ return std::move(*this).kernel(
387
+ c10::nullopt,
388
+ KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
389
+ impl::CppSignature::make<Lambda>(),
390
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
391
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
392
+ );
393
+ }
394
+
395
+ Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && {
396
+ TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration.");
397
+ aliasAnalysisKind_ = aliasAnalysisKind;
398
+ return std::move(*this);
399
+ }
400
+
401
+ private:
402
+ Options&& kernel(c10::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
403
+ KernelRegistrationConfig config;
404
+ config.dispatch_key = dispatch_key;
405
+ config.func = std::move(func);
406
+ config.cpp_signature = cpp_signature;
407
+ config.inferred_function_schema = std::move(inferred_function_schema);
408
+ kernels.push_back(std::move(config));
409
+ return std::move(*this);
410
+ }
411
+
412
+ Options()
413
+ : schemaOrName_(c10::nullopt)
414
+ , kernels()
415
+ , aliasAnalysisKind_(c10::nullopt)
416
+ {}
417
+
418
+ // KernelRegistrationConfig accumulates all information from the config
419
+ // parameters passed to a RegisterOperators::op() call into one object.
420
+ struct KernelRegistrationConfig final {
421
+ KernelRegistrationConfig()
422
+ : dispatch_key(c10::nullopt)
423
+ , func()
424
+ , cpp_signature(c10::nullopt)
425
+ , inferred_function_schema(nullptr)
426
+ {}
427
+
428
+ c10::optional<DispatchKey> dispatch_key;
429
+ KernelFunction func;
430
+ c10::optional<impl::CppSignature> cpp_signature;
431
+ std::unique_ptr<FunctionSchema> inferred_function_schema;
432
+ };
433
+
434
+ c10::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;
435
+
436
+ std::vector<KernelRegistrationConfig> kernels;
437
+ optional<AliasAnalysisKind> aliasAnalysisKind_;
438
+ friend class RegisterOperators;
439
+ friend class Library;
440
+ };
441
+
442
+ /**
443
+ * Call this to get an instance of registration options, which
444
+ * can be passed to a call to RegisterOperators::op() to specify
445
+ * these options for the operator registration.
446
+ * See class doc comment for examples.
447
+ */
448
+ static Options options() {
449
+ return {};
450
+ }
451
+
452
+ /**
453
+ * Call this to register an operator. See class doc comment for examples.
454
+ */
455
+ RegisterOperators&& op(Options&& options) && {
456
+ checkSchemaAndRegisterOp_(std::move(options));
457
+ return std::move(*this);
458
+ }
459
+
460
+ // Regular mutator version of the && version above
461
+ RegisterOperators& op(Options&& options) & {
462
+ checkSchemaAndRegisterOp_(std::move(options));
463
+ return *this;
464
+ }
465
+
466
+ /**
467
+ * This is a shorthand for RegisterOperators::op(Options) where you can
468
+ * specify the operator schema outside of the options parameter.
469
+ * See class doc comment for examples.
470
+ */
471
+ RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && {
472
+ return std::move(*this).op(std::move(options).schema(schemaOrName));
473
+ }
474
+
475
+ // internal only for registering caffe2 ops
476
+ RegisterOperators&& op(FunctionSchema schema, Options&& options) && {
477
+ return std::move(*this).op(std::move(options).schema(std::move(schema)));
478
+ }
479
+
480
+ template<class FuncType>
481
+ explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options())
482
+ : RegisterOperators() {
483
+ std::move(*this).op(schemaOrName, std::forward<FuncType>(func), std::move(options));
484
+ }
485
+
486
+ /**
487
+ * This API registers an operator based on a kernel function pointer.
488
+ *
489
+ * Given a kernel
490
+ *
491
+ * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
492
+ *
493
+ * This API looks like:
494
+ *
495
+ * > static auto registry = c10::RegisterOperators()
496
+ * > .op("my_op", &my_kernel_cpu);
497
+ *
498
+ * If your kernel is small and the overhead of calling it matters,
499
+ * then this API might be the wrong choice since the following API
500
+ * has a slightly lower overhead for calling into the kernel:
501
+ *
502
+ * > static auto registry = c10::RegisterOperators()
503
+ * > .op("my_op", c10::RegisterOperators::options()
504
+ * > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
505
+ *
506
+ * Or, alternatively, write your kernel as a functor:
507
+ *
508
+ * > namespace {
509
+ * > class my_kernel_cpu final : public c10::OperatorKernel {
510
+ * > public:
511
+ * > Tensor operator()(Tensor a, Tensor b) {...}
512
+ * > };
513
+ * > }
514
+ * >
515
+ * > static auto registry = c10::RegisterOperators()
516
+ * > .op("my_op", c10::RegisterOperators::options()
517
+ * > .kernel<my_kernel_cpu>());
518
+ */
519
+ template<class FuncType>
520
+ // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
521
+ std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
522
+ op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
523
+ constexpr bool AllowLegacyTypes = true;
524
+ return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
525
+ c10::nullopt,
526
+ KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
527
+ impl::CppSignature::make<FuncType>(),
528
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
529
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
530
+ ));
531
+ }
532
+
533
+ /**
534
+ * This API registers an operator based on a kernel lambda.
535
+ *
536
+ * This API looks like:
537
+ *
538
+ * > static auto registry = c10::RegisterOperators()
539
+ * > .op("my_op", [] (Tensor a, Tensor b) {...});
540
+ *
541
+ * This is equivalent to:
542
+ *
543
+ * > static auto registry = c10::RegisterOperators()
544
+ * > .op("my_op", c10::RegisterOperators::options()
545
+ * > .catchAllKernel([] (Tensor a, Tensor b) {...}));
546
+ *
547
+ */
548
+ template<class Lambda>
549
+ // enable_if: only enable it if Lambda is actually a stateless lambda
550
+ std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
551
+ op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
552
+ static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
553
+
554
+ constexpr bool AllowLegacyTypes = true;
555
+ return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
556
+ c10::nullopt,
557
+ KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
558
+ impl::CppSignature::make<Lambda>(),
559
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
560
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
561
+ ));
562
+ }
563
+
564
+ template<class Lambda>
565
+ C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
566
+ // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
567
+ std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
568
+ op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
569
+ static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
570
+
571
+ constexpr bool AllowLegacyTypes = true;
572
+ return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
573
+ c10::nullopt,
574
+ KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
575
+ impl::CppSignature::make<Lambda>(),
576
+ // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
577
+ detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
578
+ ));
579
+ }
580
+
581
+ private:
582
+ void checkSchemaAndRegisterOp_(Options&& config);
583
+
584
+ static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options);
585
+ void checkNoDuplicateKernels_(const Options& options);
586
+ void registerOp_(Options&& options);
587
+
588
+ std::vector<RegistrationHandleRAII> registrars_;
589
+ };
590
+
591
+ } // namespace c10
592
+
593
+ namespace torch {
594
+ // Old-style API
595
+ using RegisterOperators = c10::RegisterOperators;
596
+ }
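For orientation, here is a minimal usage sketch of the registration API documented above. The operator name, schema string, and kernel body are illustrative assumptions, not part of this header.

// Hypothetical example: register a CPU kernel through the deprecated
// c10::RegisterOperators API using the function-pointer overload of op().
#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>

namespace {
at::Tensor my_add(at::Tensor a, at::Tensor b) {
  return a + b;
}
} // namespace

// The static registry object keeps the registration alive for the
// lifetime of the program.
static auto registry = c10::RegisterOperators()
    .op("my_ns::my_add(Tensor a, Tensor b) -> Tensor", &my_add);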
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <cuda_runtime.h>
5
+ #include <cuda_fp16.h>
6
+
7
+ #include <c10/macros/Export.h>
8
+
9
+ // Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh ADDED
@@ -0,0 +1,47 @@
1
+ #include <ATen/cuda/CUDAContext.h>
2
+
3
+ #include <cuda_runtime.h>
4
+
5
+ namespace at::cuda {
6
+
7
+ /**
8
+ Computes ceil(a / b)
9
+ */
10
+ template <typename T>
11
+ __host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
12
+ return (a + b - 1) / b;
13
+ }
14
+
15
+ namespace {
16
+
17
+ // Threads per block for our apply kernel
18
+ // FIXME: use occupancy calculator instead
19
+ constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
20
+ constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
21
+
22
+ template <int step = 1>
23
+ inline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
24
+ if (curDevice == -1) return false;
25
+ uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
26
+ uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
27
+ uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
28
+ if (numBlocks > maxGridX)
29
+ numBlocks = maxGridX;
30
+ grid = dim3(numBlocks);
31
+ return true;
32
+ }
33
+
34
+ constexpr int getApplyBlocksPerSM() {
35
+ return AT_APPLY_BLOCKS_PER_SM;
36
+ }
37
+
38
+ constexpr int getApplyBlockSize() {
39
+ return AT_APPLY_THREADS_PER_BLOCK;
40
+ }
41
+
42
+ inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
43
+ return dim3(max_threads_per_block);
44
+ }
45
+
46
+ } // anonymous namespace
47
+ } // namespace at::cuda
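As a brief, hedged illustration of how the helpers above fit together when configuring a pointwise launch (the element count and device index are assumed caller inputs; this is not part of the header):

// Hypothetical host-side helper: a block of AT_APPLY_THREADS_PER_BLOCK
// threads and a capped 1-D grid covering `totalElements` elements (step == 1).
#include <ATen/cuda/ApplyGridUtils.cuh>

inline bool configurePointwiseLaunch(uint64_t totalElements, int64_t curDevice,
                                     dim3& block, dim3& grid) {
  block = at::cuda::getApplyBlock();
  return at::cuda::getApplyGrid<1>(totalElements, grid, curDevice);
}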
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh ADDED
@@ -0,0 +1,149 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ // Collection of direct PTX functions
5
+
6
+ namespace at::cuda {
7
+
8
+ template <typename T>
9
+ struct Bitfield {};
10
+
11
+ template <>
12
+ struct Bitfield<unsigned int> {
13
+ static __device__ __host__ __forceinline__
14
+ unsigned int getBitfield(unsigned int val, int pos, int len) {
15
+ #if !defined(__CUDA_ARCH__)
16
+ pos &= 0xff;
17
+ len &= 0xff;
18
+
19
+ unsigned int m = (1u << len) - 1u;
20
+ return (val >> pos) & m;
21
+ #else
22
+ unsigned int ret;
23
+ asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
24
+ return ret;
25
+ #endif
26
+ }
27
+
28
+ static __device__ __host__ __forceinline__
29
+ unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
30
+ #if !defined(__CUDA_ARCH__)
31
+ pos &= 0xff;
32
+ len &= 0xff;
33
+
34
+ unsigned int m = (1u << len) - 1u;
35
+ toInsert &= m;
36
+ toInsert <<= pos;
37
+ m <<= pos;
38
+
39
+ return (val & ~m) | toInsert;
40
+ #else
41
+ unsigned int ret;
42
+ asm("bfi.b32 %0, %1, %2, %3, %4;" :
43
+ "=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
44
+ return ret;
45
+ #endif
46
+ }
47
+ };
48
+
49
+ template <>
50
+ struct Bitfield<uint64_t> {
51
+ static __device__ __host__ __forceinline__
52
+ uint64_t getBitfield(uint64_t val, int pos, int len) {
53
+ #if !defined(__CUDA_ARCH__)
54
+ pos &= 0xff;
55
+ len &= 0xff;
56
+
57
+ uint64_t m = (1u << len) - 1u;
58
+ return (val >> pos) & m;
59
+ #else
60
+ uint64_t ret;
61
+ asm("bfe.u64 %0, %1, %2, %3;" : "=l"(ret) : "l"(val), "r"(pos), "r"(len));
62
+ return ret;
63
+ #endif
64
+ }
65
+
66
+ static __device__ __host__ __forceinline__
67
+ uint64_t setBitfield(uint64_t val, uint64_t toInsert, int pos, int len) {
68
+ #if !defined(__CUDA_ARCH__)
69
+ pos &= 0xff;
70
+ len &= 0xff;
71
+
72
+ uint64_t m = (1u << len) - 1u;
73
+ toInsert &= m;
74
+ toInsert <<= pos;
75
+ m <<= pos;
76
+
77
+ return (val & ~m) | toInsert;
78
+ #else
79
+ uint64_t ret;
80
+ asm("bfi.b64 %0, %1, %2, %3, %4;" :
81
+ "=l"(ret) : "l"(toInsert), "l"(val), "r"(pos), "r"(len));
82
+ return ret;
83
+ #endif
84
+ }
85
+ };
86
+
87
+ __device__ __forceinline__ int getLaneId() {
88
+ #if defined(USE_ROCM)
89
+ return __lane_id();
90
+ #else
91
+ int laneId;
92
+ asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
93
+ return laneId;
94
+ #endif
95
+ }
96
+
97
+ #if defined(USE_ROCM)
98
+ __device__ __forceinline__ unsigned long long int getLaneMaskLt() {
99
+ const std::uint64_t m = (1ull << getLaneId()) - 1ull;
100
+ return m;
101
+ }
102
+ #else
103
+ __device__ __forceinline__ unsigned getLaneMaskLt() {
104
+ unsigned mask;
105
+ asm("mov.u32 %0, %%lanemask_lt;" : "=r"(mask));
106
+ return mask;
107
+ }
108
+ #endif
109
+
110
+ #if defined (USE_ROCM)
111
+ __device__ __forceinline__ unsigned long long int getLaneMaskLe() {
112
+ std::uint64_t m = UINT64_MAX >> (sizeof(std::uint64_t) * CHAR_BIT - (getLaneId() + 1));
113
+ return m;
114
+ }
115
+ #else
116
+ __device__ __forceinline__ unsigned getLaneMaskLe() {
117
+ unsigned mask;
118
+ asm("mov.u32 %0, %%lanemask_le;" : "=r"(mask));
119
+ return mask;
120
+ }
121
+ #endif
122
+
123
+ #if defined(USE_ROCM)
124
+ __device__ __forceinline__ unsigned long long int getLaneMaskGt() {
125
+ const std::uint64_t m = getLaneMaskLe();
126
+ return m ? ~m : m;
127
+ }
128
+ #else
129
+ __device__ __forceinline__ unsigned getLaneMaskGt() {
130
+ unsigned mask;
131
+ asm("mov.u32 %0, %%lanemask_gt;" : "=r"(mask));
132
+ return mask;
133
+ }
134
+ #endif
135
+
136
+ #if defined(USE_ROCM)
137
+ __device__ __forceinline__ unsigned long long int getLaneMaskGe() {
138
+ const std::uint64_t m = getLaneMaskLt();
139
+ return ~m;
140
+ }
141
+ #else
142
+ __device__ __forceinline__ unsigned getLaneMaskGe() {
143
+ unsigned mask;
144
+ asm("mov.u32 %0, %%lanemask_ge;" : "=r"(mask));
145
+ return mask;
146
+ }
147
+ #endif
148
+
149
+ } // namespace at::cuda
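As an illustrative sketch (not part of the header), the lane-mask helpers are commonly combined with a warp ballot to count lower-numbered lanes that satisfy a predicate; this assumes the CUDA (non-ROCm) path, where the masks are 32-bit:

// Hypothetical device helper: exclusive count of preceding lanes in the
// current warp whose predicate is true, built on getLaneMaskLt().
__device__ __forceinline__ int exclusiveWarpCount(bool pred) {
  unsigned ballot = __ballot_sync(0xffffffffu, pred);    // one bit per lane
  unsigned before = ballot & at::cuda::getLaneMaskLt();  // keep lanes below ours
  return __popc(before);                                 // count them
}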
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh ADDED
@@ -0,0 +1,508 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <c10/util/Half.h>
5
+ #include <c10/util/BFloat16.h>
6
+
7
+ #include <ATen/NumericUtils.h>
8
+
9
+ #if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))))
10
+ #include <cuda_bf16.h>
11
+ #endif
12
+
13
+ template <typename T>
14
+ struct AtomicFPOp;
15
+
16
+ template <>
17
+ struct AtomicFPOp<at::Half> {
18
+ template <typename func_t>
19
+ inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) {
20
+ unsigned int * address_as_ui =
21
+ (unsigned int *) ((char *)address - ((size_t)address & 2));
22
+ unsigned int old = *address_as_ui;
23
+ unsigned int assumed;
24
+
25
+ at::Half hsum;
26
+ do {
27
+ assumed = old;
28
+ hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
29
+ hsum = func(hsum, val);
30
+ old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
31
+ old = atomicCAS(address_as_ui, assumed, old);
32
+ } while (assumed != old);
33
+ hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
34
+ return hsum;
35
+ }
36
+ };
37
+
38
+ template <>
39
+ struct AtomicFPOp<at::BFloat16> {
40
+ template <typename func_t>
41
+ inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) {
42
+ unsigned int * address_as_ui =
43
+ (unsigned int *) ((char *)address - ((size_t)address & 2));
44
+ unsigned int old = *address_as_ui;
45
+ unsigned int assumed;
46
+
47
+ at::BFloat16 bsum;
48
+ do {
49
+ assumed = old;
50
+ bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
51
+ bsum = func(bsum, val);
52
+ old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x;
53
+ old = atomicCAS(address_as_ui, assumed, old);
54
+ } while (assumed != old);
55
+ bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
56
+ return bsum.x;
57
+ }
58
+ };
59
+
60
+ template <>
61
+ struct AtomicFPOp<double> {
62
+ template <typename func_t>
63
+ inline __device__ double operator() (double * address, double val, const func_t& func) {
64
+ unsigned long long int* address_as_ull = (unsigned long long int*)address;
65
+ unsigned long long int old = *address_as_ull;
66
+ unsigned long long int assumed;
67
+
68
+ do {
69
+ assumed = old;
70
+ old = atomicCAS(address_as_ull, assumed, func(val, assumed));
71
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
72
+ } while (assumed != old);
73
+
74
+ return __longlong_as_double(old);
75
+ }
76
+ };
77
+
78
+ #define ATOMIC_INTEGER_IMPL(NAME) \
79
+ template <typename T, size_t n> \
80
+ struct Atomic##NAME##IntegerImpl; \
81
+ \
82
+ template<typename T> \
83
+ struct Atomic##NAME##IntegerImpl<T, 1> { \
84
+ template <typename func_t> \
85
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
86
+ size_t offset = (size_t)address & 3; \
87
+ uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
88
+ uint32_t old = *address_as_ui; \
89
+ uint32_t shift = offset * 8; \
90
+ uint32_t old_byte; \
91
+ uint32_t newval; \
92
+ uint32_t assumed; \
93
+ \
94
+ do { \
95
+ assumed = old; \
96
+ old_byte = (old >> shift) & 0xff; \
97
+ newval = static_cast<uint8_t>(func(val, static_cast<T>(old_byte))); \
98
+ newval = (old & ~(0x000000ff << shift)) | (newval << shift); \
99
+ old = atomicCAS(address_as_ui, assumed, newval); \
100
+ } while (assumed != old); \
101
+ } \
102
+ }; \
103
+ \
104
+ template<typename T> \
105
+ struct Atomic##NAME##IntegerImpl<T, 2> { \
106
+ template <typename func_t> \
107
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
108
+ size_t offset = (size_t)address & 2; \
109
+ uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
110
+ bool is_32_align = offset; \
111
+ uint32_t old = *address_as_ui; \
112
+ uint32_t old_bytes; \
113
+ uint32_t newval; \
114
+ uint32_t assumed; \
115
+ \
116
+ do { \
117
+ assumed = old; \
118
+ old_bytes = is_32_align ? old >> 16 : old & 0xffff; \
119
+ newval = static_cast<uint16_t>(func(val, static_cast<T>(old_bytes))); \
120
+ newval = is_32_align ? (old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \
121
+ old = atomicCAS(address_as_ui, assumed, newval); \
122
+ } while (assumed != old); \
123
+ } \
124
+ }; \
125
+ \
126
+ template<typename T> \
127
+ struct Atomic##NAME##IntegerImpl<T, 4> { \
128
+ template <typename func_t> \
129
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
130
+ uint32_t * address_as_ui = (uint32_t *) (address); \
131
+ uint32_t old = *address_as_ui; \
132
+ uint32_t newval; \
133
+ uint32_t assumed; \
134
+ \
135
+ do { \
136
+ assumed = old; \
137
+ newval = static_cast<uint32_t>(func(val, static_cast<T>(old))); \
138
+ old = atomicCAS(address_as_ui, assumed, newval); \
139
+ } while (assumed != old); \
140
+ } \
141
+ }; \
142
+ \
143
+ template<typename T> \
144
+ struct Atomic##NAME##IntegerImpl<T, 8> { \
145
+ template <typename func_t> \
146
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
147
+ unsigned long long * address_as_ui = (unsigned long long *) (address); \
148
+ unsigned long long old = *address_as_ui; \
149
+ unsigned long long newval; \
150
+ unsigned long long assumed; \
151
+ \
152
+ do { \
153
+ assumed = old; \
154
+ newval = static_cast<uint64_t>(func(val, static_cast<T>(old))); \
155
+ old = atomicCAS(address_as_ui, assumed, newval); \
156
+ } while (assumed != old); \
157
+ } \
158
+ };
159
+
160
+
161
+ # define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \
162
+ static inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \
163
+ Atomic##NAME##IntegerImpl<DTYPE, sizeof(DTYPE)>()(address, \
164
+ val, \
165
+ [](DTYPE a, DTYPE b) { \
166
+ return OP; \
167
+ }); \
168
+ } \
169
+
170
+ ATOMIC_INTEGER_IMPL(Add)
171
+ GPU_ATOMIC_INTEGER(Add, a || b, bool)
172
+
173
+ // Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32_t, int64_t)
174
+ static inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) {
175
+ AtomicAddIntegerImpl<uint8_t, sizeof(uint8_t)>()(address,
176
+ val,
177
+ [](uint8_t a, uint8_t b) {
178
+ return a + b;
179
+ });
180
+ }
181
+
182
+ static inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) {
183
+ AtomicAddIntegerImpl<int8_t, sizeof(int8_t)>()(address,
184
+ val,
185
+ [](int8_t a, int8_t b) {
186
+ return a + b;
187
+ });
188
+ }
189
+
190
+ static inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) {
191
+ AtomicAddIntegerImpl<int16_t, sizeof(int16_t)>()(address,
192
+ val,
193
+ [](int16_t a, int16_t b) {
194
+ return a + b;
195
+ });
196
+ }
197
+
198
+ static inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) {
199
+ return atomicAdd(address, val);
200
+ }
201
+
202
+ static inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) {
203
+ #if defined(USE_ROCM)
204
+ __atomic_fetch_add(address, val, __ATOMIC_RELAXED);
205
+ #else
206
+ static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed");
207
+ atomicAdd(reinterpret_cast<unsigned long long int *>(address), static_cast<unsigned long long int>(val));
208
+ #endif
209
+ }
210
+
211
+ static inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) {
212
+ #if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
213
+ return AtomicFPOp<at::Half>()(address, val,
214
+ [](at::Half hsum, at::Half val) {
215
+ return hsum + val;
216
+ });
217
+ #else
218
+ return atomicAdd(reinterpret_cast<__half*>(address), val);
219
+ #endif
220
+ }
221
+
222
+ static inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) {
223
+ #if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))
224
+ return AtomicFPOp<at::BFloat16>()(address, val,
225
+ [](at::BFloat16 bsum, at::BFloat16 val) {
226
+ return bsum + val;
227
+ });
228
+ #else
229
+ __nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val));
230
+ return *reinterpret_cast<c10::BFloat16*>(&r);
231
+ #endif
232
+ }
233
+
234
+ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
235
+ // from CUDA C Programming Guide
236
+ static inline __device__ double atomicAdd(double* address, double val)
237
+ #if defined(__clang__) && defined(__CUDA__)
238
+ #pragma GCC diagnostic push
239
+ #pragma GCC diagnostic ignored "-Wgcc-compat"
240
+ __attribute__((enable_if(true, "")))
241
+ #pragma GCC diagnostic pop
242
+ #endif
243
+ {
244
+
245
+ return AtomicFPOp<double>()(address, val,
246
+ [](double val, unsigned long long int assumed) {
247
+ return __double_as_longlong(val + __longlong_as_double(assumed));
248
+ });
249
+ }
250
+ #elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__))
251
+
252
+ /* Note [hip-clang differences to hcc]
253
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
254
+ * The upcoming hip-clang compiler for ROCm differs from hcc in a few details.
255
+ * It exports the __HIP__ macro, we can hence differentiate between hcc and
256
+ * hip-clang. In the below, hcc only received support for atomicAdd with double
257
+ * typing after work week 18312. hip-clang had support from the first version.
258
+ * In general, the code-visible differences between hip-clang and hcc will be
259
+ * minimal.
260
+ */
261
+
262
+ #if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__
263
+ // This needs to be defined for the host side pass
264
+ static inline __device__ double atomicAdd(double *address, double val) { }
265
+ #endif
266
+ #endif
267
+
268
+ static inline __device__ double gpuAtomicAdd(double *address, double val) {
269
+ return atomicAdd(address, val);
270
+ }
271
+
272
+ static inline __device__ float gpuAtomicAdd(float *address, float val) {
273
+ return atomicAdd(address, val);
274
+ }
275
+
276
+ template<typename T>
277
+ static inline __device__ void gpuAtomicAdd(c10::complex<T> *address, c10::complex<T> val) {
278
+ gpuAtomicAdd(&address->real_, val.real_);
279
+ gpuAtomicAdd(&address->imag_, val.imag_);
280
+ }
281
+
282
+ /* Note [gpuAtomicAdd vs atomicAdd]
283
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
284
+ * Some extensions such as torchvision call atomicAdd()
285
+ * directly and require non-library provided data type support. Only for these, we
286
+ * continue to provide atomicAdd overloads.
287
+ */
288
+ static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
289
+ return gpuAtomicAdd(address, val);
290
+ }
291
+
292
+ static inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) {
293
+ return gpuAtomicAdd(address, val);
294
+ }
295
+
296
+ static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) {
297
+ gpuAtomicAdd(address, val);
298
+ }
299
+
300
+ static inline __device__ void atomicAdd(int8_t *address, int8_t val) {
301
+ gpuAtomicAdd(address, val);
302
+ }
303
+
304
+ static inline __device__ void atomicAdd(int16_t *address, int16_t val) {
305
+ gpuAtomicAdd(address, val);
306
+ }
307
+
308
+ static inline __device__ void atomicAdd(int64_t *address, int64_t val) {
309
+ gpuAtomicAdd(address, val);
310
+ }
311
+
312
+ static inline __device__ void atomicAdd(bool *address, bool val) {
313
+ gpuAtomicAdd(address, val);
314
+ }
315
+
316
+ /* Note [explicitly non-returning atomics]
317
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
318
+ * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet().
319
+ * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction.
320
+ * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd,
321
+ * therefore we need a new API 'gpuAtomicAddNoReturn'.
322
+ */
323
+ template<typename T>
324
+ static inline __device__ void gpuAtomicAddNoReturn(c10::complex<T> *address, c10::complex<T> val) { gpuAtomicAdd(address, val); }
325
+ static inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); }
326
+ static inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); }
327
+ static inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); }
328
+ static inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); }
329
+ static inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); }
330
+ static inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); }
331
+ static inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); }
332
+ static inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); }
333
+ static inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); }
334
+
335
+ /* Special case fp32 atomic. */
336
+ #if defined(USE_ROCM)
337
+ static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); }
338
+ #else
339
+ static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); }
340
+ #endif
341
+
342
+ // Atomic multiplication implementation.
343
+
344
+ ATOMIC_INTEGER_IMPL(Mul)
345
+ GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t)
346
+ GPU_ATOMIC_INTEGER(Mul, a * b, int8_t)
347
+ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
348
+ GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
349
+ GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
350
+
351
+ inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
352
+ return AtomicFPOp<at::Half>()(address, val,
353
+ [](at::Half bsum, at::Half val) {
354
+ return bsum * val;
355
+ });
356
+ }
357
+
358
+ inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) {
359
+ return AtomicFPOp<at::BFloat16>()(address, val,
360
+ [](at::BFloat16 bsum, at::BFloat16 val) {
361
+ return bsum * val;
362
+ });
363
+ }
364
+
365
+ inline __device__ double gpuAtomicMul(double * address, double val) {
366
+ return AtomicFPOp<double>()(address, val,
367
+ [](double val, unsigned long long int assumed) {
368
+ return __double_as_longlong(val * __longlong_as_double(assumed));
369
+ });
370
+ }
371
+
372
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
373
+ inline __device__ float gpuAtomicMul (float * address, float val) {
374
+ unsigned int* address_as_ull = (unsigned int*)address;
375
+ unsigned int old = *address_as_ull;
376
+ unsigned int assumed;
377
+
378
+ do {
379
+ assumed = old;
380
+ old = atomicCAS(address_as_ull, assumed,
381
+ __float_as_int(val *
382
+ __int_as_float(assumed)));
383
+
384
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
385
+ } while (assumed != old);
386
+
387
+ return __int_as_float(old);
388
+ }
389
+
390
+ // Atomic maximum implementation.
391
+
392
+ template <typename T>
393
+ __host__ __device__ T safe_max(T a, T b) {
394
+ #if defined(__HIPCC__)
395
+ // TODO: remove this special case for HIP when issue is fixed:
396
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
397
+ T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max<T>(a, b));
398
+ #else
399
+ T max = at::_isnan(b) ? b : std::max<T>(a, b);
400
+ #endif
401
+
402
+ return max;
403
+ }
404
+
405
+ ATOMIC_INTEGER_IMPL(Max)
406
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
407
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
408
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t)
409
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t)
410
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t)
411
+
412
+ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
413
+ return AtomicFPOp<at::Half>()(address, val,
414
+ [](at::Half bsum, at::Half val) {
415
+ return safe_max(bsum, val);
416
+ });
417
+ }
418
+
419
+ inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
420
+ return AtomicFPOp<at::BFloat16>()(address, val,
421
+ [](at::BFloat16 bsum, at::BFloat16 val) {
422
+ return safe_max(bsum, val);
423
+ });
424
+ }
425
+
426
+ inline __device__ double gpuAtomicMax(double * address, double val) {
427
+ return AtomicFPOp<double>()(address, val,
428
+ [](double val, unsigned long long int assumed) {
429
+ return __double_as_longlong(safe_max(val, __longlong_as_double(assumed)));
430
+ });
431
+ }
432
+
433
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
434
+ inline __device__ float gpuAtomicMax(float * address, float val) {
435
+ unsigned int* address_as_ull = (unsigned int*)address;
436
+ unsigned int old = *address_as_ull;
437
+ unsigned int assumed;
438
+
439
+ do {
440
+ assumed = old;
441
+ old = atomicCAS(address_as_ull, assumed,
442
+ __float_as_int(safe_max(val, __int_as_float(assumed))));
443
+
444
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
445
+ } while (assumed != old);
446
+
447
+ return __int_as_float(old);
448
+ }
449
+
450
+ // Atomic minimum implementation.
451
+
452
+ template <typename T>
453
+ __host__ __device__ T safe_min(T a, T b) {
454
+ #if defined(__HIPCC__)
455
+ // TODO: remove this special case for HIP when issue is fixed:
456
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
457
+ T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min<T>(a, b));
458
+ #else
459
+ T min = at::_isnan(b) ? b : std::min<T>(a, b);
460
+ #endif
461
+
462
+ return min;
463
+ }
464
+
465
+ ATOMIC_INTEGER_IMPL(Min)
466
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
467
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
468
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t)
469
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t)
470
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t)
471
+
472
+ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
473
+ return AtomicFPOp<at::Half>()(address, val,
474
+ [](at::Half bsum, at::Half val) {
475
+ return safe_min(bsum, val);
476
+ });
477
+ }
478
+
479
+ inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
480
+ return AtomicFPOp<at::BFloat16>()(address, val,
481
+ [](at::BFloat16 bsum, at::BFloat16 val) {
482
+ return safe_min(bsum, val);
483
+ });
484
+ }
485
+
486
+ inline __device__ double gpuAtomicMin(double * address, double val) {
487
+ return AtomicFPOp<double>()(address, val,
488
+ [](double val, unsigned long long int assumed) {
489
+ return __double_as_longlong(safe_min(val, __longlong_as_double(assumed)));
490
+ });
491
+ }
492
+
493
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
494
+ inline __device__ float gpuAtomicMin(float * address, float val) {
495
+ unsigned int* address_as_ull = (unsigned int*)address;
496
+ unsigned int old = *address_as_ull;
497
+ unsigned int assumed;
498
+
499
+ do {
500
+ assumed = old;
501
+ old = atomicCAS(address_as_ull, assumed,
502
+ __float_as_int(safe_min(val, __int_as_float(assumed))));
503
+
504
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
505
+ } while (assumed != old);
506
+
507
+ return __int_as_float(old);
508
+ }
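To make the intended use of these overloads concrete, here is a small hedged kernel sketch (bin indices are assumed to be in range; this is not part of the header):

// Hypothetical histogram kernel built on the gpuAtomicAdd overloads above.
__global__ void histogramKernel(const int32_t* bins, int64_t n, int32_t* counts) {
  int64_t i = blockIdx.x * static_cast<int64_t>(blockDim.x) + threadIdx.x;
  if (i < n) {
    // Conflicting updates to the same bin are serialized by the atomic.
    gpuAtomicAdd(&counts[bins[i]], 1);
  }
}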
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh ADDED
@@ -0,0 +1,537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/ApplyGridUtils.cuh>
4
+ #include <ATen/cuda/detail/IndexUtils.cuh>
5
+ #include <ATen/core/TensorBase.h>
6
+ #include <ATen/ceil_div.h>
7
+ #include <ATen/cuda/Atomic.cuh>
8
+ #include <ATen/cuda/CUDAContext.h>
9
+ #include <c10/macros/Macros.h>
10
+ #include <ATen/native/Copy.h>
11
+
12
+ #include <math.h>
13
+
14
+ //
15
+ // This file contains pointwise operation functions and kernels that
16
+ // work on both contiguous and non-contiguous tensor arguments of
17
+ // arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without
18
+ // copying or temporary storage.
19
+ //
20
+
21
+ /*
22
+ NOTE [ CUDA_tensor_applyN helpers ]
23
+
24
+ The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4)
25
+ functions apply a pointwise operator to N tensor(s).
26
+
27
+ The calling convention is
28
+
29
+ 1. The template arguments should be, sequentially,
30
+ - First N typename args specify the scalar types of each of the N tensors.
31
+ - (Optional) `int step` arg specifies the number of elements processed
32
+ together at the same time.
33
+ Default is 1.
34
+ - A usually omitted (i.e., inferred) typename arg specifies the type of the
35
+ function/functor applied on `N * step` values in each iteration of each
36
+ CUDA thread.
37
+ 2. The arguments should be, sequentially,
38
+ - N tensors
39
+ - op: a function/functor that processes `N * step` values at the same time.
40
+ - If `step == 1`, it must have signature
41
+ `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where
42
+ `scalar*_t`s are the first N typename template args, and the inputs
43
+ are the `N` values from the `N` tensors retrieved at a common index.
44
+ - Otherwise, it must have signature
45
+ void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&, // repeat `step` times
46
+ scalar2_t&, scalar2_t&, ..., scalar2_t&, // repeat `step` times
47
+ ...,
48
+ scalarN_t&, scalarN_t&, ..., scalarN_t&) // repeat `step` times
49
+ Different from `step == 1` case, it processes `N * step` values taken
50
+ from `step` common indices. Moreover, the first input `n` represents the
51
+ number of valid indices (it will always have `0 < n <= step`). It will
52
+ almost always be `step`, but at the boundary we may not have full `step`
53
+ elements and `n` can be a lesser value.
54
+
55
+ E.g., if `step == 4` and `N == 2`, `op` could be
56
+
57
+ [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4,
58
+ scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) {
59
+ // Only process u1, ..., un and v1, ..., vn.
60
+ // So if `n == 3`, `u4` and `v4` need not to be considered.
61
+ }
62
+
63
+ In both cases, the references can actually be const, but at least one of
64
+ them should be non-const in order to write the output.
65
+ - (Optional, but recommended) N TensorArgType args that specify for each
66
+ tensor whether `op` reads AND writes (i.e., TensorArgType::ReadWrite),
67
+ or only reads (i.e., TensorArgType::ReadOnly).
68
+ Default is TensorArgType::ReadWrite for first Tensor, and
69
+ TensorArgType::ReadOnly for the rest.
70
+
71
+ E.g.,
72
+
73
+ to compute a = b^2 for a and b of same dtype, we can call
74
+
75
+ CUDA_tensor_apply2<scalar, scalar>(
76
+ a, b,
77
+ [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; }
78
+ );
79
+
80
+ to work on 2 values at the same time, we can call
81
+
82
+ CUDA_tensor_apply2<scalar1, scalar2, 2>(
83
+ a, b,
84
+ [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2,
85
+ const scalar2 &b_val1, const scalar2 &b_val2) {
86
+ // call special vectorized op here, or just do elementwise and enjoy unrolling...
87
+ // if n == 1, only process a_val1 and b_val1
88
+ }
89
+ );
90
+ */
91
+
92
+ namespace at::cuda {
93
+
94
+ // TODO: combine with TensorArg? So far that's been for debugging, and this is functional...
95
+ enum class TensorArgType { ReadWrite, ReadOnly };
96
+
97
+ namespace {
98
+
99
+ // Rearrange dimensions for pointwise operations so that strides are in
100
+ // decreasing order as much as possible, so that kernels have better memory
101
+ // access patterns.
102
+ //
103
+ // For example, consider a binary operation on two "transposed" 2-dim tensors:
104
+ // sizes: 256 512
105
+ // aInfo->strides: 1 256
106
+ // bInfo->strides: 1 256
107
+ //
108
+ // Given this, each concurrent memory access inside kernelPointwiseApply2() is
109
+ // exactly 256 elements apart, resulting in poor performance.
110
+ //
111
+ // This function exchanges dimensions so that memory access is contiguous:
112
+ // sizes: 512 256
113
+ // aInfo->strides: 256 1
114
+ // bInfo->strides: 256 1
115
+ //
116
+ // (Actually, it becomes even better because now collapseDims() can turn each
117
+ // input into one contiguous array.)
118
+ //
119
+ // In general, given M (<=4) TensorInfo's with N dimensions, we can view each
120
+ // strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange
121
+ // strides[i] and [j] if
122
+ // (1) strides[i][k] < strides[j][k] for some k (0 <= k < M)
123
+ // (exchanging them will benefit input #k), and
124
+ // (2) strides[i][k] <= strides[j][k] for all k
125
+ // (exchanging them will not make any input worse).
126
+ template <typename T1, typename IndexType,
127
+ typename T2 = void, typename T3 = void, typename T4 = void>
128
+ inline void rearrangeDims(detail::TensorInfo<T1, IndexType>* aInfo,
129
+ detail::TensorInfo<T2, IndexType>* bInfo = nullptr,
130
+ detail::TensorInfo<T3, IndexType>* cInfo = nullptr,
131
+ detail::TensorInfo<T4, IndexType>* dInfo = nullptr) {
132
+ int numInfos = 1;
133
+ int dims = aInfo->dims;
134
+ IndexType *sizes[4] = { aInfo->sizes, };
135
+ IndexType *strides[4] = { aInfo->strides, };
136
+
137
+ if (bInfo != nullptr) {
138
+ ++numInfos;
139
+ if (bInfo->dims != dims) return;
140
+ sizes[1] = bInfo->sizes;
141
+ strides[1] = bInfo->strides;
142
+ }
143
+
144
+ if (cInfo != nullptr) {
145
+ ++numInfos;
146
+ if (cInfo->dims != dims) return;
147
+ sizes[2] = cInfo->sizes;
148
+ strides[2] = cInfo->strides;
149
+ }
150
+
151
+ if (dInfo != nullptr) {
152
+ ++numInfos;
153
+ if (dInfo->dims != dims) return;
154
+ sizes[3] = dInfo->sizes;
155
+ strides[3] = dInfo->strides;
156
+ }
157
+
158
+ // Bail out if sizes do not match: we are using "deprecated pointwise
159
+ // behavior" among tensors of different shapes but same number of elements.
160
+ for (int i = 1; i < numInfos; ++i) {
161
+ for (int j = 0; j < dims; ++j) {
162
+ if (sizes[i][j] != sizes[0][j]) return;
163
+ }
164
+ }
165
+
166
+ for (int i = 0; i < dims - 1; ++i) {
167
+ // No need to consider dimensions of size 1.
168
+ if (sizes[0][i] == 1) continue;
169
+
170
+ for (int j = i + 1; j < dims; ++j) {
171
+ if (sizes[0][j] == 1) continue;
172
+
173
+ // Compare the relative sizes of strides between dim #i and dim #j.
174
+ bool hasIncreasingStrides = false;
175
+ bool hasDecreasingStrides = false;
176
+
177
+ for (int k = 0; k < numInfos; k++) {
178
+ IndexType stride_i = strides[k][i];
179
+ IndexType stride_j = strides[k][j];
180
+ if (stride_i < stride_j) {
181
+ hasIncreasingStrides = true;
182
+ } else if (stride_i > stride_j) {
183
+ hasDecreasingStrides = true;
184
+ }
185
+ }
186
+
187
+ if (hasIncreasingStrides && !hasDecreasingStrides) {
188
+ for (int k = 0; k < numInfos; k++) {
189
+ IndexType size = sizes[k][i];
190
+ sizes[k][i] = sizes[k][j];
191
+ sizes[k][j] = size;
192
+
193
+ IndexType stride = strides[k][i];
194
+ strides[k][i] = strides[k][j];
195
+ strides[k][j] = stride;
196
+ }
197
+ }
198
+ }
199
+ }
200
+ }
201
+
202
+ // The `remaining_steps` argument is used to support Op that operates on
203
+ // multiple elements at the same time. Generally, the strategy of ApplyOpN is to
204
+ // 1. Initialize `remaining_steps = step`, where `step` is the template arg of
205
+ // CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the
206
+ // number of elements in bound for this call. It will almost always equal to
207
+ // `step` except at boundaries.
208
+ // 2. If `remaining_steps > 0` convert the current linearIndex to offset (if in
209
+ // bound), and recursively call `ApplyOpN` with `remaining_steps - 1`.
210
+ // 3. At `remaining_steps = 0`,
211
+ // if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`;
212
+ // if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tensor1_valstep,
213
+ // tensor2_val1, tensor2_val2, ..., tensor2_valstep,
214
+ // ...
215
+ // tensorN_val1, tensorN_val2, ..., tensorN_valstep);`
216
+ //
217
+ // See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like.
218
+
219
+ template <typename Op,
220
+ typename scalar,
221
+ typename IndexType,
222
+ int ADims,
223
+ int remaining_steps,
224
+ typename... Offsets>
225
+ struct ApplyOp1 {
226
+ __device__ __forceinline__
227
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
228
+ IndexType linearIndex, Offsets... aOffsets) {
229
+ // Convert `linearIndex` into an offset of `a`
230
+ const IndexType aOffset = sizeof...(Offsets) < n ?
231
+ detail::IndexToOffset<scalar, IndexType, ADims>::get(linearIndex, a) : 0;
232
+
233
+ ApplyOp1<Op, scalar, IndexType, ADims, remaining_steps - 1, const IndexType, Offsets...>::apply(
234
+ a, op, n, linearIndex + 1, aOffsets..., aOffset
235
+ );
236
+ }
237
+ };
238
+
239
+ // Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
240
+ // We don't need to pass in how many elements need to be processed in this case.
241
+ template <typename Op,
242
+ typename scalar,
243
+ typename IndexType,
244
+ int ADims,
245
+ typename Offset>
246
+ struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offset> {
247
+ __device__ __forceinline__
248
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op,
249
+ int n, IndexType linearIndex, Offset offset) {
250
+ op(a.data[offset]);
251
+ }
252
+ };
253
+
254
+ template <typename Op,
255
+ typename scalar,
256
+ typename IndexType,
257
+ int ADims,
258
+ typename... Offsets>
259
+ struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offsets...> {
260
+ __device__ __forceinline__
261
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
262
+ IndexType linearIndex, Offsets... offsets) {
263
+ op(n, a.data[offsets]...);
264
+ }
265
+ };
266
+
267
+ template <typename Op,
268
+ typename scalar,
269
+ typename IndexType,
270
+ int ADims,
271
+ int step>
272
+ #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
273
+ C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
274
+ #endif
275
+ __global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
276
+ IndexType totalElements, const Op op) {
277
+ for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
278
+ linearIndex < totalElements;
279
+ linearIndex += gridDim.x * blockDim.x * step) {
280
+ ApplyOp1<Op, scalar, IndexType, ADims, step>::apply(
281
+ a, op, ::min(step, static_cast<int>(totalElements - linearIndex)), linearIndex);
282
+ }
283
+ }
284
+
285
+
286
+ template <typename Op,
287
+ typename scalar1,
288
+ typename scalar2,
289
+ typename IndexType,
290
+ int ADims,
291
+ int BDims,
292
+ int remaining_steps,
293
+ typename... Offsets>
294
+ struct ApplyOp2 {
295
+ __device__ __forceinline__
296
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
297
+ detail::TensorInfo<scalar2, IndexType> &b,
298
+ const Op &op, int64_t n, IndexType linearIndex,
299
+ Offsets... aOffsets, Offsets... bOffsets) {
300
+ // Convert `linearIndex` into an offset of `a`
301
+ const IndexType aOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
302
+ detail::IndexToOffset<scalar1, IndexType, ADims>::get(linearIndex, a) : 0;
303
+
304
+ // Convert `linearIndex` into an offset of `b`
305
+ const IndexType bOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
306
+ detail::IndexToOffset<scalar2, IndexType, BDims>::get(linearIndex, b) : 0;
307
+
308
+ ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, remaining_steps - 1, const IndexType, Offsets...>::apply(
309
+ a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset
310
+ );
311
+ }
312
+ };
313
+
314
+ // Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
315
+ // We don't need to pass in how many elements need to be processed in this case.
316
+ template <typename Op,
317
+ typename scalar1,
318
+ typename scalar2,
319
+ typename IndexType,
320
+ int ADims,
321
+ int BDims,
322
+ typename Offset>
323
+ struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offset> {
324
+ __device__ __forceinline__
325
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
326
+ detail::TensorInfo<scalar2, IndexType> &b,
327
+ const Op &op, int /*n*/, IndexType /*linearIndex*/,
328
+ Offset aOffset, Offset bOffset) {
329
+ op(a.data[aOffset], b.data[bOffset]);
330
+ }
331
+ };
332
+
333
+ template <typename Op,
334
+ typename scalar1,
335
+ typename scalar2,
336
+ typename IndexType,
337
+ int ADims,
338
+ int BDims,
339
+ typename... Offsets>
340
+ struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offsets...> {
341
+ __device__ __forceinline__
342
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
343
+ detail::TensorInfo<scalar2, IndexType> &b,
344
+ const Op &op, int n, IndexType linearIndex,
345
+ Offsets... aOffsets, Offsets... bOffsets) {
346
+ op(n, a.data[aOffsets]..., b.data[bOffsets]...);
347
+ }
348
+ };
349
+
350
+ template <typename Op,
351
+ typename scalar1,
352
+ typename scalar2,
353
+ typename IndexType,
354
+ int ADims, int BDims,
355
+ int step,
356
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
357
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
358
+ #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
359
+ C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm)
360
+ #endif
361
+ __global__ void
362
+ kernelPointwiseApply2(detail::TensorInfo<scalar1, IndexType> a,
363
+ detail::TensorInfo<scalar2, IndexType> b,
364
+ IndexType totalElements,
365
+ const Op op) {
366
+ for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
367
+ linearIndex < totalElements;
368
+ linearIndex += gridDim.x * blockDim.x * step) {
369
+ ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, step>::apply(
370
+ a, b, op, ::min(step, static_cast<int>(totalElements - linearIndex)),
371
+ linearIndex);
372
+ }
373
+ }
374
+
375
+ } // anonymous namespace
376
+
377
+ template <typename scalar1, typename scalar2, int step, typename Op,
378
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
379
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
380
+ inline bool CUDA_tensor_apply2(at::TensorBase a,
381
+ at::TensorBase b,
382
+ const Op op,
383
+ TensorArgType aType = TensorArgType::ReadWrite,
384
+ TensorArgType bType = TensorArgType::ReadOnly) {
385
+ TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(),
386
+ "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got "
387
+ "tensors with type ", a.device().type(), " and ", b.device().type());
388
+ int64_t totalElements = a.numel();
389
+
390
+ if (totalElements != b.numel()) {
391
+ return false;
392
+ }
393
+
394
+ if (a.dim() > MAX_TENSORINFO_DIMS ||
395
+ b.dim() > MAX_TENSORINFO_DIMS) {
396
+ return false;
397
+ }
398
+
399
+ if (a.numel() == 0) {
400
+ // Empty tensor; do nothing
401
+ return true;
402
+ }
403
+ const dim3 block = getApplyBlock(max_threads_per_block);
404
+
405
+ dim3 grid;
406
+ int64_t curDevice = current_device();
407
+ if (curDevice == -1) return false;
408
+ if (!getApplyGrid<step>(totalElements, grid, curDevice, max_threads_per_block)) {
409
+ return false;
410
+ }
411
+
412
+ /*
413
+ Expands readable/writable tensors whose indices may be "overlapped."
414
+ This ensures that each element of the tensor is operated on once and only
415
+ once.
416
+ */
417
+ TensorBase oldA;
418
+ TensorBase oldB;
419
+
420
+ if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) {
421
+ // Must perform in contiguous space
422
+ oldA = std::exchange(a, a.contiguous());
423
+ }
424
+ if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) {
425
+ // Must perform in contiguous space
426
+ oldB = std::exchange(b, b.contiguous());
427
+ }
428
+
429
+ // It is possible that the tensor dimensions are able to be collapsed,
430
+ // and thus we can reduce the actual code complexity of the copy by
431
+ // exploiting this knowledge statically, since the div/mod is the
432
+ // most expensive part of the operation, more so than memory accesses.
433
+ // For instance, when copying a non-contiguous to a contiguous tensor
434
+ // (or vice versa), the contiguous tensor can be collapsed to one
435
+ // dimension, and the loop to translate the linear index to the array
436
+ // index can be similarly collapsed. That is what this unrolling is for.
437
+
438
+ #define HANDLE_CASE(TYPE, A, B) \
439
+ kernelPointwiseApply2<Op, \
440
+ scalar1, \
441
+ scalar2, \
442
+ TYPE, A, B, step, \
443
+ max_threads_per_block, \
444
+ min_blocks_per_sm> \
445
+ <<<grid, block, 0, at::cuda::getCurrentCUDAStream(curDevice)>>>( \
446
+ aInfo, bInfo, static_cast<TYPE>(totalElements), op); \
447
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
448
+
449
+ #define HANDLE_B_CASE(TYPE, A, B) { \
450
+ switch (B) { \
451
+ case 1: \
452
+ HANDLE_CASE(TYPE, A, 1); \
453
+ break; \
454
+ case 2: \
455
+ HANDLE_CASE(TYPE, A, 2); \
456
+ break; \
457
+ default: \
458
+ HANDLE_CASE(TYPE, A, -1); \
459
+ break; \
460
+ } \
461
+ }
462
+
463
+ #define HANDLE_A_CASE(TYPE, A, B) { \
464
+ switch (A) { \
465
+ case 1: \
466
+ HANDLE_B_CASE(TYPE, 1, B); \
467
+ break; \
468
+ case 2: \
469
+ HANDLE_B_CASE(TYPE, 2, B); \
470
+ break; \
471
+ default: \
472
+ HANDLE_B_CASE(TYPE, -1, B); \
473
+ break; \
474
+ } \
475
+ }
476
+
477
+ if (detail::canUse32BitIndexMath(a) &&
478
+ detail::canUse32BitIndexMath(b)) {
479
+ detail::TensorInfo<scalar1, unsigned int> aInfo =
480
+ detail::getTensorInfo<scalar1, unsigned int>(a);
481
+
482
+ detail::TensorInfo<scalar2, unsigned int> bInfo =
483
+ detail::getTensorInfo<scalar2, unsigned int>(b);
484
+ rearrangeDims(&aInfo, &bInfo);
485
+ aInfo.collapseDims();
486
+ bInfo.collapseDims();
487
+
488
+ HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims);
489
+ } else {
490
+ detail::TensorInfo<scalar1, uint64_t> aInfo =
491
+ detail::getTensorInfo<scalar1, uint64_t>(a);
492
+
493
+ detail::TensorInfo<scalar2, uint64_t> bInfo =
494
+ detail::getTensorInfo<scalar2, uint64_t>(b);
495
+ rearrangeDims(&aInfo, &bInfo);
496
+ aInfo.collapseDims();
497
+ bInfo.collapseDims();
498
+
499
+ /*
500
+ Only instantiates the all-1D special case and the fallback all-nD case for
501
+ large (64-bit indexed) tensors to reduce compilation time.
502
+ */
503
+ if (aInfo.dims == 1 && bInfo.dims == 1) {
504
+ HANDLE_CASE(uint64_t, 1, 1);
505
+ } else {
506
+ HANDLE_CASE(uint64_t, -1, -1);
507
+ }
508
+ }
509
+ #undef HANDLE_CASE
510
+ #undef HANDLE_B_CASE
511
+ #undef HANDLE_A_CASE
512
+
513
+ if (oldA.defined()) {
514
+ at::native::copy_ignoring_overlaps(oldA, a);
515
+ }
516
+
517
+ if (oldB.defined()) {
518
+ at::native::copy_ignoring_overlaps(oldB, b);
519
+ }
520
+
521
+ return true;
522
+ }
523
+
524
+ /* Provides default step = 1 to CUDA_tensor_apply2. */
525
+ template <typename scalar1, typename scalar2, typename Op,
526
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
527
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
528
+ inline bool CUDA_tensor_apply2(const at::TensorBase &a,
529
+ const at::TensorBase &b,
530
+ const Op op,
531
+ TensorArgType aType = TensorArgType::ReadWrite,
532
+ TensorArgType bType = TensorArgType::ReadOnly) {
533
+ return CUDA_tensor_apply2<scalar1, scalar2, 1, Op,
534
+ max_threads_per_block, min_blocks_per_sm>(a, b, op, aType, bType);
535
+ }
536
+
537
+ } // namespace at::cuda
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h ADDED
@@ -0,0 +1,334 @@
1
+ #pragma once
2
+ /*
3
+ Provides a subset of CUDA BLAS functions as templates:
4
+
5
+ gemm<Dtype>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c,
6
+ ldc)
7
+
8
+ gemv<Dtype>(transa, m, n, alpha, a, lda, x, incx, beta, y, incy)
9
+
10
+ dot<Dtype>(n, x, incx, y, incy, result)
11
+
12
+ where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot).
13
+ The functions are available in at::cuda::blas namespace.
14
+ */
15
+
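As a hedged illustration of the gemm template described above (column-major layout, valid device pointers, and no transposition are assumptions; this is not part of the header):

// Hypothetical single-precision C = A * B via at::cuda::blas::gemm<float>.
void sgemm_example(int64_t m, int64_t n, int64_t k,
                   const float* A, const float* B, float* C) {
  at::cuda::blas::gemm<float>(
      /*transa=*/'n', /*transb=*/'n', m, n, k,
      /*alpha=*/1.0f, A, /*lda=*/m, B, /*ldb=*/k,
      /*beta=*/0.0f, C, /*ldc=*/m);
}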
16
+ #include <ATen/cuda/CUDAContext.h>
17
+ #include <ATen/OpMathType.h>
18
+
19
+ namespace at::cuda::blas {
20
+
21
+ // RAII guard that sets the CuBLAS pointer mode and restores it to
22
+ // its previous value when the guard is destroyed
23
+ class PointerModeGuard {
24
+ public:
25
+ PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) :
26
+ handle(handle) {
27
+ TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode));
28
+ TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode));
29
+ }
30
+
31
+ ~PointerModeGuard() {
32
+ cublasSetPointerMode(handle, previous_mode);
33
+ }
34
+
35
+ private:
36
+ cublasHandle_t handle;
37
+ cublasPointerMode_t previous_mode;
38
+ };
39
+
40
+ /* LEVEL 3 BLAS FUNCTIONS */
41
+
42
+ #define CUDABLAS_GEMM_ARGTYPES(Dtype) \
43
+ char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
44
+ const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type<Dtype> beta,\
45
+ Dtype *c, int64_t ldc
46
+
47
+ template <typename Dtype>
48
+ inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
49
+ AT_ERROR("at::cuda::blas::gemm: not implemented for ", typeid(Dtype).name());
50
+ }
51
+
52
+ template <>
53
+ void gemm<double>(CUDABLAS_GEMM_ARGTYPES(double));
54
+ template <>
55
+ void gemm<float>(CUDABLAS_GEMM_ARGTYPES(float));
56
+ template <>
57
+ void gemm<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
58
+ template <>
59
+ void gemm<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
60
+ template <>
61
+ void gemm<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
62
+ template <>
63
+ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
64
+
65
+ #if !defined(USE_ROCM) && !defined(_MSC_VER)
66
+ enum GEMMAndBiasActivationEpilogue {
67
+ None,
68
+ RELU,
69
+ GELU,
70
+ };
71
+
72
+ // NOTE: GELU activation is not supported prior to CUDA 11.4 and will
73
+ // do nothing if passed in that case.
74
+ template <typename Dtype>
75
+ void gemm_and_bias(
76
+ bool transpose_mat1,
77
+ bool transpose_mat2,
78
+ int64_t m,
79
+ int64_t n,
80
+ int64_t k,
81
+ at::opmath_type<Dtype> alpha_val,
82
+ const Dtype* mat1_ptr,
83
+ int64_t mat1_ld,
84
+ const Dtype* mat2_ptr,
85
+ int64_t mat2_ld,
86
+ const Dtype* bias,
87
+ Dtype* result_ptr,
88
+ int64_t result_ld,
89
+ GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None);
90
+
91
+ void int8_gemm(
92
+ bool transpose_mat1,
93
+ bool transpose_mat2,
94
+ int64_t m,
95
+ int64_t n,
96
+ int64_t k,
97
+ const int8_t* mat1_ptr,
98
+ int64_t mat1_ld,
99
+ const int8_t* mat2_ptr,
100
+ int64_t mat2_ld,
101
+ int32_t* result_ptr,
102
+ int64_t result_ld);
103
+
104
+ void scaled_gemm(
105
+ char transa,
106
+ char transb,
107
+ int64_t m,
108
+ int64_t n,
109
+ int64_t k,
110
+ const void* mat1_ptr,
111
+ const void* mat1_scale_ptr,
112
+ int64_t mat1_ld,
113
+ ScalarType mat1_dtype,
114
+ const void* mat2_ptr,
115
+ const void* mat2_scale_ptr,
116
+ int64_t mat2_ld,
117
+ ScalarType mat2_dtype,
118
+ const void* bias_ptr,
119
+ ScalarType bias_dtype,
120
+ void* result_ptr,
121
+ const void* result_scale_ptr,
122
+ int64_t result_ld,
123
+ ScalarType result_dtype,
124
+ void* amax_ptr,
125
+ bool use_fast_accum);
126
+ #endif
127
+
128
+ #define CUDABLAS_BGEMM_ARGTYPES(Dtype) \
129
+ char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
130
+ const Dtype *a, int64_t lda, int64_t stridea, \
131
+ const Dtype *b, int64_t ldb, int64_t strideb, \
132
+ at::opmath_type<Dtype> beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches
133
+
134
+ template <typename Dtype>
135
+ inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
136
+ AT_ERROR("at::cuda::blas::bgemm: not implemented for ", typeid(Dtype).name());
137
+ }
138
+
139
+ template <>
140
+ void bgemm<double>(CUDABLAS_BGEMM_ARGTYPES(double));
141
+ template <>
142
+ void bgemm<float>(CUDABLAS_BGEMM_ARGTYPES(float));
143
+ template <>
144
+ void bgemm<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
145
+ template <>
146
+ void bgemm<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
147
+ template <>
148
+ void bgemm<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
149
+ template <>
150
+ void bgemm<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));
151
+
152
+ #if defined(USE_ROCM) && ROCM_VERSION <= 55000
153
+ // ROCm 5.6 hipblas matches the const Dtype *A API, but prior hipblas does not.
154
+ #define CUDABLAS_TRSM_ARGTYPES(Dtype) \
155
+ hipblasHandle_t handle, hipblasSideMode_t side, hipblasFillMode_t uplo, \
156
+ hipblasOperation_t trans, hipblasDiagType_t diag, int m, int n, \
157
+ const Dtype *alpha, Dtype *A, int lda, Dtype *B, int ldb
158
+ #else
159
+ #define CUDABLAS_TRSM_ARGTYPES(Dtype) \
160
+ cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
161
+ cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \
162
+ const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb
163
+ #endif
164
+
165
+ template <typename Dtype>
166
+ inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) {
167
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::trsm: not implemented for ", typeid(Dtype).name());
168
+ }
169
+
170
+ template <>
171
+ TORCH_CUDA_CU_API void trsm<float>(CUDABLAS_TRSM_ARGTYPES(float));
172
+ template <>
173
+ TORCH_CUDA_CU_API void trsm<double>(CUDABLAS_TRSM_ARGTYPES(double));
174
+ template <>
175
+ TORCH_CUDA_CU_API void trsm<c10::complex<float>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<float>));
176
+ template <>
177
+ TORCH_CUDA_CU_API void trsm<c10::complex<double>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<double>));
178
+
179
+ #define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \
180
+ cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
181
+ cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \
182
+ const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \
183
+ int batchCount
184
+
185
+ template <typename Dtype>
186
+ inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) {
187
+ TORCH_INTERNAL_ASSERT(
188
+ false,
189
+ "at::cuda::blas::trsmBatched: not implemented for ",
190
+ typeid(Dtype).name());
191
+ }
192
+
193
+ template <>
194
+ TORCH_CUDA_CU_API void trsmBatched<float>(CUDABLAS_TRSM_BATCHED_ARGTYPES(float));
195
+ template <>
196
+ TORCH_CUDA_CU_API void trsmBatched<double>(CUDABLAS_TRSM_BATCHED_ARGTYPES(double));
197
+ template <>
198
+ TORCH_CUDA_CU_API void trsmBatched<c10::complex<float>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<float>));
199
+ template <>
200
+ TORCH_CUDA_CU_API void trsmBatched<c10::complex<double>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<double>));
201
+
202
+ /* LEVEL 2 BLAS FUNCTIONS */
203
+
204
+ #define CUDABLAS_GEMV_ARGTYPES(Dtype) \
205
+ char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \
206
+ const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy
207
+
208
+ template <typename Dtype>
209
+ inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) {
210
+ AT_ERROR("at::cuda::blas::gemv: not implemented for ", typeid(Dtype).name());
211
+ }
212
+
213
+ template <>
214
+ void gemv<double>(CUDABLAS_GEMV_ARGTYPES(double));
215
+ template <>
216
+ void gemv<float>(CUDABLAS_GEMV_ARGTYPES(float));
217
+ template <>
218
+ void gemv<c10::complex<double>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<double>));
219
+ template <>
220
+ void gemv<c10::complex<float>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<float>));
221
+ template <>
222
+ void gemv<at::Half>(CUDABLAS_GEMV_ARGTYPES(at::Half));
223
+ template <>
224
+ void gemv<at::BFloat16>(CUDABLAS_GEMV_ARGTYPES(at::BFloat16));
225
+
226
+ /* LEVEL 1 BLAS FUNCTIONS */
227
+
228
+ #define CUDABLAS_DOT_ARGTYPES(Dtype) \
229
+ cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \
230
+ int incy, Dtype *result
231
+
232
+ template <typename Dtype>
233
+ inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
234
+ AT_ERROR("at::cuda::blas::dot: not implemented for ", typeid(Dtype).name());
235
+ }
236
+
237
+ template <>
238
+ void dot<double>(CUDABLAS_DOT_ARGTYPES(double));
239
+ template <>
240
+ void dot<float>(CUDABLAS_DOT_ARGTYPES(float));
241
+ template <>
242
+ void dot<at::Half>(CUDABLAS_DOT_ARGTYPES(at::Half));
243
+ template <>
244
+ void dot<at::BFloat16>(CUDABLAS_DOT_ARGTYPES(at::BFloat16));
245
+ template <>
246
+ void dot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
247
+ template <>
248
+ void dot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
249
+
250
+ template <typename Dtype>
251
+ inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
252
+ AT_ERROR("at::cuda::blas::vdot: not implemented for ", typeid(Dtype).name());
253
+ }
254
+
255
+ template <>
256
+ void vdot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
257
+ template <>
258
+ void vdot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
259
+
260
+ #define CUDABLAS_GETRS_ARGTYPES(Dtype) \
261
+ cublasHandle_t handle, cublasOperation_t trans, \
262
+ int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \
263
+ Dtype** dB_array, int ldb, int* info_array, int batchsize
264
+
265
+ template<class Dtype>
266
+ void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) {
267
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::getrsBatched: not implemented for ",
268
+ typeid(Dtype).name());
269
+ }
270
+ template<>
271
+ TORCH_CUDA_CU_API void getrsBatched<float>(CUDABLAS_GETRS_ARGTYPES(float));
272
+ template<>
273
+ TORCH_CUDA_CU_API void getrsBatched<double>(CUDABLAS_GETRS_ARGTYPES(double));
274
+ template<>
275
+ TORCH_CUDA_CU_API void getrsBatched<c10::complex<float>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<float>));
276
+ template<>
277
+ TORCH_CUDA_CU_API void getrsBatched<c10::complex<double>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<double>));
278
+
279
+ #define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \
280
+ cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \
281
+ Dtype **tau_array, int *info, int batchsize
282
+
283
+ template <class Dtype>
284
+ void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) {
285
+ TORCH_INTERNAL_ASSERT(
286
+ false,
287
+ "at::cuda::blas::geqrfBatched: not implemented for ",
288
+ typeid(Dtype).name());
289
+ }
290
+ template <>
291
+ TORCH_CUDA_CU_API void geqrfBatched<float>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float));
292
+ template <>
293
+ TORCH_CUDA_CU_API void geqrfBatched<double>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double));
294
+ template <>
295
+ TORCH_CUDA_CU_API void geqrfBatched<c10::complex<double>>(
296
+ CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<double>));
297
+ template <>
298
+ TORCH_CUDA_CU_API void geqrfBatched<c10::complex<float>>(
299
+ CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<float>));
300
+
301
+ #define CUDABLAS_GETRF_ARGTYPES(Dtype) \
302
+ int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize
303
+
304
+ template<class Dtype>
305
+ void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) {
306
+ TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented for ", typeid(Dtype).name());
307
+ }
308
+ template<>
309
+ TORCH_CUDA_CU_API void getrfBatched<float>(CUDABLAS_GETRF_ARGTYPES(float));
310
+ template<>
311
+ TORCH_CUDA_CU_API void getrfBatched<double>(CUDABLAS_GETRF_ARGTYPES(double));
312
+ template<>
313
+ TORCH_CUDA_CU_API void getrfBatched<c10::complex<double>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<double>));
314
+ template<>
315
+ TORCH_CUDA_CU_API void getrfBatched<c10::complex<float>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<float>));
316
+
317
+ #define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \
318
+ cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize
319
+
320
+ template <class Dtype>
321
+ void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) {
322
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::gelsBatched: not implemented for ", typeid(Dtype).name());
323
+ }
324
+
325
+ template<>
326
+ TORCH_CUDA_CU_API void gelsBatched<double>(CUDABLAS_GELS_BATCHED_ARGTYPES(double));
327
+ template<>
328
+ TORCH_CUDA_CU_API void gelsBatched<float>(CUDABLAS_GELS_BATCHED_ARGTYPES(float));
329
+ template<>
330
+ TORCH_CUDA_CU_API void gelsBatched<c10::complex<double>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<double>));
331
+ template<>
332
+ TORCH_CUDA_CU_API void gelsBatched<c10::complex<float>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<float>));
333
+
334
+ } // namespace at::cuda::blas
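
A minimal usage sketch for the templated gemm wrapper declared in CUDABlas.h above. The helper name and the device buffers d_a, d_b, d_c are illustrative; they are assumed to be column-major matrices of sizes m x k, k x n and m x n already allocated on the current CUDA device, and 'n' follows the usual cuBLAS no-transpose convention.

#include <ATen/cuda/CUDABlas.h>

#include <cstdint>

// C = 1.0 * A * B + 0.0 * C for column-major float matrices on the GPU.
void gemm_example(const float* d_a, const float* d_b, float* d_c,
                  int64_t m, int64_t n, int64_t k) {
  at::cuda::blas::gemm<float>(
      /*transa=*/'n', /*transb=*/'n',
      m, n, k,
      /*alpha=*/1.0f,
      d_a, /*lda=*/m,
      d_b, /*ldb=*/k,
      /*beta=*/0.0f,
      d_c, /*ldc=*/m);
}

The bgemm, gemv and dot wrappers above follow the same pattern: the unspecialized template raises "not implemented", and only the listed scalar types dispatch to cuBLAS.
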
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ // Test these using #if AT_CUDNN_ENABLED(), not #ifdef, so that it's
4
+ // obvious if you forgot to include Config.h
5
+ // c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
6
+ //
7
+ // NB: This header MUST NOT be included from other headers; it should
8
+ // only be included from C++ files.
9
+ #define AT_CUDNN_ENABLED() 1
10
+ #define AT_CUSPARSELT_ENABLED() 1
11
+ #define AT_ROCM_ENABLED() 0
12
+ #define AT_MAGMA_ENABLED() 1
13
+
14
+ // Needed for hipMAGMA to correctly identify implementation
15
+ #if (AT_ROCM_ENABLED() && AT_MAGMA_ENABLED())
16
+ #define HAVE_HIP 1
17
+ #endif
18
+
19
+ #define NVCC_FLAGS_EXTRA "-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90"
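
A brief sketch of the check pattern the comment above asks for: because AT_CUDNN_ENABLED is a function-like macro, #if AT_CUDNN_ENABLED() fails at preprocessing time if this header was forgotten, whereas #ifdef would silently pick the wrong branch. The constant name below is illustrative.

#include <ATen/cuda/CUDAConfig.h>  // from a .cpp file only, per the note above

#if AT_CUDNN_ENABLED()
constexpr bool kHasCudnn = true;   // cuDNN-backed path
#else
constexpr bool kHasCudnn = false;  // fallback path
#endif
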
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContextLight.h>
4
+
5
+ // Preserved for BC, as many files depend on these includes
6
+ #include <ATen/Context.h>
7
+ #include <c10/cuda/CUDAStream.h>
8
+ #include <c10/util/Logging.h>
9
+ #include <ATen/cuda/Exceptions.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h ADDED
@@ -0,0 +1,86 @@
1
+ #pragma once
2
+ // Light-weight version of CUDAContext.h with fewer transitive includes
3
+
4
+ #include <cstdint>
5
+
6
+ #include <cuda_runtime_api.h>
7
+ #include <cusparse.h>
8
+ #include <cublas_v2.h>
9
+
10
+ #ifdef CUDART_VERSION
11
+ #include <cusolverDn.h>
12
+ #endif
13
+
14
+ #if defined(USE_ROCM) && ROCM_VERSION >= 50300
15
+ #include <hipsolver/hipsolver.h>
16
+ #endif
17
+
18
+ #include <c10/core/Allocator.h>
19
+ #include <c10/cuda/CUDAFunctions.h>
20
+
21
+ namespace c10 {
22
+ struct Allocator;
23
+ }
24
+
25
+ namespace at::cuda {
26
+
27
+ /*
28
+ A common CUDA interface for ATen.
29
+
30
+ This interface is distinct from CUDAHooks, which defines an interface that links
31
+ to both CPU-only and CUDA builds. That interface is intended for runtime
32
+ dispatch and should be used from files that are included in both CPU-only and
33
+ CUDA builds.
34
+
35
+ CUDAContext, on the other hand, should be preferred by files only included in
36
+ CUDA builds. It is intended to expose CUDA functionality in a consistent
37
+ manner.
38
+
39
+ This means there is some overlap between the CUDAContext and CUDAHooks, but
40
+ the choice of which to use is simple: use CUDAContext when in a CUDA-only file,
41
+ use CUDAHooks otherwise.
42
+
43
+ Note that CUDAContext simply defines an interface with no associated class.
44
+ It is expected that the modules whose functions compose this interface will
45
+ manage their own state. There is only a single CUDA context/state.
46
+ */
47
+
48
+ /**
49
+ * DEPRECATED: use device_count() instead
50
+ */
51
+ inline int64_t getNumGPUs() {
52
+ return c10::cuda::device_count();
53
+ }
54
+
55
+ /**
56
+ * CUDA is available if we compiled with CUDA, and there are one or more
57
+ * devices. If we compiled with CUDA but there is a driver problem, etc.,
58
+ * this function will report CUDA is not available (rather than raise an error.)
59
+ */
60
+ inline bool is_available() {
61
+ return c10::cuda::device_count() > 0;
62
+ }
63
+
64
+ TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties();
65
+
66
+ TORCH_CUDA_CPP_API int warp_size();
67
+
68
+ TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(int64_t device);
69
+
70
+ TORCH_CUDA_CPP_API bool canDeviceAccessPeer(
71
+ int64_t device,
72
+ int64_t peer_device);
73
+
74
+ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
75
+
76
+ /* Handles */
77
+ TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
78
+ TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
79
+
80
+ TORCH_CUDA_CPP_API void clearCublasWorkspaces();
81
+
82
+ #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300
83
+ TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle();
84
+ #endif
85
+
86
+ } // namespace at::cuda
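
A minimal sketch of the light-weight context API above; the helper name is illustrative, and it assumes a CUDA build with at least one visible device by the time it queries device properties.

#include <ATen/cuda/CUDAContextLight.h>

#include <iostream>

void report_current_device() {
  if (!at::cuda::is_available()) {
    return;  // no usable device: skip property queries and handle creation
  }
  cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
  std::cout << prop->name << " (warp size " << at::cuda::warp_size() << ")\n";
  cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  (void)handle;  // would be passed on to cuBLAS routines
}
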
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h ADDED
@@ -0,0 +1,101 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+
5
+ #include <cuda.h>
6
+ #include <library_types.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ template <typename scalar_t>
11
+ cudaDataType getCudaDataType() {
12
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.")
13
+ }
14
+
15
+ template<> inline cudaDataType getCudaDataType<at::Half>() {
16
+ return CUDA_R_16F;
17
+ }
18
+ template<> inline cudaDataType getCudaDataType<float>() {
19
+ return CUDA_R_32F;
20
+ }
21
+ template<> inline cudaDataType getCudaDataType<double>() {
22
+ return CUDA_R_64F;
23
+ }
24
+ template<> inline cudaDataType getCudaDataType<c10::complex<c10::Half>>() {
25
+ return CUDA_C_16F;
26
+ }
27
+ template<> inline cudaDataType getCudaDataType<c10::complex<float>>() {
28
+ return CUDA_C_32F;
29
+ }
30
+ template<> inline cudaDataType getCudaDataType<c10::complex<double>>() {
31
+ return CUDA_C_64F;
32
+ }
33
+
34
+ // HIP doesn't define integral types
35
+ #ifndef USE_ROCM
36
+ template<> inline cudaDataType getCudaDataType<uint8_t>() {
37
+ return CUDA_R_8U;
38
+ }
39
+ template<> inline cudaDataType getCudaDataType<int8_t>() {
40
+ return CUDA_R_8I;
41
+ }
42
+ template<> inline cudaDataType getCudaDataType<int>() {
43
+ return CUDA_R_32I;
44
+ }
45
+ #endif
46
+
47
+ #if !defined(USE_ROCM)
48
+ template<> inline cudaDataType getCudaDataType<int16_t>() {
49
+ return CUDA_R_16I;
50
+ }
51
+ template<> inline cudaDataType getCudaDataType<int64_t>() {
52
+ return CUDA_R_64I;
53
+ }
54
+ template<> inline cudaDataType getCudaDataType<at::BFloat16>() {
55
+ return CUDA_R_16BF;
56
+ }
57
+ #endif
58
+
59
+ inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) {
60
+ switch (scalar_type) {
61
+ // HIP doesn't define integral types
62
+ #ifndef USE_ROCM
63
+ case c10::ScalarType::Byte:
64
+ return CUDA_R_8U;
65
+ case c10::ScalarType::Char:
66
+ return CUDA_R_8I;
67
+ case c10::ScalarType::Int:
68
+ return CUDA_R_32I;
69
+ #endif
70
+ case c10::ScalarType::Half:
71
+ return CUDA_R_16F;
72
+ case c10::ScalarType::Float:
73
+ return CUDA_R_32F;
74
+ case c10::ScalarType::Double:
75
+ return CUDA_R_64F;
76
+ case c10::ScalarType::ComplexHalf:
77
+ return CUDA_C_16F;
78
+ case c10::ScalarType::ComplexFloat:
79
+ return CUDA_C_32F;
80
+ case c10::ScalarType::ComplexDouble:
81
+ return CUDA_C_64F;
82
+ #if !defined(USE_ROCM)
83
+ case c10::ScalarType::Short:
84
+ return CUDA_R_16I;
85
+ case c10::ScalarType::Long:
86
+ return CUDA_R_64I;
87
+ case c10::ScalarType::BFloat16:
88
+ return CUDA_R_16BF;
89
+ #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080
90
+ case c10::ScalarType::Float8_e4m3fn:
91
+ return CUDA_R_8F_E4M3;
92
+ case c10::ScalarType::Float8_e5m2:
93
+ return CUDA_R_8F_E5M2;
94
+ #endif
95
+ #endif
96
+ default:
97
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.")
98
+ }
99
+ }
100
+
101
+ } // namespace at::cuda
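
A minimal sketch of the two mappings above; both resolve a Torch scalar type to the cudaDataType enum that cuBLAS/cuSPARSE descriptor APIs expect. The function name is illustrative.

#include <ATen/cuda/CUDADataType.h>

void dtype_mapping_example() {
  cudaDataType rt = at::cuda::ScalarTypeToCudaDataType(c10::ScalarType::Float);  // CUDA_R_32F
  cudaDataType ct = at::cuda::getCudaDataType<at::Half>();                       // CUDA_R_16F
  (void)rt;
  (void)ct;
}
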
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/Exceptions.h>
4
+
5
+ #include <cuda.h>
6
+ #include <cuda_runtime.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ inline Device getDeviceFromPtr(void* ptr) {
11
+ cudaPointerAttributes attr{};
12
+
13
+ AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr));
14
+
15
+ #if !defined(USE_ROCM)
16
+ TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered,
17
+ "The specified pointer resides on host memory and is not registered with any CUDA device.");
18
+ #endif
19
+
20
+ return {c10::DeviceType::CUDA, static_cast<DeviceIndex>(attr.device)};
21
+ }
22
+
23
+ } // namespace at::cuda
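
A minimal sketch, assuming ptr came from cudaMalloc (or another allocation registered with the CUDA runtime); the helper name is illustrative.

#include <ATen/cuda/CUDADevice.h>
#include <c10/core/Device.h>

at::Device device_of(void* ptr) {
  // Throws on unregistered host memory (see the TORCH_CHECK above).
  return at::cuda::getDeviceFromPtr(ptr);
}
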
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h ADDED
@@ -0,0 +1,208 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/ATenCUDAGeneral.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <c10/core/impl/GPUTrace.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <c10/cuda/CUDAGuard.h>
8
+ #include <ATen/cuda/Exceptions.h>
9
+ #include <c10/util/Exception.h>
10
+
11
+ #include <cuda_runtime_api.h>
12
+
13
+ #include <cstdint>
14
+ #include <utility>
15
+
16
+ namespace at::cuda {
17
+
18
+ /*
19
+ * CUDAEvents are movable, not copyable, wrappers around CUDA's events.
20
+ *
21
+ * A CUDAEvent is constructed lazily when it is first recorded, unless it is
22
+ * reconstructed from a cudaIpcEventHandle_t. The event has a device, and this
23
+ * device is acquired from the first recording stream. However, if reconstructed
24
+ * from a handle, the device should be explicitly specified; or if ipc_handle() is
25
+ * called before the event is ever recorded, it will use the current device.
26
+ * Later streams that record the event must match this device.
27
+ */
28
+ struct TORCH_CUDA_CPP_API CUDAEvent {
29
+ // Constructors
30
+ // Default value for `flags` is specified below - it's cudaEventDisableTiming
31
+ CUDAEvent() noexcept = default;
32
+ CUDAEvent(unsigned int flags) noexcept : flags_{flags} {}
33
+
34
+ CUDAEvent(
35
+ DeviceIndex device_index, const cudaIpcEventHandle_t* handle) {
36
+ device_index_ = device_index;
37
+ CUDAGuard guard(device_index_);
38
+
39
+ AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle));
40
+ is_created_ = true;
41
+ }
42
+
43
+ // Note: event destruction done on creating device to avoid creating a
44
+ // CUDA context on other devices.
45
+ ~CUDAEvent() {
46
+ try {
47
+ if (is_created_) {
48
+ CUDAGuard guard(device_index_);
49
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
50
+ if (C10_UNLIKELY(interp)) {
51
+ (*interp)->trace_gpu_event_deletion(reinterpret_cast<uintptr_t>(event_));
52
+ }
53
+ cudaEventDestroy(event_);
54
+ }
55
+ } catch (...) { /* No throw */ }
56
+ }
57
+
58
+ CUDAEvent(const CUDAEvent&) = delete;
59
+ CUDAEvent& operator=(const CUDAEvent&) = delete;
60
+
61
+ CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); }
62
+ CUDAEvent& operator=(CUDAEvent&& other) noexcept {
63
+ if (this != &other) {
64
+ moveHelper(std::move(other));
65
+ }
66
+ return *this;
67
+ }
68
+
69
+ operator cudaEvent_t() const { return event(); }
70
+
71
+ // Less than operator (to allow use in sets)
72
+ friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) {
73
+ return left.event_ < right.event_;
74
+ }
75
+
76
+ optional<at::Device> device() const {
77
+ if (is_created_) {
78
+ return at::Device(at::kCUDA, device_index_);
79
+ } else {
80
+ return {};
81
+ }
82
+ }
83
+
84
+ bool isCreated() const { return is_created_; }
85
+ DeviceIndex device_index() const {return device_index_;}
86
+ cudaEvent_t event() const { return event_; }
87
+
88
+ // Note: cudaEventQuery can be safely called from any device
89
+ bool query() const {
90
+ if (!is_created_) {
91
+ return true;
92
+ }
93
+
94
+ cudaError_t err = cudaEventQuery(event_);
95
+ if (err == cudaSuccess) {
96
+ return true;
97
+ } else if (err != cudaErrorNotReady) {
98
+ C10_CUDA_CHECK(err);
99
+ } else {
100
+ // ignore and clear the error if not ready
101
+ (void)cudaGetLastError();
102
+ }
103
+
104
+ return false;
105
+ }
106
+
107
+ void record() { record(getCurrentCUDAStream()); }
108
+
109
+ void recordOnce(const CUDAStream& stream) {
110
+ if (!was_recorded_) record(stream);
111
+ }
112
+
113
+ // Note: cudaEventRecord must be called on the same device as the event.
114
+ void record(const CUDAStream& stream) {
115
+ if (!is_created_) {
116
+ createEvent(stream.device_index());
117
+ }
118
+
119
+ TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_,
120
+ " does not match recording stream's device ", stream.device_index(), ".");
121
+ CUDAGuard guard(device_index_);
122
+ AT_CUDA_CHECK(cudaEventRecord(event_, stream));
123
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
124
+ if (C10_UNLIKELY(interp)) {
125
+ (*interp)->trace_gpu_event_record(
126
+ reinterpret_cast<uintptr_t>(event_),
127
+ reinterpret_cast<uintptr_t>(stream.stream())
128
+ );
129
+ }
130
+ was_recorded_ = true;
131
+ }
132
+
133
+ // Note: cudaStreamWaitEvent must be called on the same device as the stream.
134
+ // The event has no actual GPU resources associated with it.
135
+ void block(const CUDAStream& stream) {
136
+ if (is_created_) {
137
+ CUDAGuard guard(stream.device_index());
138
+ AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0));
139
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
140
+ if (C10_UNLIKELY(interp)) {
141
+ (*interp)->trace_gpu_event_wait(
142
+ reinterpret_cast<uintptr_t>(event_),
143
+ reinterpret_cast<uintptr_t>(stream.stream())
144
+ );
145
+ }
146
+ }
147
+ }
148
+
149
+ // Note: cudaEventElapsedTime can be safely called from any device
150
+ float elapsed_time(const CUDAEvent& other) const {
151
+ TORCH_CHECK(is_created_ && other.isCreated(),
152
+ "Both events must be recorded before calculating elapsed time.");
153
+ float time_ms = 0;
154
+ // raise cudaErrorNotReady if either event is recorded but not yet completed
155
+ AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_));
156
+ return time_ms;
157
+ }
158
+
159
+ // Note: cudaEventSynchronize can be safely called from any device
160
+ void synchronize() const {
161
+ if (is_created_) {
162
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
163
+ if (C10_UNLIKELY(interp)) {
164
+ (*interp)->trace_gpu_event_synchronization(reinterpret_cast<uintptr_t>(event_));
165
+ }
166
+ AT_CUDA_CHECK(cudaEventSynchronize(event_));
167
+ }
168
+ }
169
+
170
+ // Note: cudaIpcGetEventHandle must be called on the same device as the event
171
+ void ipc_handle(cudaIpcEventHandle_t * handle) {
172
+ if (!is_created_) {
173
+ // this CUDAEvent object was initially constructed from flags but event_
174
+ // is not created yet.
175
+ createEvent(getCurrentCUDAStream().device_index());
176
+ }
177
+ CUDAGuard guard(device_index_);
178
+ AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_));
179
+ }
180
+
181
+ private:
182
+ unsigned int flags_ = cudaEventDisableTiming;
183
+ bool is_created_ = false;
184
+ bool was_recorded_ = false;
185
+ DeviceIndex device_index_ = -1;
186
+ cudaEvent_t event_{};
187
+
188
+ void createEvent(DeviceIndex device_index) {
189
+ device_index_ = device_index;
190
+ CUDAGuard guard(device_index_);
191
+ AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_));
192
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
193
+ if (C10_UNLIKELY(interp)) {
194
+ (*interp)->trace_gpu_event_creation(reinterpret_cast<uintptr_t>(event_));
195
+ }
196
+ is_created_ = true;
197
+ }
198
+
199
+ void moveHelper(CUDAEvent&& other) {
200
+ std::swap(flags_, other.flags_);
201
+ std::swap(is_created_, other.is_created_);
202
+ std::swap(was_recorded_, other.was_recorded_);
203
+ std::swap(device_index_, other.device_index_);
204
+ std::swap(event_, other.event_);
205
+ }
206
+ };
207
+
208
+ } // namespace at::cuda
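
A minimal timing sketch built on the class above; the helper name and the enqueue_work callback are illustrative. Note that the class defaults to cudaEventDisableTiming, so events intended for elapsed_time() must be constructed with cudaEventDefault.

#include <ATen/cuda/CUDAEvent.h>

#include <functional>

float time_on_current_stream(const std::function<void()>& enqueue_work) {
  at::cuda::CUDAEvent start(cudaEventDefault);
  at::cuda::CUDAEvent stop(cudaEventDefault);
  auto stream = at::cuda::getCurrentCUDAStream();
  start.record(stream);
  enqueue_work();      // launch kernels on `stream`
  stop.record(stream);
  stop.synchronize();  // both events have completed once `stop` has
  return start.elapsed_time(stop);  // milliseconds
}
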
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h ADDED
@@ -0,0 +1,138 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Generator.h>
4
+ #include <ATen/cuda/PhiloxCudaState.h>
5
+ #include <ATen/Context.h>
6
+ #include <limits>
7
+ #include <atomic>
8
+
9
+ namespace at {
10
+ /**
11
+ * Note [CUDA Graph-safe RNG states]
12
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13
+ *
14
+ * Strategy:
15
+ * ~~~~~~~~~
16
+ * (It helps to look at
17
+ * cuda/detail/PhiloxCudaStateRaw.cuh and
18
+ * cuda/detail/UnpackRaw.cuh
19
+ * while you read this.)
20
+ *
21
+ * A CUDA graph containing multiple RNG ops behaves like a
22
+ * single giant kernel from the perspective of ops external
23
+ * to the graph. During graph capture, logic in CUDAGeneratorImpl
24
+ * records the total of all offset increments that occur in the
25
+ * graphed region, and records the final total as the offset for
26
+ * the entire graph.
27
+ *
28
+ * When the graph reruns, the logic that reruns it
29
+ * increments this device's CUDA generator's offset
30
+ * by that total.
31
+ *
32
+ * Meanwhile, within the graph, at capture time, instead of
33
+ * populating PhiloxCudaStates with the uint64_t offset pulled
34
+ * directly from the global state, PhiloxCudaState uses a pointer
35
+ * to a one-element stream-local int64_t device tensor
36
+ * holding an initial offset value, and a uint64_t holding an
37
+ * intra-graph offset. (The intra-graph offset starts from zero
38
+ * when capture begins.) In each consumer kernel,
39
+ * at::cuda::philox::unpack computes the offset to use for this kernel
40
+ * as intra-graph offset + *initial offset.
41
+ *
42
+ * When the graph reruns, the logic that reruns it first
43
+ * fill_s the initial offset tensor with this device's
44
+ * CUDA generator's current offset.
45
+ *
46
+ * The control flow above ensures graphed execution is bitwise
47
+ * identical to eager execution as long as RNG ops are enqueued
48
+ * from a single thread, even if RNG ops and graphs containing
49
+ * RNG ops are enqueued and run simultaneously on multiple streams.
50
+ *
51
+ * Usage:
52
+ * ~~~~~~
53
+ * PhiloxCudaState in this file, and unpack() in
54
+ * cuda/CUDAGraphsUtils.cuh allow non-divergent use of
55
+ * CUDAGeneratorImpl whether graph capture is underway or not.
56
+ *
57
+ * Each PhiloxCudaState instance should be used for one and only one
58
+ * consumer kernel.
59
+ *
60
+ * Example (see e.g. native/cuda/Dropout.cu):
61
+ *
62
+ * #include <ATen/cuda/CUDAGeneratorImpl.h>
63
+ * #include <ATen/cuda/CUDAGraphsUtils.cuh>
64
+ *
65
+ * __global__ void kernel(..., PhiloxCudaState philox_args) {
66
+ * auto seeds = at::cuda::philox::unpack(philox_args);
67
+ * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
68
+ * curandStatePhilox4_32_10_t state;
69
+ * curand_init(std::get<0>(seeds), // seed
70
+ * idx, // per-thread subsequence
71
+ * std::get<1>(seeds), // offset in subsequence
72
+ * &state);
73
+ * ...
74
+ * }
75
+ *
76
+ * host_caller(...) {
77
+ * PhiloxCudaState rng_engine_inputs;
78
+ * {
79
+ * // See Note [Acquire lock when using random generators]
80
+ * std::lock_guard<std::mutex> lock(gen->mutex_);
81
+ *
82
+ * // gen could be HostState or DevState here! No divergent code needed!
83
+ * rng_engine_inputs = gen->philox_cuda_state(offset_increment);
84
+ * }
85
+ * kernel<<<...>>>(..., rng_engine_inputs);
86
+ * }
87
+ *
88
+ */
89
+
90
+ struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl {
91
+ // Constructors
92
+ CUDAGeneratorImpl(DeviceIndex device_index = -1);
93
+ ~CUDAGeneratorImpl() override = default;
94
+
95
+ // CUDAGeneratorImpl methods
96
+ std::shared_ptr<CUDAGeneratorImpl> clone() const;
97
+ void set_current_seed(uint64_t seed) override;
98
+ void set_offset(uint64_t offset) override;
99
+ uint64_t get_offset() const override;
100
+ uint64_t current_seed() const override;
101
+ uint64_t seed() override;
102
+ void set_state(const c10::TensorImpl& new_state) override;
103
+ c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
104
+ void set_philox_offset_per_thread(uint64_t offset);
105
+ uint64_t philox_offset_per_thread() const;
106
+ void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph);
107
+ uint64_t capture_epilogue();
108
+ PhiloxCudaState philox_cuda_state(uint64_t increment);
109
+
110
+ bool reset_rnn_state() {
111
+ return !no_reset_rnn_state_.test_and_set();
112
+ }
113
+
114
+ // Temporarily accommodates call sites that use philox_engine_inputs.
115
+ // Allows incremental refactor of call sites to use philox_cuda_state.
116
+ std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment);
117
+
118
+ static c10::DeviceType device_type();
119
+
120
+ private:
121
+ CUDAGeneratorImpl* clone_impl() const override;
122
+ uint64_t seed_ = default_rng_seed_val;
123
+ uint64_t philox_offset_per_thread_ = 0;
124
+ int64_t* seed_extragraph_{};
125
+ int64_t* offset_extragraph_{};
126
+ uint32_t offset_intragraph_ = 0;
127
+ bool graph_expects_this_gen_ = false;
128
+ std::atomic_flag no_reset_rnn_state_;
129
+ };
130
+
131
+ namespace cuda::detail {
132
+
133
+ TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator(
134
+ DeviceIndex device_index = -1);
135
+ TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1);
136
+
137
+ } // namespace cuda::detail
138
+ } // namespace at
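
A minimal host-side sketch that complements the kernel-oriented example in the note above: fetch the default CUDA generator and reseed it while holding its mutex, per Note [Acquire lock when using random generators]. The helper name is illustrative.

#include <ATen/cuda/CUDAGeneratorImpl.h>

#include <mutex>

void reseed_default_cuda_generator(uint64_t seed) {
  auto gen = at::cuda::detail::getDefaultCUDAGenerator();
  std::lock_guard<std::mutex> lock(gen.mutex());
  gen.set_current_seed(seed);
}
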
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h ADDED
@@ -0,0 +1,87 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+
8
+ #include <mutex>
9
+
10
+ namespace at {
11
+
12
+ struct CUDAGeneratorImpl;
13
+
14
+ namespace cuda {
15
+
16
+ // Standalone way to get a unique mempool id usable as a pool=... argument
17
+ // to CUDAGraph::capture_begin
18
+ TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle();
19
+
20
+ struct TORCH_CUDA_CPP_API CUDAGraph {
21
+ CUDAGraph();
22
+ ~CUDAGraph();
23
+
24
+ static void inc_pending_event_queries();
25
+ static void dec_pending_event_queries();
26
+ static int num_pending_event_queries();
27
+ void capture_begin(MempoolId_t pool={0, 0}, cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal);
28
+ void capture_end();
29
+ void replay();
30
+ void reset();
31
+ MempoolId_t pool();
32
+ void enable_debug_mode();
33
+ void debug_dump(const std::string& debug_path);
34
+
35
+ protected:
36
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
37
+ cudaGraph_t graph_ = NULL;
38
+ cudaGraphExec_t graph_exec_ = NULL;
39
+ #endif
40
+
41
+ static std::atomic<int> pending_event_queries;
42
+
43
+ // internal states so reset() can do its best cleaning up
44
+ // Set to true in capture_end if cudaStreamEndCapture succeeded
45
+ // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate
46
+ // to create graph_exec_, then graph_ is deleted
47
+ bool has_graph_ = false;
48
+ // Set to true in capture_end if cudaGraphInstantiate succeeded
49
+ bool has_graph_exec_ = false;
50
+
51
+ // uuid of this instance's current capture, retrieved from Cuda
52
+ CaptureId_t id_;
53
+
54
+ // uuid used to request a particular private mempool from CUDACachingAllocator.
55
+ // By default, this will be set to {id_, 0}.
56
+ //
57
+ // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_
58
+ // will be set to the other graph's mempool_id_, and therefore share a mempool with the
59
+ // other graph.
60
+ //
61
+ // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(),
62
+ // it will share a mempool with any other captures that used "pool=handle".
63
+ //
64
+ // Sharing a mempool across graphs saves memory, and it's safe if you
65
+ // know you'll replay those graphs in the same order you captured them.
66
+ MempoolId_t mempool_id_;
67
+
68
+ // Stream on which capture began
69
+ at::cuda::CUDAStream capture_stream_;
70
+
71
+ // Default generator on device where capture began
72
+ at::CUDAGeneratorImpl* capture_gen_;
73
+
74
+ // Device where capture occurred. Right now, for simplicity, we require all ops
75
+ // in a capture to run on the same device, but this is a limitation of CUDAGraph,
76
+ // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device
77
+ // captures if needed.
78
+ int capture_dev_;
79
+
80
+ // RNG state trackers
81
+ at::Tensor seed_extragraph_;
82
+ at::Tensor offset_extragraph_;
83
+ uint64_t wholegraph_increment_;
84
+ };
85
+
86
+ } // namespace cuda
87
+ } // namespace at
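
A minimal sketch of the capture/replay flow declared above; the helper name and the enqueue_work callback are illustrative, and the warmup iterations usually performed before capture are omitted. Capture must happen on a non-default stream, hence the stream guard.

#include <ATen/cuda/CUDAGraph.h>

#include <c10/cuda/CUDAGuard.h>
#include <functional>

void capture_then_replay(const std::function<void()>& enqueue_work) {
  at::cuda::CUDAGraph graph;
  c10::cuda::CUDAStream stream = c10::cuda::getStreamFromPool();
  {
    c10::cuda::CUDAStreamGuard guard(stream);
    graph.capture_begin();
    enqueue_work();    // kernels launched here are recorded, not executed
    graph.capture_end();
  }
  graph.replay();      // runs the recorded work
}
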
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAGeneratorImpl.h>
4
+ #include <ATen/cuda/CUDAEvent.h>
5
+ #include <ATen/cuda/PhiloxUtils.cuh>
6
+ #include <ATen/cuda/detail/CUDAHooks.h>
7
+ #include <ATen/detail/CUDAHooksInterface.h>
8
+ #include <c10/core/StreamGuard.h>
9
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
10
+ #include <c10/cuda/CUDAGuard.h>
11
+
12
+ // c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten.
13
+ // This file adds utils used by aten only.
14
+
15
+ namespace at::cuda {
16
+
17
+ using CaptureId_t = c10::cuda::CaptureId_t;
18
+ using CaptureStatus = c10::cuda::CaptureStatus;
19
+
20
+ // Use this version where you don't want to create a CUDA context if none exists.
21
+ inline CaptureStatus currentStreamCaptureStatus() {
22
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
23
+ // don't create a context if we don't have to
24
+ if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) {
25
+ return c10::cuda::currentStreamCaptureStatusMayInitCtx();
26
+ } else {
27
+ return CaptureStatus::None;
28
+ }
29
+ #else
30
+ return CaptureStatus::None;
31
+ #endif
32
+ }
33
+
34
+ inline void assertNotCapturing(std::string attempt) {
35
+ auto status = currentStreamCaptureStatus();
36
+ TORCH_CHECK(status == CaptureStatus::None,
37
+ attempt,
38
+ " during CUDA graph capture. If you need this call to be captured, "
39
+ "please file an issue. "
40
+ "Current cudaStreamCaptureStatus: ",
41
+ status);
42
+ }
43
+
44
+ inline void errorIfCapturingCudnnBenchmark(std::string version_specific) {
45
+ auto status = currentStreamCaptureStatus();
46
+ TORCH_CHECK(status == CaptureStatus::None,
47
+ "Current cudaStreamCaptureStatus: ",
48
+ status,
49
+ "\nCapturing ",
50
+ version_specific,
51
+ "is prohibited. Possible causes of this error:\n"
52
+ "1. No warmup iterations occurred before capture.\n"
53
+ "2. The convolutions you're trying to capture use dynamic shapes, "
54
+ "in which case capturing them is generally prohibited.");
55
+ }
56
+
57
+ } // namespace at::cuda
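
A minimal sketch of the capture guard above, intended for a CUDA translation unit; the operation name is illustrative.

#include <ATen/cuda/CUDAGraphsUtils.cuh>

void hypothetical_host_sync_op() {
  // Throws with the message above if a CUDA graph capture is in progress.
  at::cuda::assertNotCapturing("hypothetical_host_sync_op");
  // ... work that synchronizes with the host would follow ...
}
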
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h ADDED
@@ -0,0 +1,90 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #if defined(USE_ROCM)
5
+ #include <hipsparse/hipsparse-version.h>
6
+ #define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch)
7
+ #endif
8
+
9
+ // cuSparse Generic API added in CUDA 10.1
10
+ // Windows support added in CUDA 11.0
11
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32)))
12
+ #define AT_USE_CUSPARSE_GENERIC_API() 1
13
+ #else
14
+ #define AT_USE_CUSPARSE_GENERIC_API() 0
15
+ #endif
16
+
17
+ // cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0
18
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
19
+ (CUSPARSE_VERSION < 12000)
20
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1
21
+ #else
22
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0
23
+ #endif
24
+
25
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
26
+ (CUSPARSE_VERSION >= 12000)
27
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1
28
+ #else
29
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0
30
+ #endif
31
+
32
+ #if defined(USE_ROCM)
33
+
34
+ // hipSparse const API added in v2.4.0
35
+ #if HIPSPARSE_VERSION >= 200400
36
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1
37
+ #define AT_USE_HIPSPARSE_GENERIC_52_API() 0
38
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
39
+ #else
40
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
41
+
42
+ // hipSparse Generic API ROCm 5.2
43
+ #if ROCM_VERSION >= 50200
44
+ #define AT_USE_HIPSPARSE_GENERIC_52_API() 1
45
+ #else
46
+ #define AT_USE_HIPSPARSE_GENERIC_52_API() 0
47
+ #endif
48
+
49
+ // hipSparse Generic API ROCm 5.1
50
+ #if ROCM_VERSION >= 50100
51
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
52
+ #else
53
+ #define AT_USE_HIPSPARSE_GENERIC_API() 0
54
+ #endif
55
+
56
+ #endif // HIPSPARSE_VERSION >= 200400
57
+ #else // USE_ROCM
58
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
59
+ #define AT_USE_HIPSPARSE_GENERIC_52_API() 0
60
+ #define AT_USE_HIPSPARSE_GENERIC_API() 0
61
+ #endif // USE_ROCM
62
+
63
+ // cuSparse Generic API spsv function was added in CUDA 11.3.0
64
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500)
65
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 1
66
+ #else
67
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 0
68
+ #endif
69
+
70
+ // cuSparse Generic API spsm function was added in CUDA 11.3.1
71
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600)
72
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 1
73
+ #else
74
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 0
75
+ #endif
76
+
77
+ // cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400)
78
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400)
79
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 1
80
+ #else
81
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 0
82
+ #endif
83
+
84
+ // BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0)
85
+ #if defined(CUDART_VERSION) || \
86
+ (defined(USE_ROCM) && ROCM_VERSION >= 40500 )
87
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1
88
+ #else
89
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0
90
+ #endif
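
A brief sketch of how the feature macros above are meant to be consumed: they always expand to 0 or 1, so they belong in #if checks rather than #ifdef. The constant name is illustrative.

#include <ATen/cuda/CUDASparse.h>

#if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_52_API()
constexpr bool kHasGenericSparseApi = true;   // generic SpMM/SpMV code path
#else
constexpr bool kHasGenericSparseApi = false;  // legacy fallback path
#endif
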
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h ADDED
@@ -0,0 +1,318 @@
1
+ #pragma once
2
+
3
+ /*
4
+ Provides a subset of cuSPARSE functions as templates:
5
+
6
+ csrgeam2<scalar_t>(...)
7
+
8
+ where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
9
+ The functions are available in at::cuda::sparse namespace.
10
+ */
11
+
12
+ #include <ATen/cuda/CUDAContext.h>
13
+ #include <ATen/cuda/CUDASparse.h>
14
+
15
+ namespace at::cuda::sparse {
16
+
17
+ #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
18
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
19
+ const cusparseMatDescr_t descrA, int nnzA, \
20
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
21
+ const int *csrSortedColIndA, const scalar_t *beta, \
22
+ const cusparseMatDescr_t descrB, int nnzB, \
23
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
24
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
25
+ const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \
26
+ const int *csrSortedColIndC, size_t *pBufferSizeInBytes
27
+
28
+ template <typename scalar_t>
29
+ inline void csrgeam2_bufferSizeExt(
30
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) {
31
+ TORCH_INTERNAL_ASSERT(
32
+ false,
33
+ "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ",
34
+ typeid(scalar_t).name());
35
+ }
36
+
37
+ template <>
38
+ void csrgeam2_bufferSizeExt<float>(
39
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float));
40
+ template <>
41
+ void csrgeam2_bufferSizeExt<double>(
42
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double));
43
+ template <>
44
+ void csrgeam2_bufferSizeExt<c10::complex<float>>(
45
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<float>));
46
+ template <>
47
+ void csrgeam2_bufferSizeExt<c10::complex<double>>(
48
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<double>));
49
+
50
+ #define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \
51
+ cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \
52
+ int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \
53
+ const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \
54
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
55
+ int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace
56
+
57
+ template <typename scalar_t>
58
+ inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) {
59
+ TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz(
60
+ handle,
61
+ m,
62
+ n,
63
+ descrA,
64
+ nnzA,
65
+ csrSortedRowPtrA,
66
+ csrSortedColIndA,
67
+ descrB,
68
+ nnzB,
69
+ csrSortedRowPtrB,
70
+ csrSortedColIndB,
71
+ descrC,
72
+ csrSortedRowPtrC,
73
+ nnzTotalDevHostPtr,
74
+ workspace));
75
+ }
76
+
77
+ #define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \
78
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
79
+ const cusparseMatDescr_t descrA, int nnzA, \
80
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
81
+ const int *csrSortedColIndA, const scalar_t *beta, \
82
+ const cusparseMatDescr_t descrB, int nnzB, \
83
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
84
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
85
+ scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \
86
+ void *pBuffer
87
+
88
+ template <typename scalar_t>
89
+ inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) {
90
+ TORCH_INTERNAL_ASSERT(
91
+ false,
92
+ "at::cuda::sparse::csrgeam2: not implemented for ",
93
+ typeid(scalar_t).name());
94
+ }
95
+
96
+ template <>
97
+ void csrgeam2<float>(CUSPARSE_CSRGEAM2_ARGTYPES(float));
98
+ template <>
99
+ void csrgeam2<double>(CUSPARSE_CSRGEAM2_ARGTYPES(double));
100
+ template <>
101
+ void csrgeam2<c10::complex<float>>(
102
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<float>));
103
+ template <>
104
+ void csrgeam2<c10::complex<double>>(
105
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<double>));
106
+
107
+ #define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \
108
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
109
+ cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \
110
+ int kb, int nnzb, const scalar_t *alpha, \
111
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
112
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
113
+ const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc
114
+
115
+ template <typename scalar_t>
116
+ inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) {
117
+ TORCH_INTERNAL_ASSERT(
118
+ false,
119
+ "at::cuda::sparse::bsrmm: not implemented for ",
120
+ typeid(scalar_t).name());
121
+ }
122
+
123
+ template <>
124
+ void bsrmm<float>(CUSPARSE_BSRMM_ARGTYPES(float));
125
+ template <>
126
+ void bsrmm<double>(CUSPARSE_BSRMM_ARGTYPES(double));
127
+ template <>
128
+ void bsrmm<c10::complex<float>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<float>));
129
+ template <>
130
+ void bsrmm<c10::complex<double>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<double>));
131
+
132
+ #define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \
133
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
134
+ cusparseOperation_t transA, int mb, int nb, int nnzb, \
135
+ const scalar_t *alpha, const cusparseMatDescr_t descrA, \
136
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
137
+ int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y
138
+
139
+ template <typename scalar_t>
140
+ inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) {
141
+ TORCH_INTERNAL_ASSERT(
142
+ false,
143
+ "at::cuda::sparse::bsrmv: not implemented for ",
144
+ typeid(scalar_t).name());
145
+ }
146
+
147
+ template <>
148
+ void bsrmv<float>(CUSPARSE_BSRMV_ARGTYPES(float));
149
+ template <>
150
+ void bsrmv<double>(CUSPARSE_BSRMV_ARGTYPES(double));
151
+ template <>
152
+ void bsrmv<c10::complex<float>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<float>));
153
+ template <>
154
+ void bsrmv<c10::complex<double>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<double>));
155
+
156
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
157
+
158
+ #define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \
159
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
160
+ cusparseOperation_t transA, int mb, int nnzb, \
161
+ const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
162
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
163
+ bsrsv2Info_t info, int *pBufferSizeInBytes
164
+
165
+ template <typename scalar_t>
166
+ inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) {
167
+ TORCH_INTERNAL_ASSERT(
168
+ false,
169
+ "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ",
170
+ typeid(scalar_t).name());
171
+ }
172
+
173
+ template <>
174
+ void bsrsv2_bufferSize<float>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float));
175
+ template <>
176
+ void bsrsv2_bufferSize<double>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double));
177
+ template <>
178
+ void bsrsv2_bufferSize<c10::complex<float>>(
179
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<float>));
180
+ template <>
181
+ void bsrsv2_bufferSize<c10::complex<double>>(
182
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<double>));
183
+
184
+ #define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \
185
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
186
+ cusparseOperation_t transA, int mb, int nnzb, \
187
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
188
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
189
+ bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
190
+
191
+ template <typename scalar_t>
192
+ inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) {
193
+ TORCH_INTERNAL_ASSERT(
194
+ false,
195
+ "at::cuda::sparse::bsrsv2_analysis: not implemented for ",
196
+ typeid(scalar_t).name());
197
+ }
198
+
199
+ template <>
200
+ void bsrsv2_analysis<float>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float));
201
+ template <>
202
+ void bsrsv2_analysis<double>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double));
203
+ template <>
204
+ void bsrsv2_analysis<c10::complex<float>>(
205
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<float>));
206
+ template <>
207
+ void bsrsv2_analysis<c10::complex<double>>(
208
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<double>));
209
+
210
+ #define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \
211
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
212
+ cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \
213
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
214
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
215
+ bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \
216
+ cusparseSolvePolicy_t policy, void *pBuffer
217
+
218
+ template <typename scalar_t>
219
+ inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) {
220
+ TORCH_INTERNAL_ASSERT(
221
+ false,
222
+ "at::cuda::sparse::bsrsv2_solve: not implemented for ",
223
+ typeid(scalar_t).name());
224
+ }
225
+
226
+ template <>
227
+ void bsrsv2_solve<float>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float));
228
+ template <>
229
+ void bsrsv2_solve<double>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double));
230
+ template <>
231
+ void bsrsv2_solve<c10::complex<float>>(
232
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<float>));
233
+ template <>
234
+ void bsrsv2_solve<c10::complex<double>>(
235
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<double>));
236
+
237
+ #define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \
238
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
239
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
240
+ int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
241
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
242
+ bsrsm2Info_t info, int *pBufferSizeInBytes
243
+
244
+ template <typename scalar_t>
245
+ inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) {
246
+ TORCH_INTERNAL_ASSERT(
247
+ false,
248
+ "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ",
249
+ typeid(scalar_t).name());
250
+ }
251
+
252
+ template <>
253
+ void bsrsm2_bufferSize<float>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float));
254
+ template <>
255
+ void bsrsm2_bufferSize<double>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double));
256
+ template <>
257
+ void bsrsm2_bufferSize<c10::complex<float>>(
258
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<float>));
259
+ template <>
260
+ void bsrsm2_bufferSize<c10::complex<double>>(
261
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<double>));
262
+
263
+ #define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \
264
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
265
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
266
+ int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
267
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
268
+ bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
269
+
270
+ template <typename scalar_t>
271
+ inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) {
272
+ TORCH_INTERNAL_ASSERT(
273
+ false,
274
+ "at::cuda::sparse::bsrsm2_analysis: not implemented for ",
275
+ typeid(scalar_t).name());
276
+ }
277
+
278
+ template <>
279
+ void bsrsm2_analysis<float>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float));
280
+ template <>
281
+ void bsrsm2_analysis<double>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double));
282
+ template <>
283
+ void bsrsm2_analysis<c10::complex<float>>(
284
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<float>));
285
+ template <>
286
+ void bsrsm2_analysis<c10::complex<double>>(
287
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<double>));
288
+
289
+ #define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \
290
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
291
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
292
+ int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \
293
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
294
+ int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \
295
+ scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer
296
+
297
+ template <typename scalar_t>
298
+ inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) {
299
+ TORCH_INTERNAL_ASSERT(
300
+ false,
301
+ "at::cuda::sparse::bsrsm2_solve: not implemented for ",
302
+ typeid(scalar_t).name());
303
+ }
304
+
305
+ template <>
306
+ void bsrsm2_solve<float>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float));
307
+ template <>
308
+ void bsrsm2_solve<double>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double));
309
+ template <>
310
+ void bsrsm2_solve<c10::complex<float>>(
311
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<float>));
312
+ template <>
313
+ void bsrsm2_solve<c10::complex<double>>(
314
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<double>));
315
+
316
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
317
+
318
+ } // namespace at::cuda::sparse
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h ADDED
@@ -0,0 +1,261 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/cuda/CUDASparse.h>
6
+
7
+ #include <c10/core/ScalarType.h>
8
+
9
+ #if defined(USE_ROCM)
10
+ #include <type_traits>
11
+ #endif
12
+
13
+ namespace at::cuda::sparse {
14
+
15
+ template <typename T, cusparseStatus_t (*destructor)(T*)>
16
+ struct CuSparseDescriptorDeleter {
17
+ void operator()(T* x) {
18
+ if (x != nullptr) {
19
+ TORCH_CUDASPARSE_CHECK(destructor(x));
20
+ }
21
+ }
22
+ };
23
+
24
+ template <typename T, cusparseStatus_t (*destructor)(T*)>
25
+ class CuSparseDescriptor {
26
+ public:
27
+ T* descriptor() const {
28
+ return descriptor_.get();
29
+ }
30
+ T* descriptor() {
31
+ return descriptor_.get();
32
+ }
33
+
34
+ protected:
35
+ std::unique_ptr<T, CuSparseDescriptorDeleter<T, destructor>> descriptor_;
36
+ };
37
+
38
+ #if AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
39
+ template <typename T, cusparseStatus_t (*destructor)(const T*)>
40
+ struct ConstCuSparseDescriptorDeleter {
41
+ void operator()(T* x) {
42
+ if (x != nullptr) {
43
+ TORCH_CUDASPARSE_CHECK(destructor(x));
44
+ }
45
+ }
46
+ };
47
+
48
+ template <typename T, cusparseStatus_t (*destructor)(const T*)>
49
+ class ConstCuSparseDescriptor {
50
+ public:
51
+ T* descriptor() const {
52
+ return descriptor_.get();
53
+ }
54
+ T* descriptor() {
55
+ return descriptor_.get();
56
+ }
57
+
58
+ protected:
59
+ std::unique_ptr<T, ConstCuSparseDescriptorDeleter<T, destructor>> descriptor_;
60
+ };
61
+ #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS
62
+
63
+ #if defined(USE_ROCM)
64
+ using cusparseMatDescr = std::remove_pointer<hipsparseMatDescr_t>::type;
65
+ using cusparseDnMatDescr = std::remove_pointer<hipsparseDnMatDescr_t>::type;
66
+ using cusparseDnVecDescr = std::remove_pointer<hipsparseDnVecDescr_t>::type;
67
+ using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
68
+ using cusparseSpGEMMDescr = std::remove_pointer<hipsparseSpGEMMDescr_t>::type;
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
+ using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type;
+ using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type;
+ #endif
+ #endif
+
+ class TORCH_CUDA_CPP_API CuSparseMatDescriptor
+     : public CuSparseDescriptor<cusparseMatDescr, &cusparseDestroyMatDescr> {
+  public:
+   CuSparseMatDescriptor() {
+     cusparseMatDescr_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
+     descriptor_.reset(raw_descriptor);
+   }
+
+   CuSparseMatDescriptor(bool upper, bool unit) {
+     cusparseFillMode_t fill_mode =
+         upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
+     cusparseDiagType_t diag_type =
+         unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
+     cusparseMatDescr_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
+     TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode));
+     TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type));
+     descriptor_.reset(raw_descriptor);
+   }
+ };
+
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
+
+ class TORCH_CUDA_CPP_API CuSparseBsrsv2Info
+     : public CuSparseDescriptor<bsrsv2Info, &cusparseDestroyBsrsv2Info> {
+  public:
+   CuSparseBsrsv2Info() {
+     bsrsv2Info_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor));
+     descriptor_.reset(raw_descriptor);
+   }
+ };
+
+ class TORCH_CUDA_CPP_API CuSparseBsrsm2Info
+     : public CuSparseDescriptor<bsrsm2Info, &cusparseDestroyBsrsm2Info> {
+  public:
+   CuSparseBsrsm2Info() {
+     bsrsm2Info_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor));
+     descriptor_.reset(raw_descriptor);
+   }
+ };
+
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
+
+ #if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
+
+ cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type);
+
+ #if AT_USE_HIPSPARSE_GENERIC_52_API() || \
+     (AT_USE_CUSPARSE_GENERIC_API() && AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS())
+ class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
+     : public CuSparseDescriptor<cusparseDnMatDescr, &cusparseDestroyDnMat> {
+  public:
+   explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
+ };
+
+ class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
+     : public CuSparseDescriptor<cusparseDnVecDescr, &cusparseDestroyDnVec> {
+  public:
+   explicit CuSparseDnVecDescriptor(const Tensor& input);
+ };
+
+ class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
+     : public CuSparseDescriptor<cusparseSpMatDescr, &cusparseDestroySpMat> {};
+
+ // AT_USE_HIPSPARSE_GENERIC_52_API() || (AT_USE_CUSPARSE_GENERIC_API() && AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS())
+
+ #elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
+ class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
+     : public ConstCuSparseDescriptor<
+           cusparseDnMatDescr,
+           &cusparseDestroyDnMat> {
+  public:
+   explicit CuSparseDnMatDescriptor(
+       const Tensor& input,
+       int64_t batch_offset = -1);
+ };
+
+ class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
+     : public ConstCuSparseDescriptor<
+           cusparseDnVecDescr,
+           &cusparseDestroyDnVec> {
+  public:
+   explicit CuSparseDnVecDescriptor(const Tensor& input);
+ };
+
+ class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
+     : public ConstCuSparseDescriptor<
+           cusparseSpMatDescr,
+           &cusparseDestroySpMat> {};
+ #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS()
+
+ class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor
+     : public CuSparseSpMatDescriptor {
+  public:
+   explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1);
+
+   std::tuple<int64_t, int64_t, int64_t> get_size() {
+     int64_t rows, cols, nnz;
+     TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize(
+         this->descriptor(),
+         &rows,
+         &cols,
+         &nnz));
+     return std::make_tuple(rows, cols, nnz);
+   }
+
+   void set_tensor(const Tensor& input) {
+     auto crow_indices = input.crow_indices();
+     auto col_indices = input.col_indices();
+     auto values = input.values();
+
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous());
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous());
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous());
+     TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers(
+         this->descriptor(),
+         crow_indices.data_ptr(),
+         col_indices.data_ptr(),
+         values.data_ptr()));
+   }
+
+ #if AT_USE_CUSPARSE_GENERIC_SPSV()
+   void set_mat_fill_mode(bool upper) {
+     cusparseFillMode_t fill_mode =
+         upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
+     TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
+         this->descriptor(),
+         CUSPARSE_SPMAT_FILL_MODE,
+         &fill_mode,
+         sizeof(fill_mode)));
+   }
+
+   void set_mat_diag_type(bool unit) {
+     cusparseDiagType_t diag_type =
+         unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
+     TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
+         this->descriptor(),
+         CUSPARSE_SPMAT_DIAG_TYPE,
+         &diag_type,
+         sizeof(diag_type)));
+   }
+ #endif
+ };
+
+ #if AT_USE_CUSPARSE_GENERIC_SPSV()
+ class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor
+     : public CuSparseDescriptor<cusparseSpSVDescr, &cusparseSpSV_destroyDescr> {
+  public:
+   CuSparseSpSVDescriptor() {
+     cusparseSpSVDescr_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor));
+     descriptor_.reset(raw_descriptor);
+   }
+ };
+ #endif
+
+ #if AT_USE_CUSPARSE_GENERIC_SPSM()
+ class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor
+     : public CuSparseDescriptor<cusparseSpSMDescr, &cusparseSpSM_destroyDescr> {
+  public:
+   CuSparseSpSMDescriptor() {
+     cusparseSpSMDescr_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor));
+     descriptor_.reset(raw_descriptor);
+   }
+ };
+ #endif
+
+ #if (defined(USE_ROCM) && ROCM_VERSION >= 50200) || !defined(USE_ROCM)
+ class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor
+     : public CuSparseDescriptor<cusparseSpGEMMDescr, &cusparseSpGEMM_destroyDescr> {
+  public:
+   CuSparseSpGEMMDescriptor() {
+     cusparseSpGEMMDescr_t raw_descriptor;
+     TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor));
+     descriptor_.reset(raw_descriptor);
+   }
+ };
+ #endif
+
+ #endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
+
+ } // namespace at::cuda::sparse
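
Usage sketch (editorial, not part of the committed header): the wrappers above pair a raw cuSPARSE handle with a std::unique_ptr whose deleter calls the matching cusparseDestroy* function, so the descriptor is released exactly once when the wrapper leaves scope. A minimal example under the assumption of a CUDA build where this header and TORCH_CUDASPARSE_CHECK are available; the function name is illustrative only.

#include <ATen/cuda/CUDASparseDescriptors.h>

// Hypothetical helper: configure a legacy cuSPARSE matrix descriptor for an
// upper-triangular, non-unit-diagonal matrix. The underlying cusparseMatDescr_t
// is destroyed automatically by CuSparseDescriptorDeleter when `desc` goes out
// of scope.
void describe_upper_triangular() {
  at::cuda::sparse::CuSparseMatDescriptor desc(/*upper=*/true, /*unit=*/false);
  cusparseMatDescr_t raw = desc.descriptor();  // hand `raw` to legacy cuSPARSE APIs
  (void)raw;
}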
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/util/Half.h>
+
+ #include <cuda.h>
+ #include <cuda_runtime.h>
+ #include <cuda_fp16.h>
+
+ namespace at {
+ template <>
+ inline __half* Tensor::data() const {
+   return reinterpret_cast<__half*>(data<Half>());
+ }
+ } // namespace at
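
Usage sketch (editorial): the specialization above lets CUDA code view a kHalf tensor's storage as the native __half type without an explicit reinterpret_cast at every call site. A minimal, hypothetical launcher, assuming `t` is a contiguous CUDA tensor of dtype at::kHalf.

#include <ATen/cuda/CUDATensorMethods.cuh>

void use_half_pointer(const at::Tensor& t) {
  // Resolves to the Tensor::data() specialization defined above.
  __half* p = t.data<__half>();
  // ... hand `p` to a custom __global__ kernel taking __half* ...
  (void)p;
}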
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <ATen/cuda/CUDAContext.h>
+
+ namespace at::cuda {
+
+ // Check if every tensor in a list of tensors matches the current
+ // device.
+ inline bool check_device(ArrayRef<Tensor> ts) {
+   if (ts.empty()) {
+     return true;
+   }
+   Device curDevice = Device(kCUDA, current_device());
+   for (const Tensor& t : ts) {
+     if (t.device() != curDevice) return false;
+   }
+   return true;
+ }
+
+ } // namespace at::cuda
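
Usage sketch (editorial): check_device is typically used as a cheap precondition before launching a fused kernel. The operator name below is hypothetical.

#include <ATen/cuda/CUDAUtils.h>
#include <c10/util/Exception.h>

void fused_op_example(const at::Tensor& a, const at::Tensor& b) {
  // Refuse to run if any input lives on a device other than the current one.
  TORCH_CHECK(at::cuda::check_device({a, b}),
              "fused_op_example: all inputs must be on the current CUDA device");
  // ... launch kernels ...
}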
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/cuda/CUDAStream.h>
+
+ namespace at::cuda {
+
+ //
+ // A caching allocator for CUDA host allocations (pinned memory).
+ //
+ // This provides a drop-in replacement for THCudaHostAllocator, which re-uses
+ // freed pinned (page-locked) memory allocations. This avoids device
+ // synchronizations due to cudaFreeHost calls.
+ //
+ // To ensure correct behavior, CachingHostAllocator_recordEvent must be
+ // called any time a pointer from this allocator is used in a cudaMemcpyAsync
+ // call between host and device, and passed the corresponding context from the
+ // allocation. This is currently invoked by at::native::copy_kernel_cuda.
+ //
+ // Note that this allocator does not split larger allocations into smaller
+ // blocks, unlike the caching device allocator.
+ //
+ TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator();
+
+ // Records an event in the specified stream. The allocation corresponding to the
+ // input `ptr`/`ctx` will not be re-used until the event has occurred.
+ TORCH_CUDA_CPP_API bool
+ CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream);
+
+ // Releases cached pinned memory allocations via cudaFreeHost.
+ TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache();
+
+ inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) {
+   return getCachingHostAllocator()->allocate(size);
+ }
+
+ } // namespace at::cuda
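
Usage sketch (editorial): the comment block above describes a protocol, namely allocate pinned memory from the caching host allocator, issue the asynchronous copy, then record an event so the block is not recycled before the copy finishes. A minimal host-to-device staging helper, assuming a CUDA build; the function name is hypothetical.

#include <ATen/cuda/CachingHostAllocator.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAStream.h>
#include <cuda_runtime.h>
#include <cstring>

void staged_copy_to_device(void* device_dst, const void* host_src, size_t nbytes) {
  at::DataPtr pinned = at::cuda::HostAlloc(nbytes);   // pinned (page-locked) block
  std::memcpy(pinned.get(), host_src, nbytes);        // stage the data on the host
  c10::cuda::CUDAStream stream = c10::cuda::getCurrentCUDAStream();
  AT_CUDA_CHECK(cudaMemcpyAsync(
      device_dst, pinned.get(), nbytes, cudaMemcpyHostToDevice, stream.stream()));
  // Hand the allocation's context back so the cached block is only re-used
  // after the copy's event has completed.
  at::cuda::CachingHostAllocator_recordEvent(pinned.get(), pinned.get_context(), stream);
}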
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh ADDED
@@ -0,0 +1,121 @@
+ #pragma once
+
+ #include <cuda.h>
+ #include <c10/util/complex.h>
+ #include <c10/util/Half.h>
+
+ __device__ __forceinline__ unsigned int ACTIVE_MASK()
+ {
+ #if !defined(USE_ROCM)
+   return __activemask();
+ #else
+   // will be ignored anyway
+   return 0xffffffff;
+ #endif
+ }
+
+ __device__ __forceinline__ void WARP_SYNC(unsigned mask = 0xffffffff) {
+ #if !defined(USE_ROCM)
+   return __syncwarp(mask);
+ #endif
+ }
+
+ #if defined(USE_ROCM)
+ __device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate)
+ {
+   return __ballot(predicate);
+ }
+ #else
+ __device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+   return __ballot_sync(mask, predicate);
+ #else
+   return __ballot(predicate);
+ #endif
+ }
+ #endif
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+   return __shfl_xor_sync(mask, value, laneMask, width);
+ #else
+   return __shfl_xor(value, laneMask, width);
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+   return __shfl_sync(mask, value, srcLane, width);
+ #else
+   return __shfl(value, srcLane, width);
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+   return __shfl_up_sync(mask, value, delta, width);
+ #else
+   return __shfl_up(value, delta, width);
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+   return __shfl_down_sync(mask, value, delta, width);
+ #else
+   return __shfl_down(value, delta, width);
+ #endif
+ }
+
+ #if defined(USE_ROCM)
+ template<>
+ __device__ __forceinline__ int64_t WARP_SHFL_DOWN<int64_t>(int64_t value, unsigned int delta, int width, unsigned int mask)
+ {
+   // (HIP doesn't support int64_t). Trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
+   int2 a = *reinterpret_cast<int2*>(&value);
+   a.x = __shfl_down(a.x, delta);
+   a.y = __shfl_down(a.y, delta);
+   return *reinterpret_cast<int64_t*>(&a);
+ }
+ #endif
+
+ template<>
+ __device__ __forceinline__ c10::Half WARP_SHFL_DOWN<c10::Half>(c10::Half value, unsigned int delta, int width, unsigned int mask)
+ {
+   return c10::Half(WARP_SHFL_DOWN<unsigned short>(value.x, delta, width, mask), c10::Half::from_bits_t{});
+ }
+
+ template <typename T>
+ __device__ __forceinline__ c10::complex<T> WARP_SHFL_DOWN(c10::complex<T> value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+   return c10::complex<T>(
+       __shfl_down_sync(mask, value.real_, delta, width),
+       __shfl_down_sync(mask, value.imag_, delta, width));
+ #else
+   return c10::complex<T>(
+       __shfl_down(value.real_, delta, width),
+       __shfl_down(value.imag_, delta, width));
+ #endif
+ }
+
+ /**
+  * For CC 3.5+, perform a load using __ldg
+  */
+ template <typename T>
+ __device__ __forceinline__ T doLdg(const T* p) {
+ #if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM)
+   return __ldg(p);
+ #else
+   return *p;
+ #endif
+ }
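
Usage sketch (editorial): the wrappers above exist so that the same warp-level code compiles against __shfl_down_sync on CUDA and __shfl_down on ROCm. Below is the classic warp sum reduction built on WARP_SHFL_DOWN, assuming C10_WARP_SIZE from c10/macros/Macros.h; the helper name is illustrative.

#include <ATen/cuda/DeviceUtils.cuh>
#include <c10/macros/Macros.h>

// After the loop, lane 0 of each warp holds the sum of `val` across that warp.
template <typename T>
__device__ __forceinline__ T warp_reduce_sum(T val) {
  for (int offset = C10_WARP_SIZE / 2; offset > 0; offset /= 2) {
    val += WARP_SHFL_DOWN(val, offset);
  }
  return val;
}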
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+ #include <ATen/core/TensorBase.h>
+
+ namespace at::detail {
+
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
+     IntArrayRef size,
+     ScalarType dtype,
+     c10::optional<Device> device_opt,
+     c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
+     IntArrayRef size,
+     c10::optional<ScalarType> dtype_opt,
+     c10::optional<Layout> layout_opt,
+     c10::optional<Device> device_opt,
+     c10::optional<bool> pin_memory_opt,
+     c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
+     IntArrayRef size,
+     const TensorOptions &options);
+
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
+     IntArrayRef size,
+     IntArrayRef stride,
+     ScalarType dtype,
+     c10::optional<Device> device_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
+     IntArrayRef size,
+     IntArrayRef stride,
+     c10::optional<ScalarType> dtype_opt,
+     c10::optional<Layout> layout_opt,
+     c10::optional<Device> device_opt,
+     c10::optional<bool> pin_memory_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
+     IntArrayRef size,
+     IntArrayRef stride,
+     const TensorOptions &options);
+
+ } // namespace at::detail
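
Usage sketch (editorial): the overloads above allocate uninitialized CUDA storage directly, without going through the dispatcher. A minimal call through the TensorOptions overload; the shape, dtype, and function name are arbitrary.

#include <ATen/cuda/EmptyTensor.h>
#include <c10/core/TensorOptions.h>

at::TensorBase make_scratch() {
  // Uninitialized float32 buffer on the current CUDA device.
  return at::detail::empty_cuda(
      {128, 256}, at::TensorOptions().dtype(at::kFloat).device(at::kCUDA));
}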
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h ADDED
@@ -0,0 +1,165 @@
+ #pragma once
+
+ #include <cublas_v2.h>
+ #include <cusparse.h>
+ #include <c10/macros/Export.h>
+
+ #ifdef CUDART_VERSION
+ #include <cusolver_common.h>
+ #endif
+
+ #include <ATen/Context.h>
+ #include <c10/util/Exception.h>
+ #include <c10/cuda/CUDAException.h>
+
+
+ namespace c10 {
+
+ class CuDNNError : public c10::Error {
+   using Error::Error;
+ };
+
+ } // namespace c10
+
+ #define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__)
+
+ // See Note [CHECK macro]
+ #define AT_CUDNN_CHECK(EXPR, ...) \
+   do { \
+     cudnnStatus_t status = EXPR; \
+     if (status != CUDNN_STATUS_SUCCESS) { \
+       if (status == CUDNN_STATUS_NOT_SUPPORTED) { \
+         TORCH_CHECK_WITH(CuDNNError, false, \
+             "cuDNN error: ", \
+             cudnnGetErrorString(status), \
+             ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \
+       } else { \
+         TORCH_CHECK_WITH(CuDNNError, false, \
+             "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \
+       } \
+     } \
+   } while (0)
+
+ namespace at::cuda::blas {
+ C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error);
+ } // namespace at::cuda::blas
+
+ #define TORCH_CUDABLAS_CHECK(EXPR) \
+   do { \
+     cublasStatus_t __err = EXPR; \
+     TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \
+         "CUDA error: ", \
+         at::cuda::blas::_cublasGetErrorEnum(__err), \
+         " when calling `" #EXPR "`"); \
+   } while (0)
+
+ const char *cusparseGetErrorString(cusparseStatus_t status);
+
+ #define TORCH_CUDASPARSE_CHECK(EXPR) \
+   do { \
+     cusparseStatus_t __err = EXPR; \
+     TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \
+         "CUDA error: ", \
+         cusparseGetErrorString(__err), \
+         " when calling `" #EXPR "`"); \
+   } while (0)
+
+ // cusolver related headers are only supported on cuda now
+ #ifdef CUDART_VERSION
+
+ namespace at::cuda::solver {
+ C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status);
+
+ constexpr const char* _cusolver_backend_suggestion = \
+     "If you keep seeing this error, you may use " \
+     "`torch.backends.cuda.preferred_linalg_library()` to try " \
+     "linear algebra operators with other supported backends. " \
+     "See https://pytorch.org/docs/stable/backends.html#torch.backends.cuda.preferred_linalg_library";
+
+ } // namespace at::cuda::solver
+
+ // When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan.
+ // When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue.
+ #define TORCH_CUSOLVER_CHECK(EXPR) \
+   do { \
+     cusolverStatus_t __err = EXPR; \
+     if ((CUDA_VERSION < 11500 && \
+          __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \
+         (CUDA_VERSION >= 11500 && \
+          __err == CUSOLVER_STATUS_INVALID_VALUE)) { \
+       TORCH_CHECK_LINALG( \
+           false, \
+           "cusolver error: ", \
+           at::cuda::solver::cusolverGetErrorMessage(__err), \
+           ", when calling `" #EXPR "`", \
+           ". This error may appear if the input matrix contains NaN. ", \
+           at::cuda::solver::_cusolver_backend_suggestion); \
+     } else { \
+       TORCH_CHECK( \
+           __err == CUSOLVER_STATUS_SUCCESS, \
+           "cusolver error: ", \
+           at::cuda::solver::cusolverGetErrorMessage(__err), \
+           ", when calling `" #EXPR "`. ", \
+           at::cuda::solver::_cusolver_backend_suggestion); \
+     } \
+   } while (0)
+
+ #else
+ #define TORCH_CUSOLVER_CHECK(EXPR) EXPR
+ #endif
+
+ #define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR)
+
+ // For CUDA Driver API
+ //
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
+ // in ATen, and we need to use its nvrtcGetErrorString.
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
+ #if !defined(USE_ROCM)
+
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
+   do { \
+     CUresult __err = EXPR; \
+     if (__err != CUDA_SUCCESS) { \
+       const char* err_str; \
+       CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
+       if (get_error_str_err != CUDA_SUCCESS) { \
+         AT_ERROR("CUDA driver error: unknown error"); \
+       } else { \
+         AT_ERROR("CUDA driver error: ", err_str); \
+       } \
+     } \
+   } while (0)
+
+ #else
+
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
+   do { \
+     CUresult __err = EXPR; \
+     if (__err != CUDA_SUCCESS) { \
+       AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \
+     } \
+   } while (0)
+
+ #endif
+
+ // For CUDA NVRTC
+ //
+ // Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE,
+ // incorrectly produces the error string "NVRTC unknown error."
+ // The following maps it correctly.
+ //
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
+ // in ATen, and we need to use its nvrtcGetErrorString.
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
+ #define AT_CUDA_NVRTC_CHECK(EXPR) \
+   do { \
+     nvrtcResult __err = EXPR; \
+     if (__err != NVRTC_SUCCESS) { \
+       if (static_cast<int>(__err) != 7) { \
+         AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \
+       } else { \
+         AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \
+       } \
+     } \
+   } while (0)
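
Usage sketch (editorial): each family of macros above turns a library-specific status code into a c10 exception with a readable message. Typical call sites, assuming valid handles; nothing here is tied to a particular operator.

#include <ATen/cuda/Exceptions.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>

void check_macro_examples(cublasHandle_t blas_handle, cudaStream_t stream) {
  AT_CUDA_CHECK(cudaStreamSynchronize(stream));               // CUDA runtime status
  TORCH_CUDABLAS_CHECK(cublasSetStream(blas_handle, stream)); // cuBLAS status
}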
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh ADDED
@@ -0,0 +1,121 @@
+ #pragma once
+
+ #include <cuda.h>
+ #include <limits.h>
+ #include <math.h>
+ #include <float.h>
+
+ // NumericLimits.cuh is a holder for numeric limits definitions of commonly used
+ // types. This header is very specific to ROCm HIP and may be removed in the future.
+ // This header is derived from the legacy THCNumerics.cuh.
+
+ // The lower_bound and upper_bound constants are same as lowest and max for
+ // integral types, but are -inf and +inf for floating point types. They are
+ // useful in implementing min, max, etc.
+
+ namespace at {
+
+ template <typename T>
+ struct numeric_limits {
+ };
+
+ // WARNING: the following at::numeric_limits definitions are there only to support
+ // HIP compilation for the moment. Use std::numeric_limits if you are not
+ // compiling for ROCm.
+ // from @colesbury: "The functions on numeric_limits aren't marked with
+ // __device__ which is why they don't work with ROCm. CUDA allows them
+ // because they're constexpr."
+
+ namespace {
+   // ROCm doesn't like INFINITY too.
+   constexpr double inf = INFINITY;
+ }
+
+ template <>
+ struct numeric_limits<bool> {
+   static inline __host__ __device__ bool lowest() { return false; }
+   static inline __host__ __device__ bool max() { return true; }
+   static inline __host__ __device__ bool lower_bound() { return false; }
+   static inline __host__ __device__ bool upper_bound() { return true; }
+ };
+
+ template <>
+ struct numeric_limits<uint8_t> {
+   static inline __host__ __device__ uint8_t lowest() { return 0; }
+   static inline __host__ __device__ uint8_t max() { return UINT8_MAX; }
+   static inline __host__ __device__ uint8_t lower_bound() { return 0; }
+   static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; }
+ };
+
+ template <>
+ struct numeric_limits<int8_t> {
+   static inline __host__ __device__ int8_t lowest() { return INT8_MIN; }
+   static inline __host__ __device__ int8_t max() { return INT8_MAX; }
+   static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; }
+   static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
+ };
+
+ template <>
+ struct numeric_limits<int16_t> {
+   static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
+   static inline __host__ __device__ int16_t max() { return INT16_MAX; }
+   static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; }
+   static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
+ };
+
+ template <>
+ struct numeric_limits<int32_t> {
+   static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
+   static inline __host__ __device__ int32_t max() { return INT32_MAX; }
+   static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; }
+   static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
+ };
+
+ template <>
+ struct numeric_limits<int64_t> {
+ #ifdef _MSC_VER
+   static inline __host__ __device__ int64_t lowest() { return _I64_MIN; }
+   static inline __host__ __device__ int64_t max() { return _I64_MAX; }
+   static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; }
+   static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; }
+ #else
+   static inline __host__ __device__ int64_t lowest() { return INT64_MIN; }
+   static inline __host__ __device__ int64_t max() { return INT64_MAX; }
+   static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; }
+   static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; }
+ #endif
+ };
+
+ template <>
+ struct numeric_limits<at::Half> {
+   static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); }
+   static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); }
+   static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); }
+   static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); }
+ };
+
+ template <>
+ struct numeric_limits<at::BFloat16> {
+   static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); }
+   static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); }
+   static inline __host__ __device__ at::BFloat16 lower_bound() { return at::BFloat16(0xFF80, at::BFloat16::from_bits()); }
+   static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); }
+ };
+
+ template <>
+ struct numeric_limits<float> {
+   static inline __host__ __device__ float lowest() { return -FLT_MAX; }
+   static inline __host__ __device__ float max() { return FLT_MAX; }
+   static inline __host__ __device__ float lower_bound() { return -static_cast<float>(inf); }
+   static inline __host__ __device__ float upper_bound() { return static_cast<float>(inf); }
+ };
+
+ template <>
+ struct numeric_limits<double> {
+   static inline __host__ __device__ double lowest() { return -DBL_MAX; }
+   static inline __host__ __device__ double max() { return DBL_MAX; }
+   static inline __host__ __device__ double lower_bound() { return -inf; }
+   static inline __host__ __device__ double upper_bound() { return inf; }
+ };
+
+ } // namespace at
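
Usage sketch (editorial): lower_bound() and upper_bound() give -inf/+inf for floating-point types and lowest()/max() for integral types, which is exactly what a max (or min) reduction needs as its seed value. A deliberately naive single-thread kernel, assuming a <<<1, 1>>> launch; the kernel name is illustrative.

#include <ATen/cuda/NumericLimits.cuh>

template <typename T>
__global__ void naive_max(const T* in, T* out, int n) {
  T best = at::numeric_limits<T>::lower_bound();  // identity element for max
  for (int i = 0; i < n; ++i) {
    best = in[i] > best ? in[i] : best;
  }
  *out = best;
}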