applied-ai-018 committed
Commit 6b6381c (verified)
Parent: 7d35f20

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h +277 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h +72 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Backend.h +350 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h +56 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ConstantSymNodeImpl.h +97 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h +126 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h +15 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h +44 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Device.h +215 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h +24 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h +732 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h +928 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h +119 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h +107 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h +44 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Layout.h +73 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h +287 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h +31 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h +49 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h +383 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h +57 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SingletonSymNodeImpl.h +176 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Storage.h +261 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h +237 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Stream.h +169 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h +109 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h +362 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h +71 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h +210 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h +773 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h +37 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h +44 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h +336 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h +104 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h +30 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h +61 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h +431 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h +110 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h +256 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h +164 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h +242 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h +192 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h +26 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h +308 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h +65 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h +93 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h +12 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/cow/COW.h +29 -0
.gitattributes CHANGED
@@ -202,3 +202,4 @@ llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-3
  llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_infer.so.8 filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h ADDED
@@ -0,0 +1,277 @@
+ #pragma once
+
+ #include <stddef.h>
+ #include <memory>
+
+ #include <c10/core/Device.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/ThreadLocalDebugInfo.h>
+ #include <c10/util/UniqueVoidPtr.h>
+
+ namespace c10 {
+
+ // A DataPtr is a unique pointer (with an attached deleter and some
+ // context for the deleter) to some memory, which also records what
+ // device is for its data.
+ //
+ // nullptr DataPtrs can still have a nontrivial device; this allows
+ // us to treat zero-size allocations uniformly with non-zero allocations.
+ //
+ class C10_API DataPtr {
+  private:
+   c10::detail::UniqueVoidPtr ptr_;
+   Device device_;
+
+  public:
+   // Choice of CPU here is arbitrary; if there's an "undefined" device
+   // we could use that too
+   DataPtr() : ptr_(), device_(DeviceType::CPU) {}
+   DataPtr(void* data, Device device) : ptr_(data), device_(device) {}
+   DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device)
+       : ptr_(data, ctx, ctx_deleter), device_(device) {}
+   void* operator->() const {
+     return ptr_.get();
+   }
+   void clear() {
+     ptr_.clear();
+   }
+   void* get() const {
+     return ptr_.get();
+   }
+   void* mutable_get() {
+     return ptr_.get();
+   }
+   void* get_context() const {
+     return ptr_.get_context();
+   }
+   void* release_context() {
+     return ptr_.release_context();
+   }
+   std::unique_ptr<void, DeleterFnPtr>&& move_context() {
+     return ptr_.move_context();
+   }
+   operator bool() const {
+     return static_cast<bool>(ptr_);
+   }
+   template <typename T>
+   T* cast_context(DeleterFnPtr expected_deleter) const {
+     return ptr_.cast_context<T>(expected_deleter);
+   }
+   DeleterFnPtr get_deleter() const {
+     return ptr_.get_deleter();
+   }
+   /**
+    * Compare the deleter in a DataPtr to expected_deleter.
+    * If it matches, replace the deleter with new_deleter
+    * and return true; otherwise, does nothing and returns
+    * false.
+    *
+    * In general, it is not safe to unconditionally set the
+    * deleter on a DataPtr, because you don't know what
+    * the deleter is, and thus will have a hard time properly
+    * disposing of the deleter without storing the original
+    * deleter (this is difficult to do, because DeleterFnPtr
+    * is not a closure, and because the context on DataPtr is
+    * only a single word, you generally don't have enough
+    * space to store both the original deleter and its context).
+    * However, in some cases, you know /exactly/ what the deleter
+    * is, and you have a new deleter that manually wraps
+    * the old one. In this case, you can safely swap the deleter
+    * after asserting that the deleters line up.
+    *
+    * What are the requirements on new_deleter? It must still
+    * properly dispose of the void* pointer passed in as its argument,
+    * where void* is whatever the context of the original deleter
+    * is. So in general, you expect the new deleter to look something
+    * like this:
+    *
+    *   [](void* ptr) {
+    *     some_new_stuff(ptr);
+    *     get_orig_allocator()->raw_deleter(ptr);
+    *   }
+    *
+    * Note that it won't work to close over the original
+    * allocator; you don't have enough space to do that! Also,
+    * it's unsafe to assume that the passed in pointer in
+    * question is the memory pointer in question; it might not
+    * be; be sure to read the source code of the Allocator
+    * in question to confirm this.
+    */
+   C10_NODISCARD bool compare_exchange_deleter(
+       DeleterFnPtr expected_deleter,
+       DeleterFnPtr new_deleter) {
+     return ptr_.compare_exchange_deleter(expected_deleter, new_deleter);
+   }
+   Device device() const {
+     return device_;
+   }
+   // Unsafely mutates the device on a DataPtr. Under normal use,
+   // you should never actually need to call this function.
+   // We need this for the implementation of the hack detailed
+   // in Note [Masquerading as CUDA]
+   void unsafe_set_device(Device device) {
+     device_ = device;
+   }
+ };
+
+ // NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a
+ // CPU nullptr
+
+ inline bool operator==(const DataPtr& dp, std::nullptr_t) noexcept {
+   return !dp;
+ }
+ inline bool operator==(std::nullptr_t, const DataPtr& dp) noexcept {
+   return !dp;
+ }
+ inline bool operator!=(const DataPtr& dp, std::nullptr_t) noexcept {
+   return dp;
+ }
+ inline bool operator!=(std::nullptr_t, const DataPtr& dp) noexcept {
+   return dp;
+ }
+
+ // Note [raw_allocate/raw_deallocate and Thrust]
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // Thrust's support for custom allocators requires us to write something
+ // like this:
+ //
+ //   class ThrustAllocator {
+ //     char* allocate(size_t);
+ //     void deallocate(char*, size_t);
+ //   };
+ //
+ // This is not good for our unique_ptr based allocator interface, as
+ // there is no way to get to the context when we free.
+ //
+ // However, in some cases the context is exactly the same as
+ // the data pointer. In this case, we can support the "raw"
+ // allocate and deallocate interface. This is what
+ // raw_deleter signifies. By default, it returns a nullptr, which means that
+ // the raw interface is not implemented. Be sure to implement it whenever
+ // possible, or the raw interface will incorrectly reported as unsupported,
+ // when it is actually possible.
+
+ struct C10_API Allocator {
+   virtual ~Allocator() = default;
+
+   virtual DataPtr allocate(size_t n) const = 0;
+
+   // If this returns a non nullptr, it means that allocate()
+   // is guaranteed to return a unique_ptr with this deleter attached;
+   // it means the rawAllocate and rawDeallocate APIs are safe to use.
+   // This function MUST always return the same BoundDeleter.
+   virtual DeleterFnPtr raw_deleter() const {
+     return nullptr;
+   }
+   void* raw_allocate(size_t n) {
+     auto dptr = allocate(n);
+     AT_ASSERT(dptr.get() == dptr.get_context());
+     return dptr.release_context();
+   }
+   void raw_deallocate(void* ptr) {
+     auto d = raw_deleter();
+     AT_ASSERT(d);
+     d(ptr);
+   }
+ };
+
+ // This context is used to generate DataPtr which have arbitrary
+ // std::function deleters associated with them. In some user facing
+ // functions, we give a (user-friendly) interface for constructing
+ // tensors from external data which take an arbitrary std::function
+ // deleter. Grep for InefficientStdFunctionContext to find these
+ // occurrences.
+ //
+ // This context is inefficient because we have to do a dynamic
+ // allocation InefficientStdFunctionContext, on top of the dynamic
+ // allocation which is implied by std::function itself.
+ struct C10_API InefficientStdFunctionContext {
+   std::unique_ptr<void, std::function<void(void*)>> ptr_;
+   InefficientStdFunctionContext(
+       std::unique_ptr<void, std::function<void(void*)>>&& ptr)
+       : ptr_(std::move(ptr)) {}
+   static DataPtr makeDataPtr(
+       void* ptr,
+       const std::function<void(void*)>& deleter,
+       Device device);
+ };
+
+ /** Set the allocator for DeviceType `t`. The passed in allocator pointer is
+  *  expected to have static lifetime; this function does NOT take ownership
+  *  of the raw pointer. (The reason for this is to prevent existing pointers
+  *  to an allocator of a particular device from being invalidated when
+  *  SetAllocator is called.)
+  *
+  *  Also note that this is not thread-safe, and we assume this function will
+  *  only be called during initialization.
+  *
+  *  The 'priority' flag is introduced when we want to overwrite the default
+  *  allocator, since the allocators are set statically. The default priority
+  *  is 0, which means the lowest. Only higher or equal priority can overwrite
+  *  existing ones.
+  */
+ C10_API void SetAllocator(DeviceType t, Allocator* alloc, uint8_t priority = 0);
+ C10_API Allocator* GetAllocator(const DeviceType& t);
+
+ template <DeviceType t>
+ struct AllocatorRegisterer {
+   explicit AllocatorRegisterer(Allocator* alloc) {
+     SetAllocator(t, alloc);
+   }
+ };
+
+ #define REGISTER_ALLOCATOR(t, f)                        \
+   namespace {                                           \
+   static c10::AllocatorRegisterer<t> g_allocator_d(f);  \
+   }
+
+ // An interface for reporting thread local memory usage
+ // per device
+ struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase {
+   MemoryReportingInfoBase();
+   ~MemoryReportingInfoBase() override = default;
+
+   /**
+    * alloc_size corresponds to the size of the ptr.
+    *
+    * total_allocated corresponds to total allocated memory.
+    *
+    * total_reserved corresponds to total size of memory pool, both used and
+    * unused, if applicable.
+    */
+   virtual void reportMemoryUsage(
+       void* ptr,
+       int64_t alloc_size,
+       size_t total_allocated,
+       size_t total_reserved,
+       Device device) = 0;
+
+   virtual void reportOutOfMemory(
+       int64_t alloc_size,
+       size_t total_allocated,
+       size_t total_reserved,
+       Device device);
+
+   virtual bool memoryProfilingEnabled() const = 0;
+ };
+
+ C10_API bool memoryProfilingEnabled();
+ C10_API void reportMemoryUsageToProfiler(
+     void* ptr,
+     int64_t alloc_size,
+     size_t total_allocated,
+     size_t total_reserved,
+     Device device);
+
+ C10_API void reportOutOfMemoryToProfiler(
+     int64_t alloc_size,
+     size_t total_allocated,
+     size_t total_reserved,
+     Device device);
+
+ // used to hold traceback information in allocators
+ struct GatheredContext {
+   virtual ~GatheredContext() = default;
+ };
+
+ } // namespace c10
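
For orientation, a minimal illustrative sketch (not part of the uploaded header; the name DummyCPUAllocator is hypothetical) of how a backend might implement the Allocator interface above. Because the DataPtr's context equals its data pointer and raw_deleter() returns the same deleter that allocate() attaches, the raw_allocate/raw_deallocate path described in the Thrust note is also usable.

#include <cstdlib>
#include <c10/core/Allocator.h>

// Hypothetical allocator; only illustrates the contract declared in Allocator.h.
struct DummyCPUAllocator final : public c10::Allocator {
  static void deleter(void* ctx) {
    std::free(ctx); // ctx == data, so the raw interface stays valid
  }
  c10::DataPtr allocate(size_t n) const override {
    void* data = std::malloc(n);
    // Data pointer and context are the same word, deleter frees it.
    return {data, data, &deleter, c10::Device(c10::DeviceType::CPU)};
  }
  c10::DeleterFnPtr raw_deleter() const override {
    return &deleter;
  }
};

// Registration would go through the macro above, e.g. (left commented because
// libc10 already registers a real CPU allocator at the default priority):
// static DummyCPUAllocator g_dummy;
// REGISTER_ALLOCATOR(c10::DeviceType::CPU, &g_dummy);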
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h ADDED
@@ -0,0 +1,72 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+
+ // Structure used to pack all the thread local boolean
+ // flags used by autograd
+ struct C10_API AutogradState {
+   static AutogradState& get_tls_state();
+   static void set_tls_state(AutogradState state);
+
+   AutogradState(
+       bool grad_mode,
+       bool inference_mode,
+       bool fw_grad_mode,
+       bool multithreading_enabled)
+       : grad_mode_(grad_mode),
+         inference_mode_(inference_mode),
+         fw_grad_mode_(fw_grad_mode),
+         multithreading_enabled_(multithreading_enabled),
+         view_replay_enabled_(false) {}
+
+   void set_grad_mode(bool enabled) {
+     grad_mode_ = enabled;
+   }
+
+   void set_fw_grad_mode(bool enabled) {
+     fw_grad_mode_ = enabled;
+   }
+
+   void set_inference_mode(bool enabled) {
+     inference_mode_ = enabled;
+   }
+
+   void set_multithreading_enabled(bool multithreading_enabled) {
+     multithreading_enabled_ = multithreading_enabled;
+   }
+
+   void set_view_replay_enabled(bool view_replay_enabled) {
+     view_replay_enabled_ = view_replay_enabled;
+   }
+
+   bool get_grad_mode() const {
+     return grad_mode_;
+   }
+
+   bool get_fw_grad_mode() const {
+     return fw_grad_mode_;
+   }
+
+   bool get_inference_mode() const {
+     return inference_mode_;
+   }
+
+   bool get_multithreading_enabled() const {
+     return multithreading_enabled_;
+   }
+
+   bool get_view_replay_enabled() const {
+     return view_replay_enabled_;
+   }
+
+  private:
+   bool grad_mode_ : 1;
+   bool inference_mode_ : 1;
+   bool fw_grad_mode_ : 1;
+   bool multithreading_enabled_ : 1;
+   bool view_replay_enabled_ : 1;
+ };
+
+ } // namespace c10
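
As an illustration only (not part of the commit), the thread-local flags can be read and toggled through the static accessors declared above; in normal use this is done indirectly via higher-level guards such as those in GradMode.h, also included in this upload.

#include <c10/core/AutogradState.h>

void run_without_grad_history() {
  // Fetch the thread-local flag pack and flip grad mode in place;
  // the remaining flags keep their current values.
  auto& state = c10::AutogradState::get_tls_state();
  const bool was_enabled = state.get_grad_mode();
  state.set_grad_mode(false);
  // ... run code that should not record autograd history ...
  state.set_grad_mode(was_enabled);
}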
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Backend.h ADDED
@@ -0,0 +1,350 @@
+ #pragma once
+
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/DispatchKey.h>
+ #include <c10/core/DispatchKeySet.h>
+ #include <c10/util/Exception.h>
+
+ #include <stdexcept>
+
+ namespace c10 {
+
+ /**
+  * This legacy enum class defines the set of backends supported by old school,
+  * code generated Type-based ATen. A "backend" in this sense roughly
+  * corresponds to the cartesian product of (device type, layout), but restricted
+  * only to combinations which we actually have kernels for. Backend does NOT
+  * include dtype.
+  *
+  * The reason we are sunsetting this enum class is because it doesn't allow for
+  * open registration; e.g., if you want to add SparseXLA, you'd have to
+  * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is
+  * the replacement for Backend which supports open registration.
+  *
+  * NB: The concept of 'Backend' here disagrees with the notion of backend
+  * exposed to users in torch.backends. Backend here is something like "CPU"
+  * or "SparseCUDA"; backend in torch.backends is something like "MKL" or
+  * "CUDNN".
+  */
+ enum class Backend {
+   CPU,
+   CUDA,
+   HIP,
+   VE,
+   FPGA,
+   IPU,
+   XPU,
+   SparseCPU,
+   SparseCUDA,
+   SparseCsrCPU,
+   SparseCsrCUDA,
+   SparseHIP,
+   SparseVE,
+   SparseXPU,
+   SparsePrivateUse1,
+   ORT,
+   XLA,
+   Vulkan,
+   Metal,
+   Meta,
+   QuantizedCPU,
+   QuantizedCUDA,
+   QuantizedXPU,
+   QuantizedPrivateUse1,
+   Undefined,
+   MkldnnCPU,
+   MPS,
+   HPU,
+   Lazy,
+   MTIA,
+   PrivateUse1,
+   NumOptions
+ };
+
+ static inline Backend dispatchKeyToBackend(DispatchKey t) {
+   if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) {
+     return Backend::CPU;
+   } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) {
+     return Backend::CUDA;
+   } else if (t == DispatchKey::HIP) {
+     return Backend::HIP;
+   } else if (t == DispatchKey::VE) {
+     return Backend::VE;
+   } else if (t == DispatchKey::FPGA) {
+     return Backend::FPGA;
+   } else if (t == DispatchKey::ORT) {
+     return Backend::ORT;
+   } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) {
+     return Backend::XLA;
+   } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) {
+     return Backend::Lazy;
+   } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) {
+     return Backend::MPS;
+   } else if (t == DispatchKey::Vulkan) {
+     return Backend::Vulkan;
+   } else if (t == DispatchKey::Metal) {
+     return Backend::Metal;
+   } else if (t == DispatchKey::Meta) {
+     return Backend::Meta;
+   } else if (t == DispatchKey::SparseCPU) {
+     return Backend::SparseCPU;
+   } else if (t == DispatchKey::SparseCUDA) {
+     return Backend::SparseCUDA;
+   } else if (t == DispatchKey::SparseHIP) {
+     return Backend::SparseHIP;
+   } else if (t == DispatchKey::SparseVE) {
+     return Backend::SparseVE;
+   } else if (t == DispatchKey::SparsePrivateUse1) {
+     return Backend::SparsePrivateUse1;
+   } else if (t == DispatchKey::SparseCsrCPU) {
+     return Backend::SparseCsrCPU;
+   } else if (t == DispatchKey::SparseCsrCUDA) {
+     return Backend::SparseCsrCUDA;
+   } else if (t == DispatchKey::MkldnnCPU) {
+     return Backend::MkldnnCPU;
+   } else if (t == DispatchKey::QuantizedCPU) {
+     return Backend::QuantizedCPU;
+   } else if (t == DispatchKey::QuantizedCUDA) {
+     return Backend::QuantizedCUDA;
+   } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) {
+     return Backend::IPU;
+   } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) {
+     return Backend::XPU;
+   } else if (t == DispatchKey::SparseXPU) {
+     return Backend::SparseXPU;
+   } else if (t == DispatchKey::QuantizedXPU) {
+     return Backend::QuantizedXPU;
+   } else if (t == DispatchKey::QuantizedPrivateUse1) {
+     return Backend::QuantizedPrivateUse1;
+   } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) {
+     return Backend::HPU;
+   } else if (t == DispatchKey::MTIA || t == DispatchKey::AutogradMTIA) {
+     return Backend::MTIA;
+   } else if (
+       t == DispatchKey::PrivateUse1 || t == DispatchKey::AutogradPrivateUse1) {
+     return Backend::PrivateUse1;
+   } else if (t == DispatchKey::Undefined) {
+     return Backend::Undefined;
+   } else {
+     TORCH_CHECK(false, "Unrecognized tensor type ID: ", t);
+   }
+ }
+
+ static inline DispatchKey backendToDispatchKey(Backend b) {
+   switch (b) {
+     case Backend::CPU:
+       return DispatchKey::CPU;
+     case Backend::CUDA:
+       return DispatchKey::CUDA;
+     case Backend::HIP:
+       return DispatchKey::HIP;
+     case Backend::VE:
+       return DispatchKey::VE;
+     case Backend::FPGA:
+       return DispatchKey::FPGA;
+     case Backend::ORT:
+       return DispatchKey::ORT;
+     case Backend::XLA:
+       return DispatchKey::XLA;
+     case Backend::Lazy:
+       return DispatchKey::Lazy;
+     case Backend::IPU:
+       return DispatchKey::IPU;
+     case Backend::XPU:
+       return DispatchKey::XPU;
+     case Backend::SparseXPU:
+       return DispatchKey::SparseXPU;
+     case Backend::SparseCPU:
+       return DispatchKey::SparseCPU;
+     case Backend::SparseCUDA:
+       return DispatchKey::SparseCUDA;
+     case Backend::SparseHIP:
+       return DispatchKey::SparseHIP;
+     case Backend::SparseVE:
+       return DispatchKey::SparseVE;
+     case Backend::SparsePrivateUse1:
+       return DispatchKey::SparsePrivateUse1;
+     case Backend::SparseCsrCPU:
+       return DispatchKey::SparseCsrCPU;
+     case Backend::SparseCsrCUDA:
+       return DispatchKey::SparseCsrCUDA;
+     case Backend::MkldnnCPU:
+       return DispatchKey::MkldnnCPU;
+     case Backend::Vulkan:
+       return DispatchKey::Vulkan;
+     case Backend::Metal:
+       return DispatchKey::Metal;
+     case Backend::Meta:
+       return DispatchKey::Meta;
+     case Backend::QuantizedCPU:
+       return DispatchKey::QuantizedCPU;
+     case Backend::QuantizedCUDA:
+       return DispatchKey::QuantizedCUDA;
+     case Backend::QuantizedPrivateUse1:
+       return DispatchKey::QuantizedPrivateUse1;
+     case Backend::Undefined:
+       return DispatchKey::Undefined;
+     case Backend::MPS:
+       return DispatchKey::MPS;
+     case Backend::HPU:
+       return DispatchKey::HPU;
+     case Backend::MTIA:
+       return DispatchKey::MTIA;
+     case Backend::PrivateUse1:
+       return DispatchKey::PrivateUse1;
+     default:
+       throw std::runtime_error("Unknown backend");
+   }
+ }
+
+ static inline DeviceType backendToDeviceType(Backend b) {
+   switch (b) {
+     case Backend::CPU:
+     case Backend::MkldnnCPU:
+     case Backend::SparseCPU:
+     case Backend::SparseCsrCPU:
+     case Backend::QuantizedCPU:
+       return DeviceType::CPU;
+     case Backend::CUDA:
+     case Backend::SparseCUDA:
+     case Backend::QuantizedCUDA:
+     case Backend::SparseCsrCUDA:
+       return DeviceType::CUDA;
+     case Backend::HIP:
+       return DeviceType::HIP;
+     case Backend::VE:
+       return DeviceType::VE;
+     case Backend::FPGA:
+       return DeviceType::FPGA;
+     case Backend::ORT:
+       return DeviceType::ORT;
+     case Backend::XLA:
+       return DeviceType::XLA;
+     case Backend::Lazy:
+       return DeviceType::Lazy;
+     case Backend::SparseHIP:
+       return DeviceType::HIP;
+     case Backend::SparseVE:
+       return DeviceType::VE;
+     case Backend::IPU:
+       return DeviceType::IPU;
+     case Backend::XPU:
+     case Backend::SparseXPU:
+     case Backend::QuantizedXPU:
+       return DeviceType::XPU;
+     case Backend::Vulkan:
+       return DeviceType::Vulkan;
+     case Backend::Metal:
+       return DeviceType::Metal;
+     case Backend::Meta:
+       return DeviceType::Meta;
+     case Backend::MPS:
+       return DeviceType::MPS;
+     case Backend::HPU:
+       return DeviceType::HPU;
+     case Backend::MTIA:
+       return DeviceType::MTIA;
+     case Backend::PrivateUse1:
+     case Backend::SparsePrivateUse1:
+     case Backend::QuantizedPrivateUse1:
+       return DeviceType::PrivateUse1;
+     case Backend::Undefined:
+       TORCH_CHECK(false, "Undefined backend is not a valid device type");
+     default:
+       TORCH_CHECK(false, "Unknown backend");
+   }
+ }
+
+ // TODO: This probably shouldn't actually be static inline
+ static inline const char* toString(Backend b) {
+   switch (b) {
+     case Backend::CPU:
+       return "CPU";
+     case Backend::CUDA:
+       return "CUDA";
+     case Backend::HIP:
+       return "HIP";
+     case Backend::VE:
+       return "VE";
+     case Backend::FPGA:
+       return "FPGA";
+     case Backend::XPU:
+       return "XPU";
+     case Backend::IPU:
+       return "IPU";
+     case Backend::ORT:
+       return "ORT";
+     case Backend::XLA:
+       return "XLA";
+     case Backend::Lazy:
+       return "Lazy";
+     case Backend::MPS:
+       return "MPS";
+     case Backend::SparseCPU:
+       return "SparseCPU";
+     case Backend::SparseCUDA:
+       return "SparseCUDA";
+     case Backend::SparseHIP:
+       return "SparseHIP";
+     case Backend::SparseVE:
+       return "SparseVE";
+     case Backend::SparseXPU:
+       return "SparseXPU";
+     case Backend::SparsePrivateUse1:
+       return "SparsePrivateUse1";
+     case Backend::SparseCsrCPU:
+       return "SparseCsrCPU";
+     case Backend::SparseCsrCUDA:
+       return "SparseCsrCUDA";
+     case Backend::MkldnnCPU:
+       return "MkldnnCPU";
+     case Backend::Vulkan:
+       return "Vulkan";
+     case Backend::Metal:
+       return "Metal";
+     case Backend::Meta:
+       return "Meta";
+     case Backend::QuantizedCPU:
+       return "QuantizedCPU";
+     case Backend::QuantizedCUDA:
+       return "QuantizedCUDA";
+     case Backend::QuantizedXPU:
+       return "QuantizedXPU";
+     case Backend::QuantizedPrivateUse1:
+       return "QuantizedPrivateUse1";
+     case Backend::HPU:
+       return "HPU";
+     case Backend::MTIA:
+       return "MTIA";
+     case Backend::PrivateUse1:
+       return "PrivateUseOne";
+     default:
+       return "UNKNOWN_BACKEND";
+   }
+ }
+
+ static inline bool isSparse(Backend b) {
+   switch (b) {
+     case Backend::SparseXPU:
+     case Backend::SparseCPU:
+     case Backend::SparseCUDA:
+     case Backend::SparseHIP:
+     case Backend::SparseVE:
+     case Backend::SparsePrivateUse1:
+       return true;
+     default:
+       return false;
+   }
+ }
+
+ static inline bool isSparseCsr(Backend b) {
+   switch (b) {
+     case Backend::SparseCsrCPU:
+     case Backend::SparseCsrCUDA:
+       return true;
+     default:
+       return false;
+   }
+ }
+
+ } // namespace c10
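
A small illustrative snippet (not part of the uploaded header) exercising the conversion helpers defined above; it assumes nothing beyond what Backend.h itself declares.

#include <iostream>
#include <c10/core/Backend.h>

int main() {
  c10::Backend b = c10::Backend::SparseCUDA;
  // Round-trip through the dispatch key, then recover the device type.
  c10::DispatchKey k = c10::backendToDispatchKey(b);        // DispatchKey::SparseCUDA
  bool round_trip_ok = (c10::dispatchKeyToBackend(k) == b);
  bool on_cuda = (c10::backendToDeviceType(b) == c10::DeviceType::CUDA);
  std::cout << c10::toString(b)
            << (c10::isSparse(b) ? " is a sparse backend" : " is a dense backend")
            << '\n';
  return (round_trip_ok && on_cuda) ? 0 : 1;
}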
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h ADDED
@@ -0,0 +1,56 @@
+ #pragma once
+
+ #include <c10/util/TypeTraits.h>
+
+ namespace c10 {
+
+ /**
+  * Represent a function pointer as a C++ type.
+  * This allows using the function pointer as a type
+  * in a template and calling it from inside the template
+  * allows the compiler to inline the call because it
+  * knows the function pointer at compile time.
+  *
+  * Example 1:
+  *  int add(int a, int b) {return a + b;}
+  *  using Add = TORCH_FN_TYPE(add);
+  *  template<class Func> struct Executor {
+  *    int execute(int a, int b) {
+  *      return Func::func_ptr()(a, b);
+  *    }
+  *  };
+  *  Executor<Add> executor;
+  *  EXPECT_EQ(3, executor.execute(1, 2));
+  *
+  * Example 2:
+  *  int add(int a, int b) {return a + b;}
+  *  template<class Func> int execute(Func, int a, int b) {
+  *    return Func::func_ptr()(a, b);
+  *  }
+  *  EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2));
+  */
+ template <class FuncType_, FuncType_* func_ptr_>
+ struct CompileTimeFunctionPointer final {
+   static_assert(
+       guts::is_function_type<FuncType_>::value,
+       "TORCH_FN can only wrap function types.");
+   using FuncType = FuncType_;
+
+   static constexpr FuncType* func_ptr() {
+     return func_ptr_;
+   }
+ };
+
+ template <class T>
+ struct is_compile_time_function_pointer : std::false_type {};
+ template <class FuncType, FuncType* func_ptr>
+ struct is_compile_time_function_pointer<
+     CompileTimeFunctionPointer<FuncType, func_ptr>> : std::true_type {};
+
+ } // namespace c10
+
+ #define TORCH_FN_TYPE(func)                                           \
+   ::c10::CompileTimeFunctionPointer<                                  \
+       std::remove_pointer_t<std::remove_reference_t<decltype(func)>>, \
+       func>
+ #define TORCH_FN(func) TORCH_FN_TYPE(func)()
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ConstantSymNodeImpl.h ADDED
@@ -0,0 +1,97 @@
+ #include <c10/core/SymNodeImpl.h>
+ #include <variant>
+
+ namespace c10 {
+
+ // Unlike other SymNodeImpl, this cannot be "dispatched" conventionally,
+ // as it typically needs to defer to another SymNodeImpl
+ //
+ // Can either represent a bool, int (don't support float yet) this is useful
+ // for representing otherwise unrepresentable large negative integer constant.
+ template <typename T>
+ class C10_API ConstantSymNodeImpl : public SymNodeImpl {
+   static_assert(
+       std::is_same<T, int64_t>::value || std::is_same<T, bool>::value,
+       "ConstantSymNodeImpl can only accept int64_t or bool types");
+
+  public:
+   ConstantSymNodeImpl(T val) : value_(val) {}
+
+   bool is_int() override {
+     return is_int_();
+   }
+   bool is_bool() override {
+     return is_bool_();
+   }
+   bool is_float() override {
+     return false;
+   }
+   int64_t guard_int(const char* file, int64_t line) override {
+     TORCH_CHECK(is_int(), "not an int");
+     return int_();
+   }
+   bool guard_bool(const char* file, int64_t line) override {
+     TORCH_CHECK(is_bool(), "not a bool");
+     return bool_();
+   }
+   double guard_float(const char* file, int64_t line) override {
+     TORCH_CHECK(false, "not a float");
+   }
+   int64_t int_() override {
+     TORCH_CHECK(is_int(), "not an int");
+     return std::get<int64_t>(value_);
+   }
+   bool bool_() override {
+     TORCH_CHECK(is_bool(), "not a bool");
+     return std::get<bool>(value_);
+   }
+   bool has_hint() override {
+     return true;
+   }
+   c10::SymNode eq(const c10::SymNode& other) override;
+   c10::SymNode ne(const c10::SymNode& other) override;
+   c10::SymNode ge(const c10::SymNode& other) override;
+   c10::SymNode le(const c10::SymNode& other) override;
+   c10::SymNode lt(const c10::SymNode& other) override;
+   c10::SymNode gt(const c10::SymNode& other) override;
+   c10::SymNode mul(const c10::SymNode& other) override;
+   std::string str() override {
+     if constexpr (is_int_()) {
+       return std::to_string(std::get<int64_t>(value_));
+     } else {
+       return std::get<bool>(value_) ? "true" : "false";
+     }
+   }
+   c10::optional<int64_t> constant_int() override {
+     if constexpr (is_int_()) {
+       return std::get<int64_t>(value_);
+     } else {
+       return c10::nullopt;
+     }
+   }
+   c10::optional<bool> constant_bool() override {
+     if constexpr (is_bool_()) {
+       return std::get<bool>(value_);
+     } else {
+       return c10::nullopt;
+     }
+   }
+   bool is_constant() override {
+     return true;
+   }
+   bool is_symbolic() override {
+     return false;
+   }
+
+  private:
+   std::variant<int64_t, bool> value_;
+
+   static constexpr bool is_int_() {
+     return std::is_same<T, int64_t>::value;
+   }
+   static constexpr bool is_bool_() {
+     return std::is_same<T, bool>::value;
+   }
+ };
+
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h ADDED
@@ -0,0 +1,126 @@
+ #pragma once
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/SmallVector.h>
+ #include <c10/util/irange.h>
+
+ #include <algorithm>
+ #include <cstdint>
+
+ namespace c10 {
+
+ template <typename T>
+ bool _compute_contiguous(ArrayRef<T> sizes, ArrayRef<T> strides, T numel) {
+   bool is_contiguous = true;
+   if (numel == 0)
+     return is_contiguous;
+   T z = 1;
+   // NB: make sure we do signed arithmetic
+   for (int64_t d = int64_t(sizes.size()) - 1; d >= 0; d--) {
+     const auto& size_d = sizes[d];
+     if (size_d != 1) {
+       if (strides[d] == z) {
+         z *= size_d;
+       } else {
+         is_contiguous = false;
+         break;
+       }
+     }
+   }
+   return is_contiguous;
+ }
+
+ template <typename T>
+ bool _compute_channels_last_contiguous_2d(
+     ArrayRef<T> sizes,
+     ArrayRef<T> strides) {
+   // Please don't combine these code, constant array is used here to let
+   // compiler fully unroll the loop to get better performance
+   switch (sizes.size()) {
+     case 4: {
+       T expected = 1;
+       for (auto& d : {1, 3, 2, 0}) {
+         const auto& size_d = sizes[d];
+         if (size_d != 1) {
+           if (strides[d] != expected) {
+             return false;
+           }
+           expected *= size_d;
+         }
+       }
+       return true;
+     }
+     // NOLINTNEXTLINE(bugprone-branch-clone)
+     case 3:
+       // TODO dim == 3 case will be enabled once it is fully tested
+       return false;
+     default:
+       return false;
+   }
+ }
+
+ template <typename T>
+ bool _compute_channels_last_contiguous_3d(
+     ArrayRef<T> sizes,
+     ArrayRef<T> strides) {
+   // Please don't combine these code, constant array is used here to let
+   // compiler fully unroll the loop to get better performance
+   switch (sizes.size()) {
+     case 5: {
+       T expected = 1;
+       for (auto& d : {1, 4, 3, 2, 0}) {
+         const auto& size_d = sizes[d];
+         if (size_d != 1) {
+           if (strides[d] != expected) {
+             return false;
+           }
+           expected *= size_d;
+         }
+       }
+       return true;
+     }
+     // NOLINTNEXTLINE(bugprone-branch-clone)
+     case 4:
+       // TODO dim == 4 case will be enabled once it is fully tested
+       return false;
+     default:
+       return false;
+   }
+ }
+
+ template <typename T>
+ bool _compute_non_overlapping_and_dense(
+     ArrayRef<T> sizes,
+     ArrayRef<T> strides) {
+   auto dim = sizes.size();
+   if (dim == 1) {
+     return sizes[0] < 2 || strides[0] == 1;
+   }
+   SmallVector<int64_t, 5> perm;
+   perm.resize(dim);
+   for (const auto i : c10::irange(dim)) {
+     perm[i] = i;
+   }
+   // Sort by strides, leaving 0 and 1 sized dims at the end of the array
+   std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) {
+     if (sizes[a] < 2) {
+       return false;
+     } else if (sizes[b] < 2) {
+       return true;
+     }
+     return strides[a] < strides[b];
+   });
+   T require_stride = 1;
+   for (const auto i : c10::irange(dim)) {
+     const auto& size_perm_i = sizes[perm[i]];
+     if (size_perm_i < 2) {
+       return true;
+     }
+     if (strides[perm[i]] != require_stride) {
+       return false;
+     }
+     require_stride *= size_perm_i;
+   }
+   return true;
+ }
+
+ } // namespace c10
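
For a concrete illustration (not part of the file), _compute_contiguous checks that strides shrink row-major-style from the innermost dimension outward: a 2x3 tensor with strides {3, 1} passes, while the transposed stride pattern {1, 2} over the same storage does not.

#include <cstdint>
#include <c10/core/Contiguity.h>
#include <c10/util/ArrayRef.h>

bool contiguity_demo() {
  int64_t sizes[] = {2, 3};
  int64_t row_major[] = {3, 1};  // standard C-contiguous strides
  int64_t transposed[] = {1, 2}; // column-major view of the same storage
  bool a = c10::_compute_contiguous<int64_t>(sizes, row_major, /*numel=*/6);  // true
  bool b = c10::_compute_contiguous<int64_t>(sizes, transposed, /*numel=*/6); // false
  return a && !b;
}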
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <c10/core/ScalarType.h>
+ #include <c10/macros/Export.h>
+
+ namespace caffe2 {
+ class TypeMeta;
+ } // namespace caffe2
+
+ namespace c10 {
+ C10_API void set_default_dtype(caffe2::TypeMeta dtype);
+ C10_API const caffe2::TypeMeta get_default_dtype();
+ C10_API ScalarType get_default_dtype_as_scalartype();
+ C10_API const caffe2::TypeMeta get_default_complex_dtype();
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ #include <c10/core/Backend.h>
+ #include <c10/core/Device.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/ScalarType.h>
+
+ namespace c10 {
+
+ struct TensorOptions;
+
+ /// Like TensorOptions, but all fields are guaranteed to be filled.
+ struct DefaultTensorOptions {
+   DefaultTensorOptions() = default;
+
+   caffe2::TypeMeta dtype() const noexcept {
+     return dtype_;
+   }
+   Device device() const noexcept {
+     return device_;
+   }
+   Layout layout() const noexcept {
+     return layout_;
+   }
+   bool requires_grad() const noexcept {
+     return requires_grad_;
+   }
+
+   // Defined in TensorOptions.h
+   inline DefaultTensorOptions& merge(const TensorOptions& options);
+
+  private:
+   caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 64-bit
+   Device device_ = at::kCPU; // 32-bit
+   Layout layout_ = at::kStrided; // 8-bit
+   bool requires_grad_ = false; // 8-bit
+ };
+
+ inline const DefaultTensorOptions& getDefaultTensorOptions() {
+   static const auto options = DefaultTensorOptions();
+   return options;
+ }
+
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Device.h ADDED
@@ -0,0 +1,215 @@
+ #pragma once
+
+ #include <c10/core/DeviceType.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/Exception.h>
+
+ #include <cstddef>
+ #include <functional>
+ #include <iosfwd>
+ #include <string>
+
+ namespace c10 {
+
+ /// An index representing a specific device; e.g., the 1 in GPU 1.
+ /// A DeviceIndex is not independently meaningful without knowing
+ /// the DeviceType it is associated; try to use Device rather than
+ /// DeviceIndex directly.
+ using DeviceIndex = int8_t;
+
+ /// Represents a compute device on which a tensor is located. A device is
+ /// uniquely identified by a type, which specifies the type of machine it is
+ /// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
+ /// specific compute device when there is more than one of a certain type. The
+ /// device index is optional, and in its defaulted state represents (abstractly)
+ /// "the current device". Further, there are two constraints on the value of the
+ /// device index, if one is explicitly stored:
+ /// 1. A negative index represents the current device, a non-negative index
+ ///    represents a specific, concrete device,
+ /// 2. When the device type is CPU, the device index must be zero.
+ struct C10_API Device final {
+   using Type = DeviceType;
+
+   /// Constructs a new `Device` from a `DeviceType` and an optional device
+   /// index.
+   /* implicit */ Device(DeviceType type, DeviceIndex index = -1)
+       : type_(type), index_(index) {
+     validate();
+   }
+
+   /// Constructs a `Device` from a string description, for convenience.
+   /// The string supplied must follow the following schema:
+   /// `(cpu|cuda)[:<device-index>]`
+   /// where `cpu` or `cuda` specifies the device type, and
+   /// `:<device-index>` optionally specifies a device index.
+   /* implicit */ Device(const std::string& device_string);
+
+   /// Returns true if the type and index of this `Device` matches that of
+   /// `other`.
+   bool operator==(const Device& other) const noexcept {
+     return this->type_ == other.type_ && this->index_ == other.index_;
+   }
+
+   /// Returns true if the type or index of this `Device` differs from that of
+   /// `other`.
+   bool operator!=(const Device& other) const noexcept {
+     return !(*this == other);
+   }
+
+   /// Sets the device index.
+   void set_index(DeviceIndex index) {
+     index_ = index;
+   }
+
+   /// Returns the type of device this is.
+   DeviceType type() const noexcept {
+     return type_;
+   }
+
+   /// Returns the optional index.
+   DeviceIndex index() const noexcept {
+     return index_;
+   }
+
+   /// Returns true if the device has a non-default index.
+   bool has_index() const noexcept {
+     return index_ != -1;
+   }
+
+   /// Return true if the device is of CUDA type.
+   bool is_cuda() const noexcept {
+     return type_ == DeviceType::CUDA;
+   }
+
+   /// Return true if the device is of PrivateUse1 type.
+   bool is_privateuseone() const noexcept {
+     return type_ == DeviceType::PrivateUse1;
+   }
+
+   /// Return true if the device is of MPS type.
+   bool is_mps() const noexcept {
+     return type_ == DeviceType::MPS;
+   }
+
+   /// Return true if the device is of HIP type.
+   bool is_hip() const noexcept {
+     return type_ == DeviceType::HIP;
+   }
+
+   /// Return true if the device is of VE type.
+   bool is_ve() const noexcept {
+     return type_ == DeviceType::VE;
+   }
+
+   /// Return true if the device is of XPU type.
+   bool is_xpu() const noexcept {
+     return type_ == DeviceType::XPU;
+   }
+
+   /// Return true if the device is of IPU type.
+   bool is_ipu() const noexcept {
+     return type_ == DeviceType::IPU;
+   }
+
+   /// Return true if the device is of XLA type.
+   bool is_xla() const noexcept {
+     return type_ == DeviceType::XLA;
+   }
+
+   /// Return true if the device is of MTIA type.
+   bool is_mtia() const noexcept {
+     return type_ == DeviceType::MTIA;
+   }
+
+   /// Return true if the device is of HPU type.
+   bool is_hpu() const noexcept {
+     return type_ == DeviceType::HPU;
+   }
+
+   /// Return true if the device is of Lazy type.
+   bool is_lazy() const noexcept {
+     return type_ == DeviceType::Lazy;
+   }
+
+   /// Return true if the device is of Vulkan type.
+   bool is_vulkan() const noexcept {
+     return type_ == DeviceType::Vulkan;
+   }
+
+   /// Return true if the device is of Metal type.
+   bool is_metal() const noexcept {
+     return type_ == DeviceType::Metal;
+   }
+
+   /// Return true if the device is of ORT type.
+   bool is_ort() const noexcept {
+     return type_ == DeviceType::ORT;
+   }
+
+   /// Return true if the device is of META type.
+   bool is_meta() const noexcept {
+     return type_ == DeviceType::Meta;
+   }
+
+   /// Return true if the device is of CPU type.
+   bool is_cpu() const noexcept {
+     return type_ == DeviceType::CPU;
+   }
+
+   /// Return true if the device supports arbitrary strides.
+   bool supports_as_strided() const noexcept {
+     return type_ != DeviceType::IPU && type_ != DeviceType::XLA &&
+         type_ != DeviceType::Lazy && type_ != DeviceType::MTIA;
+   }
+
+   /// Same string as returned from operator<<.
+   std::string str() const;
+
+  private:
+   DeviceType type_;
+   DeviceIndex index_ = -1;
+   void validate() {
+     // Removing these checks in release builds noticeably improves
+     // performance in micro-benchmarks.
+     // This is safe to do, because backends that use the DeviceIndex
+     // have a later check when we actually try to switch to that device.
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+         index_ >= -1,
+         "Device index must be -1 or non-negative, got ",
+         static_cast<int>(index_));
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+         !is_cpu() || index_ <= 0,
+         "CPU device index must be -1 or zero, got ",
+         static_cast<int>(index_));
+   }
+ };
+
+ C10_API std::ostream& operator<<(std::ostream& stream, const Device& device);
+
+ } // namespace c10
+
+ namespace std {
+ template <>
+ struct hash<c10::Device> {
+   size_t operator()(c10::Device d) const noexcept {
+     // Are you here because this static assert failed? Make sure you ensure
+     // that the bitmasking code below is updated accordingly!
+     static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit");
+     static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit");
+     // Note [Hazard when concatenating signed integers]
+     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+     // We must first convert to a same-sized unsigned type, before promoting to
+     // the result type, to prevent sign extension when any of the values is -1.
+     // If sign extension occurs, you'll clobber all of the values in the MSB
+     // half of the resulting integer.
+     //
+     // Technically, by C/C++ integer promotion rules, we only need one of the
+     // uint32_t casts to the result type, but we put in both for explicitness's
+     // sake.
+     uint32_t bits = static_cast<uint32_t>(static_cast<uint8_t>(d.type()))
+             << 16 |
+         static_cast<uint32_t>(static_cast<uint8_t>(d.index()));
+     return std::hash<uint32_t>{}(bits);
+   }
+ };
+ } // namespace std
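
Illustrative usage (not part of the header) of the string constructor and the accessors declared above:

#include <c10/core/Device.h>

bool device_demo() {
  c10::Device d("cuda:1");               // parsed as (DeviceType::CUDA, index 1)
  c10::Device cpu(c10::DeviceType::CPU); // index defaults to -1, i.e. "current device"
  // d has an explicit index; cpu does not, and the two compare unequal.
  return d.is_cuda() && d.has_index() && d.index() == 1 && d != cpu;
}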
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h ADDED
@@ -0,0 +1,24 @@
+ #include <c10/core/Allocator.h>
+
+ namespace c10 {
+
+ template <typename T>
+ class DeviceArray {
+  public:
+   DeviceArray(c10::Allocator& allocator, size_t size)
+       : data_ptr_(allocator.allocate(size * sizeof(T))) {
+     static_assert(std::is_trivial<T>::value, "T must be a trivial type");
+     TORCH_INTERNAL_ASSERT(
+         0 == (reinterpret_cast<intptr_t>(data_ptr_.get()) % alignof(T)),
+         "c10::DeviceArray: Allocated memory is not aligned for this data type");
+   }
+
+   T* get() {
+     return static_cast<T*>(data_ptr_.get());
+   }
+
+  private:
+   c10::DataPtr data_ptr_;
+ };
+
+ } // namespace c10
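
A brief sketch (not part of the commit) tying this header to Allocator.h above: a typed, alignment-checked buffer obtained from whatever allocator is registered for a device, freed automatically when the DeviceArray goes out of scope. It assumes a CPU allocator is registered, which libc10 does in practice.

#include <cstddef>
#include <c10/core/Allocator.h>
#include <c10/core/DeviceArray.h>

float sum_demo(size_t n) {
  // GetAllocator is declared in Allocator.h.
  c10::Allocator* alloc = c10::GetAllocator(c10::DeviceType::CPU);
  c10::DeviceArray<float> buf(*alloc, n); // allocates n * sizeof(float) bytes
  float* p = buf.get();
  float total = 0.f;
  for (size_t i = 0; i < n; ++i) {
    p[i] = static_cast<float>(i);
    total += p[i];
  }
  return total; // memory released via the DataPtr's deleter when buf is destroyed
}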
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h ADDED
@@ -0,0 +1,732 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <cstdint>
6
+ #include <ostream>
7
+ #include <string>
8
+
9
+ namespace c10 {
10
+
11
+ // Semantically, each value of BackendComponent identifies a "backend" for our
12
+ // dispatch. Some functionalities that we may dispatch to are allowed to
13
+ // register different handlers for each backend. The BackendComponent is then
14
+ // used to figure out which backend implementation to dispatch to.
15
+
16
+ // In implementation terms, the backend component identifies a specific "bit" in
17
+ // a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom
18
+ // ~12 "BackendComponent" bits, while the remaining upper bits are assigned to
19
+ // functionalities. When we encounter a functionality bit that is known to be
20
+ // customizable per-backend, then we also look at the lower BackendComponent
21
+ // bits and take the highest bit to determine which backend's implementation to
22
+ // use.
23
+
24
+ // WARNING! If you add a new backend component to the end of this list,
25
+ // make sure you register it before Meta.
26
+ // Meta must be at the end so that meta key in tls triggers meta kernels.
27
+ // (But you shouldn't: private use keys should have higher precedence than all
28
+ // built-in keys)
29
+
30
+ // If you add a new (non-privateuse) backend here,
31
+ // make sure to add an Autograd<Backend> fallthrough kernel
32
+ // in aten/src/ATen/core/VariableFallbackKernel.cpp
33
+
34
+ #define C10_FORALL_BACKEND_COMPONENTS(_, extra) \
35
+ _(CPU, extra) \
36
+ _(CUDA, extra) \
37
+ _(HIP, extra) \
38
+ _(XLA, extra) \
39
+ _(MPS, extra) \
40
+ _(IPU, extra) \
41
+ _(XPU, extra) \
42
+ _(HPU, extra) \
43
+ _(VE, extra) \
44
+ _(Lazy, extra) \
45
+ _(MTIA, extra) \
46
+ _(PrivateUse1, extra) \
47
+ _(PrivateUse2, extra) \
48
+ _(PrivateUse3, extra) \
49
+ _(Meta, extra)
50
+
51
+ // WARNING! If we add a new per-backend functionality key that has higher
52
+ // priority than Autograd, then make sure you update EndOfRuntimeBackendKeys
53
+
54
+ #define C10_FORALL_FUNCTIONALITY_KEYS(_) \
55
+ _(Dense, ) \
56
+ _(Quantized, Quantized) \
57
+ _(Sparse, Sparse) \
58
+ _(NestedTensor, NestedTensor) \
59
+ _(AutogradFunctionality, Autograd)
60
+
61
+ enum class BackendComponent : uint8_t {
62
+
63
+ // A "backend" is colloquially used to refer to handlers for dispatch
64
+ // which actually implement the numerics of an operation in question.
65
+ //
66
+ // Due to the nature of the enum, these backends are specified in
67
+ // an ordered way, but for most backends this order is not semantically
68
+ // meaningful (e.g., it's valid to reorder these backends without changing
69
+ // semantics). The only situation when backend ordering is meaningful
70
+ // is when the backend participates in multiple dispatch with another
71
+ // backend; e.g., CPU and CUDA (cuda must have higher priority).
72
+
73
+ // These keys don't correspond to individual kernels.
74
+ // Instead, they represent the backends that are allowed to override specific
75
+ // pieces of functionality:
76
+ // - dense kernels (e.g. DispatchKey::CPU)
77
+ // - sparse kernels (e.g. DispatchKey::SparseCPU)
78
+ // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
79
+ // - autograd kernels (e.g. DispatchKey::AutogradCPU)
80
+ // We reserve space in the runtime operator table for this full cross product
81
+ // of
82
+ // [backends in this enum] x [keys below that are explicitly marked as having
83
+ // per-backend functionality]
84
+ //
85
+ // A meta tensor is a tensor without any data associated with it. (They
86
+ // have also colloquially been referred to as tensors on the "null" device).
87
+ // A meta tensor can be used to dry run operators without actually doing any
88
+ // computation, e.g., add on two meta tensors would give you another meta
89
+ // tensor with the output shape and dtype, but wouldn't actually add anything.
90
+
91
+ InvalidBit = 0,
92
+ #define DEFINE_BACKEND_COMPONENT(n, _) n##Bit,
93
+ C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused)
94
+ #undef DEFINE_BACKEND_COMPONENT
95
+
96
+ // Define an alias to represent end of backend dispatch keys.
97
+ // If you add new backend keys after PrivateUse3, please also update it here.
98
+ EndOfBackendKeys = MetaBit,
99
+ };
100
+
101
+ // Semantically, a dispatch key identifies a possible "level" in our
102
+ // dispatch, for which a handler may be registered. Each handler corresponds
103
+ // to a type of functionality.
104
+ //
105
+ // In implementation terms, the dispatch key identifies a specific "bit" in a
106
+ // DispatchKeySet. Higher bit indexes get handled by dispatching first (because
107
+ // we "count leading zeros" when we extract the highest priority dispatch
108
+ // key.)
109
+ //
110
+ // Note [DispatchKey Classification]
111
+ // This enum actually contains several types of keys, which are explained
112
+ // in more detail further down:
113
+ // (1) non-customizable backends (e.g. FPGA)
114
+ // (2) non-customizable functionalities (e.g. Functionalize)
115
+ // (3) functionalized that are customizable per backend (e.g. Dense, Sparse,
116
+ // AutogradFunctionality) (4) per-backend instances of customizable
117
+ // functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g.
118
+ // CompositeImplicitAutograd)
119
+ //
120
+ // Of the categories above, it's important to note:
121
+ // (a) which keys are assigned individual bits in a DispatchKeySet
122
+ // (b) which keys are assigned individual slots in the runtime operator table
123
+ // ("Runtime keys")
124
+ //
125
+ // (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet.
126
+ // (1), (2) and (4) all get their own dedicated slots in the runtime operator
127
+ // table.
128
+
129
+ // See Note [DispatchKeySet Internal Representation] for more details.
130
+ //
131
+ // NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py
132
+ enum class DispatchKey : uint16_t {
133
+
134
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
135
+ // This is not a "real" functionality, but it exists to give us a "nullopt"
136
+ // element we can return for cases when a DispatchKeySet contains no elements.
137
+ // You can think of a more semantically accurate definition of DispatchKey as:
138
+ //
139
+ // using DispatchKey = optional<RealDispatchKey>
140
+ //
141
+ // and Undefined == nullopt. We didn't actually represent
142
+ // it this way because optional<RealDispatchKey> would take two
143
+ // words, when DispatchKey fits in eight bits.
144
+
145
+ Undefined = 0,
146
+
147
+ // Define an alias for Undefined to represent CatchAll (long term
148
+ // this will get eliminated, but for now it's convenient)
149
+ CatchAll = Undefined,
150
+
151
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ //
152
+ // Every value in the enum (up to EndOfFunctionalityKeys)
153
+ // corresponds to an individual "functionality" that can be dispatched to.
154
+ // This is represented in the DispatchKeySet by assigning each of these enum
155
+ // values
156
+ // to each of the remaining (64 - len(BackendComponent)) bits.
157
+ //
158
+ // Most of these functionalities have a single handler assigned to them,
159
+ // making them "runtime keys",
160
+ // which map to a single slot in the runtime operator table.
161
+ //
162
+ // A few functionalities are allowed to be customizable per backend.
163
+ // See [Note: Per-Backend Functionality Dispatch Keys] for details.
164
+
165
+ // See [Note: Per-Backend Functionality Dispatch Keys]
166
+ Dense,
167
+
168
+ // Below are non-extensible backends.
169
+ // These are backends that currently don't have their own overrides for
170
+ // Autograd/Sparse/Quantized kernels,
171
+ // and we therefore don't waste space in the runtime operator table allocating
172
+ // space for them.
173
+ // If any of these backends ever need to customize, e.g., Autograd, then we'll
174
+ // need to add a DispatchKey::*Bit for them.
175
+
176
+ // TODO: put this in BackendComponents
177
+ FPGA, // Xilinx support lives out of tree at
178
+ // https://gitlab.com/pytorch-complex/vitis_kernels
179
+
180
+ // TODO: put this in BackendComponents
181
+ // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and
182
+ // https://github.com/microsoft/onnxruntime, and is also used to test general
183
+ // backend/extension machinery in the core. cf:
184
+ // - test/cpp_extensions/ort_extension.cpp
185
+ // - test/test_torch.py
186
+ // - aten/src/ATen/test/extension_backend_test.cpp
187
+ ORT,
188
+
189
+ Vulkan, // TODO: put this in BackendComponents
190
+ Metal, // TODO: put this in BackendComponents
191
+
192
+ // See [Note: Per-Backend Functionality Dispatch Keys]
193
+ Quantized,
194
+
195
+ // This backend is to support custom RNGs; it lets you go
196
+ // to a different kernel if you pass in a generator that is not a
197
+ // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this
198
+ // key:
199
+ // 1) set it as a second parameter of at::Generator constructor call in
200
+ // the user-defined PRNG class.
201
+ // 2) use it as a dispatch key while registering custom kernels
202
+ // (templatized kernels specialized for user-defined PRNG class)
203
+ // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp
204
+ CustomRNGKeyId,
205
+
206
+ // TODO: Make Mkldnn a functionality key, so we can give it Meta
207
+ // support
208
+ // Here are backends which specify more specialized operators
209
+ // based on the layout of the tensor. Note that the sparse backends
210
+ // are one case where ordering matters: sparse multi-dispatches with
211
+ // the corresponding dense tensors, and must be handled before them.
212
+ MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp
213
+ // NB: not to be confused with MKLDNN, which is Caffe2 only
214
+
215
+ // See [Note: Per-Backend Functionality Dispatch Keys]
216
+ Sparse,
217
+
218
+ // TODO: Make SparseCsr a functionality key
219
+ SparseCsrCPU,
220
+ SparseCsrCUDA,
221
+
222
+ NestedTensor,
223
+
224
+ // In some situations, it is not immediately obvious what the correct
225
+ // backend for a function is, because the function in question doesn't
226
+ // have any "tensor" arguments. In this case, a BackendSelect function
227
+ // can be registered to implement the custom determination of the
228
+ // correct backend.
229
+ BackendSelect,
230
+
231
+ Python,
232
+
233
+ // Out-of-core key for Fake Tensor in torchdistx.
234
+ // See https://pytorch.org/torchdistx/latest/fake_tensor.html
235
+ // TODO: delete this in favor of Python-implemented fake tensor
236
+ Fake,
237
+ // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key
238
+ // is to insert code after the "autograd subsystem" runs, so this key should
239
+ // be directly after ADInplaceOrView and all of the autograd keys.
240
+ FuncTorchDynamicLayerBackMode,
241
+
242
+ // Alias and mutation removal.
243
+ // If some backends want to opt into only alias removal or only mutation
244
+ // removal,
245
+ // we can consider adding separate keys dedicated to those individual passes.
246
+ // See Note [Functionalization Pass In Core] for details.
247
+ Functionalize,
248
+
249
+ // The named dispatch key is set for any tensors with named dimensions.
250
+ // Although we have a dispatch key for named tensors, for historical reasons,
251
+ // this dispatch key doesn't do any of the substantive functionality for named
252
+ // tensor (though, hypothetically, it could!) At the moment, it's just
253
+ // responsible for letting us give good error messages when operations
254
+ // don't support named tensors.
255
+ //
256
+ // NB: If you ever consider moving named tensor functionality into
257
+ // this dispatch key, note that it might be necessary to add another dispatch
258
+ // key that triggers before composite operators, in case a composite operator
259
+ // has named dimension propagation that doesn't match that of its
260
+ // constituent parts.
261
+ // TODO: delete this once torchdim lands in functorch
262
+ Named,
263
+
264
+ // The Conjugate dispatch key is set for any tensors that need to perform
265
+ // conjugation
266
+ // This is implemented at a dispatch level right before any backends run
267
+ Conjugate,
268
+
269
+ // The Negative dispatch key is set for any tensors that need to perform
270
+ // negation
271
+ // This is implemented at a dispatch level right before any backends run
272
+ Negative,
273
+
274
+ ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp
275
+
276
+ // Note [ADInplaceOrView key]
277
+ // ADInplaceOrView key is used by inplace or view ops to register a kernel
278
+ // that does additional setup for future autograd computation.
279
+ //
280
+ // 1. For inplace ops this kernel does version bump
281
+ // 2. For view ops this kernel does `as_view` setup where we properly setup
282
+ // DifferentiableViewMeta on the view tensors.
283
+ //
284
+ // For other ops it's fallthrough kernel since there's no extra
285
+ // work to do.
286
+ //
287
+ // Note [Dream: skip VariableType kernel when requires_grad=false]
288
+ //
289
+ // In an ideal world where we can skip VariableType kernel for inputs
290
+ // with requires_grad=false, instead of a fallthrough kernel, we'll
291
+ // register a kernel shown below to all functional ops as well:
292
+ // torch::Tensor my_functional_op(...) {
293
+ // {
294
+ // // Note for every op in VariableType, you need to go through
295
+ // // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the
296
+ // // key to TLS excluded set. If you don't go through it at all,
297
+ // // inplace/view ops called through `at::` inside your backend
298
+ // // kernel will dispatch to ADInplaceOrView kernels and do a lot
299
+ // // of extra work.
300
+ // at::AutoDispatchBelowADInplaceOrView guard;
301
+ // at::redispatch::my_functional_op(...);
302
+ // }
303
+ // }
304
+ // But this work is currently blocked since it adds an extra dispatch
305
+ // for all ops, which is a non-trivial overhead at the model level (a few percent).
306
+ // Thus our current approach takes advantage of the fact that every kernel goes
307
+ // through the VariableType kernel first, and pulls the
308
+ // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops
309
+ // up to the `VariableType` kernel. Thus we only add the extra dispatch
310
+ // to view/inplace ops to minimize the perf impact on real models.
311
+ ADInplaceOrView,
312
+ // Note [Alias Dispatch Key : Autograd]
313
+ // All backends are oblivious to autograd; autograd is handled as a
314
+ // layer which happens on top of all backends. It inspects the autograd
315
+ // metadata of all inputs, determines what autograd metadata should be
316
+ // constructed by the output, and otherwise defers to the backend to
317
+ // actually do the numeric computation. Autograd contains
318
+ // the bulk of this logic.
319
+
320
+ // Autograd is now an alias dispatch key which by default maps to all
321
+ // backend-specific autograd keys.
322
+ // Backend-specific keys allow backends to override the default kernel
323
+ // registered to the Autograd key as needed.
324
+ // For example, XLA wants to define autograd for einsum directly.
325
+ // Registering a custom autograd implementation at the XLA key won't work
326
+ // because we process Autograd before XLA. This key has higher priority and
327
+ // gets processed first. You generally should NOT redispatch after handling
328
+ // autograd here (since that would result in execution of the Autograd
329
+ // operator, which you're trying to skip). In AutogradXLA implementations,
330
+ // you are responsible for handling autograd yourself, or deferring to other
331
+ // operators which support autograd.
332
+
333
+ // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and
334
+ // reserved user-defined backends. All other in-tree backends share the
335
+ // AutogradOther key. We can add specific autograd key for those backends
336
+ // upon request.
337
+ AutogradOther,
338
+
339
+ // See [Note: Per-Backend Functionality Dispatch Keys]
340
+ AutogradFunctionality,
341
+
342
+ // NestedTensor is an example of something that isn't a "real backend"
343
+ // (because it mostly consists of redispatching kernels)
344
+ // but it would like to override autograd functionality in C++.
345
+ // We can handle cases like this by adding an extra functionality key
346
+ // exclusively for handling autograd for NestedTensor.
347
+ // lives out of tree at
348
+ // https://github.com/pytorch/nestedtensor
349
+ AutogradNestedTensor,
350
+
351
+ Tracer,
352
+
353
+ // TODO: make Autocast a functionality key
354
+ // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed
355
+ // and inputs are saved for backward in the post-autocast type.
356
+ AutocastCPU,
357
+ AutocastXPU,
358
+ AutocastIPU,
359
+ AutocastHPU,
360
+ AutocastXLA,
361
+ // AutocastXLA is only being used for TPUs. XLA GPUs continue to use
362
+ // AutocastCUDA.
363
+ AutocastCUDA,
364
+ AutocastPrivateUse1,
365
+
366
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
367
+ // There are a number of alternative modes which may want to handle before
368
+ // autograd; for example, error checking, tracing, profiling or vmap. They
369
+ // go here.
370
+
371
+ FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype]
372
+
373
+ // Dispatch key for BatchedTensorImpl wrapping a nested tensor.
374
+ BatchedNestedTensor,
375
+
376
+ FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype]
377
+
378
+ // This is the dispatch key for BatchedTensorImpl, which is used to implement
379
+ // batching rules for vmap.
380
+ Batched,
381
+
382
+ // When we are inside a vmap, all tensors dispatch on this key.
383
+ // See Note: [DispatchKey::VmapMode usage] for more details.
384
+ VmapMode,
385
+
386
+ FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype]
387
+
388
+ // Out-of-core key for Deferred Module Initialization in torchdistx.
389
+ // See https://pytorch.org/torchdistx/latest/deferred_init.html
390
+ DeferredInit,
391
+
392
+ // Used by Python key logic to know the set of tls on entry to the dispatcher
393
+ // This kernel assumes it is the top-most non-functorch-related DispatchKey.
394
+ // If you add a key above, make sure to update the fallback implementation for
395
+ // this.
396
+ PythonTLSSnapshot,
397
+
398
+ // This key should be at the very top of the dispatcher
399
+ FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype]
400
+
401
+ // TESTING: This is intended to be a generic testing tensor type id.
402
+ // Don't use it for anything real; its only acceptable use is within a single
403
+ // process test. Use it by creating a TensorImpl with this DispatchKey, and
404
+ // then registering operators to operate on this type id. See
405
+ // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example.
406
+ TESTING_ONLY_GenericWrapper,
407
+
408
+ // TESTING: This is intended to be a generic testing tensor type id.
409
+ // Don't use it for anything real; its only acceptable use is within a single
410
+ // process test. Use it by toggling the mode on and off via
411
+ // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators
412
+ // to operate on this type id. See
413
+ // aten/src/ATen/core/dispatch/backend_fallback_test.cpp
414
+ // for a usage example
415
+ TESTING_ONLY_GenericMode,
416
+
417
+ // This key is used for pre-dispatch tracing in make_fx.
418
+ // It has lower priority than the PythonDispatcher key
419
+ // because we use the PythonDispatcher to intercept the key from python,
420
+ // and avoid having to implement it in C++.
421
+ PreDispatch,
422
+
423
+ // This is a bypass that allows you to skip running the C++ dispatcher
424
+ // entirely
425
+ PythonDispatcher,
426
+
427
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
428
+ EndOfFunctionalityKeys, // End of functionality keys.
429
+
430
+ // ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ //
431
+ // Here are backends which you think of as traditionally specifying
432
+ // how to implement operations on some device.
433
+
434
+ #define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n,
435
+
436
+ #define DEFINE_PER_BACKEND_KEYS(fullname, prefix) \
437
+ StartOf##fullname##Backends, \
438
+ C10_FORALL_BACKEND_COMPONENTS( \
439
+ DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \
440
+ EndOf##fullname##Backends = prefix##Meta,
441
+
442
+ C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS)
443
+
444
+ #undef DEFINE_PER_BACKEND_KEYS
445
+ #undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND
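+
+ // As an illustrative sketch, for the "Dense" functionality (empty prefix) the
+ // macros above expand to roughly:
+ //
+ //   StartOfDenseBackends,
+ //   CPU,
+ //   CUDA,
+ //   ...                  // one key per backend in C10_FORALL_BACKEND_COMPONENTS
+ //   PrivateUse1,
+ //   PrivateUse2,
+ //   PrivateUse3,
+ //   Meta,
+ //   EndOfDenseBackends = Meta,
+ //
+ // and similarly SparseCPU/SparseCUDA/... for "Sparse",
+ // AutogradCPU/AutogradCUDA/... for "AutogradFunctionality", and so on.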
446
+
447
+ EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends,
448
+
449
+ // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ //
450
+ // Note [Alias Dispatch Keys]
451
+ // Alias dispatch keys are synthetic dispatch keys which map to multiple
452
+ // runtime dispatch keys. Alias keys have a precedence order, but they always
454
+ // have lower precedence than runtime keys. You can register a kernel to an
455
+ // alias key; the kernel may then be populated to the mapped runtime keys
456
+ // during dispatch table computation.
457
+ // If a runtime dispatch key has multiple kernels from alias keys, the kernel
458
+ // that wins is determined by the precedence of the alias keys (but runtime
459
+ // keys always have precedence over alias keys).
459
+ // Alias keys won't be directly called during runtime.
460
+
461
+ // See Note [Alias Dispatch Key : Autograd]
462
+ Autograd,
463
+ CompositeImplicitAutograd, // registered at
464
+ // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp
465
+
466
+ // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from
467
+ // all
468
+ // other alias keysets
469
+ // and so precedence order doesn't matter
470
+ FuncTorchBatchedDecomposition, // registered at
471
+ // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp
472
+ // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is
473
+ // disjoint from all other alias keysets
474
+ CompositeImplicitAutogradNestedTensor, // registered at
475
+ // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp
476
+ CompositeExplicitAutograd, // registered at
477
+ // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
478
+ // See Note [CompositeExplicitAutogradNonFunctional Key]
479
+ CompositeExplicitAutogradNonFunctional, // registered at
480
+ // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
481
+
482
+ // Define an alias key to represent end of alias dispatch keys.
483
+ // If you add new alias keys after Autograd, please also update it here.
484
+ StartOfAliasKeys = Autograd,
485
+ EndOfAliasKeys = CompositeExplicitAutogradNonFunctional, //
486
+
487
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
488
+ // These aliases exist for backwards-compatibility reasons; they shouldn't
489
+ // be used
490
+ CPUTensorId = CPU,
491
+ CUDATensorId = CUDA,
492
+ DefaultBackend = CompositeExplicitAutograd,
493
+ PrivateUse1_PreAutograd = AutogradPrivateUse1,
494
+ PrivateUse2_PreAutograd = AutogradPrivateUse2,
495
+ PrivateUse3_PreAutograd = AutogradPrivateUse3,
496
+ Autocast = AutocastCUDA,
497
+ };
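+
+ // To summarize, the enum above is laid out (in increasing order) as:
+ //
+ //   Undefined
+ //   <functionality keys>         (Dense ... PythonDispatcher)
+ //   EndOfFunctionalityKeys
+ //   <per-backend runtime keys>   (StartOfDenseBackends ... EndOfRuntimeBackendKeys)
+ //   <alias keys>                 (StartOfAliasKeys ... EndOfAliasKeys)
+ //
+ // plus the backwards-compatibility aliases, which are just alternate names for
+ // existing values and do not occupy new slots.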
498
+
499
+ // Note [Private use DispatchKey]
500
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~
501
+ // Private use tensor IDs are preallocated tensor type IDs for use in user
502
+ // applications. Similar to private use fields in HTTP, they can be used
503
+ // by end users for experimental or private applications, without needing
504
+ // to "standardize" the tensor ID (which would be done by submitting a PR
505
+ // to PyTorch to add your type ID).
506
+ //
507
+ // Private use tensor IDs are appropriate to use if you want to experiment
508
+ // with adding a new tensor type (without having to patch PyTorch first) or
509
+ // have a private, non-distributed application that needs to make use of a
510
+ // new tensor type. Private use tensor IDs are NOT appropriate to use for
511
+ // libraries intended to be distributed to further users: please contact
512
+ // the PyTorch developers to get a type ID registered in this case.
513
+ //
514
+ // We provide two classes of private use tensor id: regular DispatchKeys
515
+ // and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend"
516
+ // DispatchKeys; if you were adding support for a new type of accelerator, you
517
+ // would use a backend DispatchKey, and ideally automatically reuse
518
+ // AutogradOther definitions already defined in PyTorch. AutogradPrivateUse
519
+ // DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for
520
+ // tensors that compose multiple internal tensors, and for cases when the
521
+ // built-in autograd formulas for operators are not appropriate.
522
+
523
+ static_assert(
524
+ (static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) +
525
+ static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys)) <= 64,
526
+ "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)"
527
+ " both map to backend and functionality bits"
528
+ " into a 64-bit bitmask; you must have less than 64 total entries between them");
529
+
530
+ // Check if a DispatchKey is an alias mapping to other runtime keys.
531
+ constexpr bool isAliasDispatchKey(DispatchKey k) {
532
+ return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys;
533
+ }
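+
+ // For example, given the enum layout above:
+ //
+ //   static_assert(isAliasDispatchKey(DispatchKey::Autograd));
+ //   static_assert(isAliasDispatchKey(DispatchKey::CompositeImplicitAutograd));
+ //   static_assert(!isAliasDispatchKey(DispatchKey::CPU));    // runtime key
+ //   static_assert(!isAliasDispatchKey(DispatchKey::Dense));  // functionality key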
534
+
535
+ // [Note: Per-Backend Functionality Dispatch Keys]
536
+ // Check if a DispatchKey is a per-backend functionality key
537
+ // Any functionalities that can be customized per-backend should be added here.
538
+ // These keys correspond to functionalities that can be customized individually
539
+ // per backend. While they only take up one bit in the `DispatchKeySet` bitset,
540
+ // they map to (# backends) slots in the operator table.
541
+ // Each of these keys also has a separate set of "runtime keys" in the dispatch
542
+ // key enum, per backend, which *do* map to the individual operator table slots.
543
+ // For example, the "Sparse" key maps to an individual bit in the
544
+ // DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual
545
+ // slots in the runtime operator table.
546
+
547
+ constexpr bool isPerBackendFunctionalityKey(DispatchKey k) {
548
+ if (k == DispatchKey::Dense || k == DispatchKey::Quantized ||
549
+ k == DispatchKey::Sparse || k == DispatchKey::AutogradFunctionality ||
550
+ k == DispatchKey::NestedTensor) {
551
+ return true;
552
+ } else {
553
+ return false;
554
+ }
555
+ }
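+
+ // For example:
+ //   isPerBackendFunctionalityKey(DispatchKey::Sparse)                -> true
+ //   isPerBackendFunctionalityKey(DispatchKey::AutogradFunctionality) -> true
+ //   isPerBackendFunctionalityKey(DispatchKey::FPGA)          -> false (non-customizable backend)
+ //   isPerBackendFunctionalityKey(DispatchKey::Functionalize) -> false (non-customizable functionality)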
556
+
557
+ // Note that this includes Undefined in the total count.
558
+ // BUT EndOfFunctionalityKeys is its own (placeholder) key.
559
+ // e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3.
560
+ // In the above example, there are 3 total functionality keys.
561
+ constexpr uint8_t num_functionality_keys =
562
+ static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys);
563
+
564
+ constexpr uint8_t num_backends =
565
+ static_cast<uint8_t>(BackendComponent::EndOfBackendKeys);
566
+
567
+ // Note [No More Than 16 Backends]
568
+ // Search for this note to find places in the code where the "no more than 16
569
+ // backends" invariant is baked in.
570
+ static_assert(
571
+ static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) <= 16,
572
+ "BackendComponent currently only supports <= 16 backends. If we really need to extend this, \
573
+ there are a few places where this invariant is baked in");
574
+
575
+ constexpr uint8_t numPerBackendFunctionalityKeys() {
576
+ uint8_t count = 0;
577
+ for (uint8_t k = 0; k <= num_functionality_keys; ++k) {
578
+ if (isPerBackendFunctionalityKey(static_cast<DispatchKey>(k)))
579
+ ++count;
580
+ }
581
+ return count;
582
+ }
583
+
584
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
585
+ // See [Note: Trimmed Mobile Dispatch Keys]
586
+ constexpr uint16_t num_runtime_entries = 8;
587
+ #else
588
+ constexpr uint16_t num_runtime_entries = num_functionality_keys +
589
+ (numPerBackendFunctionalityKeys() * (num_backends - 1));
590
+ #endif
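+
+ // A worked example with hypothetical numbers, just to illustrate the formula:
+ // with 10 functionality keys, 5 of which are per-backend, and 4 backends, each
+ // per-backend functionality contributes (4 - 1) extra slots on top of the one
+ // it already occupies among the functionality keys, giving
+ //   10 + 5 * (4 - 1) = 25 runtime entries.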
591
+
592
+ // See Note [No More Than 16 Backends]
593
+ constexpr uint16_t full_backend_mask =
594
+ (static_cast<uint16_t>(1) << num_backends) - 1;
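+
+ // For example, if num_backends were 4, full_backend_mask would be
+ //   (1 << 4) - 1 == 0b1111,
+ // i.e. exactly the low num_backends bits of a keyset, which are the backend bits.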
595
+
596
+ C10_API const char* toString(DispatchKey);
597
+ C10_API const char* toString(BackendComponent);
598
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
599
+ C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
600
+
601
+ C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
602
+
603
+ // Parses a string into a dispatch key.
604
+ // If the string cannot be correctly parsed, throws an exception.
605
+ C10_API c10::DispatchKey parseDispatchKey(const std::string& k);
606
+
607
+ // These are some convenience identifiers for dispatch keys which are
608
+ // shorter to type than their long counterparts. Note that some of these
609
+ // dispatch keys directly correspond to DeviceType; and most APIs that
610
+ // accept DispatchKey also accept DeviceType; e.g.,
611
+ // torch::dispatch(torch::kCPU, ...) is also valid.
612
+ constexpr DispatchKey kAutograd = DispatchKey::Autograd;
613
+
614
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
615
+ // This function relies on the invariant that the dispatch keys between
616
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
617
+ // in the same order as `BackendComponent`.
618
+ constexpr BackendComponent toBackendComponent(DispatchKey k) {
619
+ if (k >= DispatchKey::StartOfDenseBackends &&
620
+ k <= DispatchKey::EndOfDenseBackends) {
621
+ return static_cast<BackendComponent>(
622
+ static_cast<uint8_t>(k) -
623
+ static_cast<uint8_t>(DispatchKey::StartOfDenseBackends));
624
+ } else if (
625
+ k >= DispatchKey::StartOfQuantizedBackends &&
626
+ k <= DispatchKey::EndOfQuantizedBackends) {
627
+ return static_cast<BackendComponent>(
628
+ static_cast<uint8_t>(k) -
629
+ static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends));
630
+ } else if (
631
+ k >= DispatchKey::StartOfSparseBackends &&
632
+ k <= DispatchKey::EndOfSparseBackends) {
633
+ return static_cast<BackendComponent>(
634
+ static_cast<uint8_t>(k) -
635
+ static_cast<uint8_t>(DispatchKey::StartOfSparseBackends));
636
+ } else if (
637
+ k >= DispatchKey::StartOfNestedTensorBackends &&
638
+ k <= DispatchKey::EndOfNestedTensorBackends) {
639
+ return static_cast<BackendComponent>(
640
+ static_cast<uint8_t>(k) -
641
+ static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends));
642
+ } else if (
643
+ k >= DispatchKey::StartOfAutogradFunctionalityBackends &&
644
+ k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
645
+ return static_cast<BackendComponent>(
646
+ static_cast<uint8_t>(k) -
647
+ static_cast<uint8_t>(
648
+ DispatchKey::StartOfAutogradFunctionalityBackends));
649
+ } else {
650
+ return BackendComponent::InvalidBit;
651
+ }
652
+ }
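+
+ // For example (assuming CPU is the first entry in C10_FORALL_BACKEND_COMPONENTS,
+ // so that CPUBit == 1):
+ //   toBackendComponent(DispatchKey::CPU)         == BackendComponent::CPUBit
+ //   toBackendComponent(DispatchKey::SparseCPU)   == BackendComponent::CPUBit
+ //   toBackendComponent(DispatchKey::AutogradCPU) == BackendComponent::CPUBit
+ //   toBackendComponent(DispatchKey::Autograd)    == BackendComponent::InvalidBit
+ // (alias keys and functionality-only keys carry no backend component).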
653
+
654
+ constexpr DispatchKey toFunctionalityKey(DispatchKey k) {
655
+ if (k <= DispatchKey::EndOfFunctionalityKeys) {
656
+ return k;
657
+ } else if (k <= DispatchKey::EndOfDenseBackends) {
658
+ return DispatchKey::Dense;
659
+ } else if (k <= DispatchKey::EndOfQuantizedBackends) {
660
+ return DispatchKey::Quantized;
661
+ } else if (k <= DispatchKey::EndOfSparseBackends) {
662
+ return DispatchKey::Sparse;
663
+ } else if (k <= DispatchKey::EndOfNestedTensorBackends) {
664
+ return DispatchKey::NestedTensor;
665
+ } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
666
+ return DispatchKey::AutogradFunctionality;
667
+ } else {
668
+ return DispatchKey::Undefined;
669
+ }
670
+ }
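+
+ // For example:
+ //   toFunctionalityKey(DispatchKey::CPU)         == DispatchKey::Dense
+ //   toFunctionalityKey(DispatchKey::SparseCUDA)  == DispatchKey::Sparse
+ //   toFunctionalityKey(DispatchKey::AutogradCPU) == DispatchKey::AutogradFunctionality
+ //   toFunctionalityKey(DispatchKey::Sparse)      == DispatchKey::Sparse
+ // (a key that is already a functionality key maps to itself).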
671
+
672
+ BackendComponent toBackendComponent(DeviceType device_type);
673
+
674
+ // Given (DispatchKey::Dense, BackendComponent::CUDABit), returns
675
+ // DispatchKey::CUDA.
676
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
677
+ // This function relies on the invariant that the dispatch keys between
678
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
679
+ // in the same order as `BackendComponent`.
680
+ constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
681
+ DispatchKey functionality_k,
682
+ BackendComponent backend_k) {
683
+ if (functionality_k == DispatchKey::Dense) {
684
+ return static_cast<DispatchKey>(
685
+ static_cast<uint8_t>(DispatchKey::StartOfDenseBackends) +
686
+ static_cast<uint8_t>(backend_k));
687
+ }
688
+ if (functionality_k == DispatchKey::Sparse) {
689
+ return static_cast<DispatchKey>(
690
+ static_cast<uint8_t>(DispatchKey::StartOfSparseBackends) +
691
+ static_cast<uint8_t>(backend_k));
692
+ }
693
+ if (functionality_k == DispatchKey::Quantized) {
694
+ return static_cast<DispatchKey>(
695
+ static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends) +
696
+ static_cast<uint8_t>(backend_k));
697
+ }
698
+ if (functionality_k == DispatchKey::NestedTensor) {
699
+ return static_cast<DispatchKey>(
700
+ static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends) +
701
+ static_cast<uint8_t>(backend_k));
702
+ }
703
+ if (functionality_k == DispatchKey::AutogradFunctionality) {
704
+ return static_cast<DispatchKey>(
705
+ static_cast<uint8_t>(
706
+ DispatchKey::StartOfAutogradFunctionalityBackends) +
707
+ static_cast<uint8_t>(backend_k));
708
+ }
709
+ return DispatchKey::Undefined;
710
+ }
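+
+ // For example, as noted above:
+ //   toRuntimePerBackendFunctionalityKey(DispatchKey::Dense, BackendComponent::CUDABit)
+ //       == DispatchKey::CUDA
+ // and the mapping round-trips with the helpers above:
+ //   toFunctionalityKey(DispatchKey::CUDA)   == DispatchKey::Dense
+ //   toBackendComponent(DispatchKey::CUDA)   == BackendComponent::CUDABit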
711
+
712
+ } // namespace c10
713
+
714
+ namespace torch {
715
+ // Expose the constant, but not the TYPE (DispatchKey is an implementation
716
+ // detail!)
717
+ using c10::kAutograd;
718
+ } // namespace torch
719
+
720
+ // NB: You really shouldn't use this instance; this enum is guaranteed
721
+ // to be pretty small so a regular array should be acceptable.
722
+ namespace std {
723
+ template <>
724
+ struct hash<c10::DispatchKey> {
725
+ typedef size_t result_type;
726
+ typedef c10::DispatchKey argument_type;
727
+
728
+ size_t operator()(c10::DispatchKey x) const {
729
+ return static_cast<size_t>(x);
730
+ }
731
+ };
732
+ } // namespace std
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h ADDED
@@ -0,0 +1,928 @@
 
 
1
+ #pragma once
2
+ #include <c10/core/DispatchKey.h>
3
+ #include <c10/util/Exception.h>
4
+ #include <c10/util/Metaprogramming.h>
5
+ #include <c10/util/llvmMathExtras.h>
6
+ #include <array>
7
+ #include <ostream>
8
+
9
+ namespace c10 {
10
+
11
+ struct FunctionalityOffsetAndMask {
12
+ // empty constructor shouldn't be used; only needed to initialize
13
+ // the array before populating it.
14
+ FunctionalityOffsetAndMask() = default;
15
+ FunctionalityOffsetAndMask(uint16_t offset, uint16_t mask)
16
+ : offset(offset), mask(mask) {}
17
+ // This needs to be big enough to cover the size of the operator table.
18
+ uint16_t offset{};
19
+ // See Note [No More Than 16 Backends]
20
+ // This mask needs to be big enough to mask all of the backend bits.
21
+ // We probably don't ever want to have more than 16 backend bits, so uint16_t
22
+ // should be enough.
23
+ uint16_t mask{};
24
+ };
25
+ static_assert(
26
+ c10::num_runtime_entries < 65536,
27
+ "The dispatcher currently only supports up to 2^16 runtime entries");
28
+
29
+ C10_API std::array<FunctionalityOffsetAndMask, num_functionality_keys>
30
+ initializeFunctionalityOffsetsAndMasks();
31
+
32
+ C10_ALWAYS_INLINE static const std::
33
+ array<FunctionalityOffsetAndMask, num_functionality_keys>&
34
+ offsetsAndMasks() {
35
+ static auto offsets_and_masks_ = initializeFunctionalityOffsetsAndMasks();
36
+ return offsets_and_masks_;
37
+ }
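+
+ // A rough sketch of how these are used (see
+ // getDispatchTableIndexForDispatchKeySet() below): each functionality gets an
+ // `offset` marking where its block of slots starts in the runtime operator
+ // table, and a `mask` selecting the backend bits that are relevant for it
+ // (non-empty only for per-backend functionalities). The final slot is then
+ // roughly:
+ //
+ //   index = offsetsAndMasks()[functionality_idx].offset + backend_idx;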
38
+
39
+ // A representation of a set of DispatchKeys. A DispatchKeySet contains both
40
+ // "functionality" bits and "backend bits", and every tensor holds its own
41
+ // DispatchKeySet. The Dispatcher implements multiple dispatch by grabbing the
42
+ // keyset on every input tensor, or’ing them together, and dispatching to a
43
+ // specific piece of functionality. The functionality bits are *ordered*. When
44
+ // multiple functionality bits are set, we use the highest priority
45
+ // functionality. Similarly, multiple backend bits can theoretically be set if
46
+ // you call an operator with multiple tensors from different devices (e.g. CPU
47
+ // and CUDA), although support for mixed device dispatch is limited (the only
48
+ // kernels that gracefully handle mixed device inputs for now are cuda kernels
49
+ // that take in a scalar cpu tensor).
50
+
51
+ // A representation of a set of DispatchKeys. A tensor may have multiple
52
+ // tensor type ids, e.g., a Variable tensor can also be a CPU tensor; the
53
+ // DispatchKeySet specifies what type ids apply. The internal representation is
54
+ // as a 64-bit bit set (this means only 64 tensor type ids are supported).
55
+ //
56
+ // As mentioned above, DispatchKeys are ordered; thus, we can ask questions like
57
+ // "what is the highest priority DispatchKey in the set"? (The set itself is
58
+ // not ordered; two sets with the same ids will always have the ids ordered in
59
+ // the same way.)
60
+ //
61
+ // Note [DispatchKeySet Internal Representation]
62
+ // Internally, dispatch keys are packed into 64-bit DispatchKeySet objects
63
+ // that get passed around at runtime.
64
+ // However, there isn't necessarily a 1-to-1 mapping between bits in the keyset
65
+ // and individual dispatch keys.
66
+ //
67
+ // First: why do we have this distinction, and why not map every dispatch key
68
+ // directly to a bit? This is mostly because we have several types of
69
+ // functionalities that different backends would like to customize. For example,
70
+ // we have:
71
+ // - "Dense": CPU, CUDA, XLA, ... (~12 keys)
72
+ // - "Sparse": SparseCPU, SparseCUDA, ...
73
+ // - "Quantized": QuantizedCPU, QuantizedCUDA, QuantizedXLA, ...
74
+ // - "Autograd": AutogradCPU, AutogradCUDA, Autograd XLA, ...
75
+ // The problem is that the total number of keys grows quadratically with [#
76
+ // backends] x [# functionalities], making it very difficult to map each key
77
+ // directly to a bit in a bitset without dramatically increasing the size of the
78
+ // bitset over time.
79
+ //
80
+ // The two enums (BackendComponent and DispatchKey) can be divided roughly into
81
+ // 5 categories.
82
+ //
83
+ // (1) "Building block" keys
84
+ // (a) backends: Everything in the BackendComponent enum (e.g. CPUBit,
85
+ // CUDABit) (b) functionalities: (per-backend) functionality-bit DispatchKeys
86
+ // (e.g. AutogradFunctionality, Sparse, Dense)
87
+ // (2) "Runtime" keys
88
+ // (a) "non-customizable backends" (e.g. FPGA)
89
+ // (b) "non-customizable functionalities" (e.g. Functionalize)
90
+ // (c) "per-backend instances of customizable functionalities" (e.g. CPU,
91
+ // SparseCPU, AutogradCPU)
92
+ // (3) "Alias" DispatchKeys (see Note [Alias Dispatch Keys])
93
+ //
94
+ // (1) Building block keys always correspond to individual bits in a
95
+ // DispatchKeySet. They can also be combined in a DispatchKeySet to form actual
96
+ // runtime keys. e.g.
97
+ // auto dense_cpu_ks = DispatchKeySet({DispatchKey::CPUBit,
98
+ // DispatchKey::Dense});
99
+ // // The keyset has the runtime dense-cpu key.
100
+ // dense_cpu_ks.has(DispatchKey::CPU);
101
+ // // And it contains the building block keys too.
102
+ // dense_cpu_ks.has(DispatchKey::CPUBit);
103
+ // dense_cpu_ks.has(DispatchKey::Dense);
104
+ //
105
+ // Not every backend and not every functionality counts as a "building block
106
+ // key". This is mostly to give us more levers to pull in the design space.
107
+ // Backend keys and functionality keys that count as "building blocks" will
108
+ // contribute to a full cross product of functionality that can be overridden.
109
+ //
110
+ // For example, right now we have at least 12 "backend" building blocks (CPU,
111
+ // CUDA, XLA, ...) and at least 4 "functionality" building blocks (Dense,
112
+ // Sparse, Quantized, AutogradFunctionality, ...). These keys together allow
113
+ // every dispatcher operator to be customized in up to 12*4 different ways. Each
114
+ // of those requires a slot in the operator table of every dispatcher operator.
115
+ // Not every piece of functionality necessarily needs to be customizable
116
+ // per-backend, and not every backend necessarily needs to be able to customize
117
+ // every type of functionality.
118
+ //
119
+ //
120
+ // (2) Every runtime key corresponds directly to a slot in an operator's runtime
121
+ // dispatch table, and you can directly register kernels to a runtime dispatch
122
+ // key.
123
+ //
124
+ // For per-backend functionalities like "Dense" or "AutogradFunctionality",
125
+ // you can think of the corresponding runtime dispatch keys as "instances" of
126
+ // that functionality, per backend. E.g. "CPU", "CUDA", "XLA", etc. are all
127
+ // runtime instances of the "Dense" building block key.
128
+
129
+ // (2a) and (2b) are represented identically in the DispatchKeySet logic:
130
+ // - backend-agnostic functionalities (e.g. FuncTorchBatched) are NOT
131
+ // customizable per backend.
132
+ // In order to do so, we'd need to promote it to a per-backend functionality
133
+ // "building block" key.
134
+ // - non-customizable backends (e.g. FPGA) can NOT customize existing
135
+ // functionality like Sparse, Autograd, etc.
136
+ // In order to do so, we'd need to promote it to a backend "building block"
137
+ // key.
138
+ //
139
+ // In both cases, these keys directly correspond to runtime slots in the
140
+ // operator table.
141
+ //
142
+ //
143
+ // (3) "Alias" keys
144
+ // See Note [Alias Dispatch Keys]
145
+ //
146
+ // Final note: for anyone making future changes to the Dispatcher +
147
+ // DispatchKeySet internals, there's a closed PR with a basic
148
+ // python-implementation of the Dispatcher that might be useful in quickly
149
+ // testing out and validating changes. See it at
150
+ // https://github.com/pytorch/pytorch/pull/68743
151
+
152
+ // An undefined tensor is one with an empty tensor type set.
153
+ class DispatchKeySet final {
154
+ public:
155
+ enum Full { FULL };
156
+ enum FullAfter { FULL_AFTER };
157
+ enum Raw { RAW };
158
+
159
+ // NB: default constructor representation as zero is MANDATORY as
160
+ // use of DispatchKeySet in TLS requires this.
161
+ constexpr DispatchKeySet() = default;
162
+
163
+ constexpr DispatchKeySet(Full)
164
+ : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}
165
+
166
+ constexpr DispatchKeySet(FullAfter, DispatchKey t)
167
+ // LSB after t are OK, but not t itself.
168
+ // "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
169
+ // Quantized > Dense). But backends don't really have an ordering.
170
+ // Therefore, we're enforcing that FullAfter can only be used on
171
+ // "functionality" keys.
172
+ : repr_(
173
+ (1ULL
174
+ << (num_backends + static_cast<uint8_t>(toFunctionalityKey(t)) -
175
+ 1)) -
176
+ 1) {
177
+ *this = add(DispatchKey::PythonDispatcher);
178
+ }
179
+
180
+ // Public version of DispatchKeySet(uint64_t) API; external users
181
+ // must be explicit when they do this!
182
+ constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
183
+
184
+ constexpr explicit DispatchKeySet(BackendComponent k) {
185
+ if (k == BackendComponent::InvalidBit) {
186
+ repr_ = 0;
187
+ } else {
188
+ repr_ = 1ULL << (static_cast<uint8_t>(k) - 1);
189
+ }
190
+ }
191
+
192
+ constexpr explicit DispatchKeySet(DispatchKey k) {
193
+ if (k == DispatchKey::Undefined) {
194
+ // Case 1: handle Undefined specifically
195
+ repr_ = 0;
196
+ } else if (k <= DispatchKey::EndOfFunctionalityKeys) {
197
+ // Case 2: handle "functionality-only" keys
198
+ // These keys have a functionality bit set, but no backend bits
199
+ // These can technically be either:
200
+ // - valid runtime keys (e.g. DispatchKey::AutogradOther,
201
+ // DispatchKey::FuncTorchBatched, etc)
202
+ // - "building block" keys that aren't actual runtime keys (e.g.
203
+ // DispatchKey::Dense or Sparse)
204
+ uint64_t functionality_val = 1ULL
205
+ << (num_backends + static_cast<uint8_t>(k) - 1);
206
+ repr_ = functionality_val;
207
+ } else if (k <= DispatchKey::EndOfRuntimeBackendKeys) {
208
+ // Case 3: "runtime" keys that have a functionality bit AND a backend bit.
209
+ // First compute which bit to flip for the functionality.
210
+ auto functionality_k = toFunctionalityKey(k);
211
+ // The - 1 is because Undefined is technically a "functionality" that
212
+ // doesn't show up in the bitset. So e.g. Dense is technically the second
213
+ // functionality, but the lowest functionality bit.
214
+ uint64_t functionality_val = 1ULL
215
+ << (num_backends + static_cast<uint8_t>(functionality_k) - 1);
216
+
217
+ // then compute which bit to flip for the backend
218
+ // Case 4a: handle the runtime instances of "per-backend functionality"
219
+ // keys For example, given DispatchKey::CPU, we should set:
220
+ // - the Dense functionality bit
221
+ // - the CPUBit backend bit
222
+ // first compute which bit to flip for the backend
223
+ auto backend_k = toBackendComponent(k);
224
+ uint64_t backend_val = backend_k == BackendComponent::InvalidBit
225
+ ? 0
226
+ : 1ULL << (static_cast<uint8_t>(backend_k) - 1);
227
+ repr_ = functionality_val + backend_val;
228
+ } else {
229
+ // At this point, we should have covered every case except for alias keys.
230
+ // Technically it would be possible to add alias dispatch keys to a
231
+ // DispatchKeySet, but the semantics are a little confusing and this
232
+ // currently isn't needed anywhere.
233
+ repr_ = 0;
234
+ }
235
+ }
236
+
237
+ constexpr uint64_t keys_to_repr(std::initializer_list<DispatchKey> ks) {
238
+ uint64_t repr = 0;
239
+ for (auto k : ks) {
240
+ repr |= DispatchKeySet(k).repr_;
241
+ }
242
+ return repr;
243
+ }
244
+
245
+ constexpr uint64_t backend_bits_to_repr(
246
+ std::initializer_list<BackendComponent> ks) {
247
+ uint64_t repr = 0;
248
+ for (auto k : ks) {
249
+ repr |= DispatchKeySet(k).repr_;
250
+ }
251
+ return repr;
252
+ }
253
+
254
+ explicit constexpr DispatchKeySet(std::initializer_list<DispatchKey> ks)
255
+ : repr_(keys_to_repr(ks)) {}
256
+
257
+ explicit constexpr DispatchKeySet(std::initializer_list<BackendComponent> ks)
258
+ // Note: for some reason, putting this logic directly in the constructor
259
+ // appears to fail to compile on CUDA 10.1.
260
+ // See an example internal failure at
261
+ // https://www.internalfb.com/intern/skycastle/run/76561193669136035/artifact/actionlog.76561193742069401.stderr
262
+ : repr_(backend_bits_to_repr(ks)) {}
263
+
264
+ // Test if a DispatchKey is in the set
265
+ inline bool has(DispatchKey t) const {
266
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(t != DispatchKey::Undefined);
267
+ return has_all(DispatchKeySet(t));
268
+ }
269
+ constexpr bool has_backend(BackendComponent t) const {
270
+ return has_all(DispatchKeySet(t));
271
+ }
272
+
273
+ // Test if a DispatchKey is in the set
274
+ // Given a DispatchKeySet of functionality keys and (potentially) backend
275
+ // keys, tests if all of them are in the current set.
276
+ constexpr bool has_all(DispatchKeySet ks) const {
277
+ return static_cast<bool>((repr_ & ks.repr_) == ks.repr_);
278
+ }
279
+
280
+ // Given a DispatchKeySet of functionality keys and (potentially) backend
281
+ // keys, tests if any of them are in the current set. This could technically
282
+ // be pretty easily implemented using has(). It is strictly a perf
283
+ // optimization though. There are many places in the code base where we want
284
+ // to test for multiple functionality keys together. HOWEVER, runtime
285
+ // per-backend functionality keys aren't allowed to be used with this
286
+ // function, because you can end up with weird results. e.g.
287
+ // DispatchKeySet(DispatchKey::AutogradCPU).has_any(DispatchKeySet(DispatchKey::CPU))
288
+ // would return true.
289
+ inline bool has_any(DispatchKeySet ks) const {
290
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
291
+ // Either there are no backend bits in the input keyset
292
+ ((ks.repr_ & full_backend_mask) == 0) ||
293
+ // or there are no per-backend-functionality bits
294
+ // See [Note: Per-Backend Functionality Dispatch Keys]
295
+ ((ks &
296
+ DispatchKeySet({
297
+ DispatchKey::Dense,
298
+ DispatchKey::Quantized,
299
+ DispatchKey::Sparse,
300
+ DispatchKey::AutogradFunctionality,
301
+ })
302
+ .repr_) == 0));
303
+ return static_cast<bool>((repr_ & ks.repr_) != 0);
304
+ }
305
+ // Test if DispatchKeySet is a superset of ks.
306
+ bool isSupersetOf(DispatchKeySet ks) const {
307
+ return (repr_ & ks.repr_) == ks.repr_;
308
+ }
309
+ // Perform set union
310
+ constexpr DispatchKeySet operator|(DispatchKeySet other) const {
311
+ return DispatchKeySet(repr_ | other.repr_);
312
+ }
313
+ // Perform set intersection
314
+ constexpr DispatchKeySet operator&(DispatchKeySet other) const {
315
+ return DispatchKeySet(repr_ & other.repr_);
316
+ }
317
+ // Compute the set difference self - other,
318
+ // but ONLY for the functionality keys.
319
+ // Any backend bits set on self will remain unchanged.
320
+ // See Note [Removing keys from DispatchKeySet Only Affects Functionality
321
+ // Keys]
322
+ constexpr DispatchKeySet operator-(DispatchKeySet other) const {
323
+ return DispatchKeySet(repr_ & (full_backend_mask | ~other.repr_));
324
+ }
325
+
326
+ // Compute self ^ other
327
+ constexpr DispatchKeySet operator^(DispatchKeySet other) const {
328
+ return DispatchKeySet(repr_ ^ other.repr_);
329
+ }
330
+ bool operator==(DispatchKeySet other) const {
331
+ return repr_ == other.repr_;
332
+ }
333
+ bool operator!=(DispatchKeySet other) const {
334
+ return repr_ != other.repr_;
335
+ }
336
+ // Add a DispatchKey to the DispatchKey set. Does NOT mutate,
337
+ // returns the extended DispatchKeySet!
338
+ C10_NODISCARD constexpr DispatchKeySet add(DispatchKey t) const {
339
+ return *this | DispatchKeySet(t);
340
+ }
341
+ C10_NODISCARD constexpr DispatchKeySet add(DispatchKeySet ks) const {
342
+ return *this | ks;
343
+ }
344
+
345
+ // Remove a DispatchKey from the DispatchKey set.
346
+ // This is generally not an operation you should be doing
347
+ // (it's used to implement the printing overload, operator<<)
348
+ //
349
+ // Note [Removing keys from DispatchKeySet Only Affects Functionality Keys]
350
+ // Only functionality bits are allowed to be removed from a keyset.
351
+ // For now, we're only allowing removal of "functionality bits" from the
352
+ // keyset, which is specifically needed by the fallthrough key calculation
353
+ // logic. Why is removing backend bits problematic? Consider this example:
354
+ //
355
+ // DispatchKeySet([DispatchKey.CPU, DispatchKey.AutogradCUDA,
356
+ // DispatchKey.CUDA]).remove(DispatchKey.AutogradCUDA)
357
+ // DispatchKeySet([DispatchKey.CPU,
358
+ // DispatchKey.AutogradCUDA]).remove(DispatchKey.AutogradCUDA)
359
+ //
360
+ // What do we want to happen?
361
+ // Technically, we'd like it to be true that after removal,
362
+ // the first keyset still has the CUDA dispatch key while the second doesn't.
363
+ // Unfortunately there's no way to represent that, because the two keysets are
364
+ // represented the same way internally: functionality bits: Autograd, Dense
365
+ // backend bits: CPU, CUDA
366
+ //
367
+ // Instead, remove(DispatchKey.AutogradCPU) will only remove the "Autograd"
368
+ // bit from the bitset.
369
+ C10_NODISCARD constexpr DispatchKeySet remove(DispatchKey t) const {
370
+ return DispatchKeySet(
371
+ repr_ & ~(DispatchKeySet(t).repr_ & ~full_backend_mask));
372
+ }
373
+ // You're allowed to remove a backend bit from a DispatchKeySet,
374
+ // but you have to be explicit about it (remove_backend() instead of
375
+ // remove()).
376
+ constexpr DispatchKeySet remove_backend(BackendComponent b) const {
377
+ return DispatchKeySet(repr_ & ~(DispatchKeySet(b).repr_));
378
+ }
379
+ // Is the set empty? (AKA undefined tensor)
380
+ bool empty() const {
381
+ return repr_ == 0;
382
+ }
383
+ uint64_t raw_repr() {
384
+ return repr_;
385
+ }
386
+
387
+ DispatchKey highestFunctionalityKey() const {
388
+ auto functionality_idx = indexOfHighestBit();
389
+ // This means that none of the functionality bits were set.
390
+ if (functionality_idx < num_backends)
391
+ return DispatchKey::Undefined;
392
+ // The first num_backend bits in the keyset don't correspond to real
393
+ // dispatch keys.
394
+ return static_cast<DispatchKey>(functionality_idx - num_backends);
395
+ }
396
+
397
+ // This is similar like toBackendComponent(DispatchKey), but less restrictive.
398
+ // toBackendComponent() errors out if the key that it was passed has no
399
+ // backend bits, which is useful for error checking. We need a version of that
400
+ // here that can also handle "fake" backends like FPGA, because they need to
401
+ // map to the AutogradOther key. For those backends, we return
402
+ // BackendComponent::InvalidBit.
403
+ BackendComponent highestBackendKey() const {
404
+ // mask to mask out functionality bits
405
+ auto backend_idx =
406
+ DispatchKeySet(repr_ & full_backend_mask).indexOfHighestBit();
407
+ // all zeros across the backend bits means that no backend bits are set.
408
+ if (backend_idx == 0)
409
+ return BackendComponent::InvalidBit;
410
+ return static_cast<BackendComponent>(backend_idx);
411
+ }
412
+
413
+ // returns the DispatchKey of highest priority in the set.
414
+ DispatchKey highestPriorityTypeId() const {
415
+ auto functionality_k = highestFunctionalityKey();
416
+ if (isPerBackendFunctionalityKey(functionality_k)) {
417
+ return toRuntimePerBackendFunctionalityKey(
418
+ functionality_k, highestBackendKey());
419
+ }
420
+ return functionality_k;
421
+ }
422
+
423
+ // Returns the index of the most-significant bit in the keyset.
424
+ // This is used to as part of the calculation into the operator table to get:
425
+ // - the highest "functionality" bit in the keyset.
426
+ // - the highest "backend" bit in the keyset.
427
+ uint8_t indexOfHighestBit() const {
428
+ return 64 - llvm::countLeadingZeros(repr_);
429
+ }
430
+
431
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
432
+ // [Note: Trimmed Mobile Dispatch Keys]
433
+ /**
434
+ * The method below maps the dispatch key in the enum DispatchKey to an
435
+ * integer index in the dispatchTable_ array in OperatorEntry. The array
436
+ * is trimmed for mobile to reduce peak memory usage since it's
437
+ * unnecessary to reserve additional space for dispatch keys that will
438
+ * never be used on mobile.
439
+ */
440
+ int getDispatchTableIndexForDispatchKeySet() const {
441
+ auto dk = highestPriorityTypeId();
442
+ switch (dk) {
443
+ case DispatchKey::Undefined:
444
+ return 0;
445
+ case DispatchKey::CPU:
446
+ return 1;
447
+ case DispatchKey::QuantizedCPU:
448
+ return 2;
449
+ case DispatchKey::SparseCPU:
450
+ return 3;
451
+ case DispatchKey::BackendSelect:
452
+ return 4;
453
+ case DispatchKey::ADInplaceOrView:
454
+ return 5;
455
+ case DispatchKey::AutogradOther:
456
+ return 6;
457
+ case DispatchKey::AutogradCPU:
458
+ return 7;
459
+ default:
460
+ return -1;
461
+ }
462
+ }
463
+ #else
464
+ // returns the index in the operator table of the highest priority key in the
465
+ // keyset. Note that we could in theory implement this using
466
+ // highestPriorityTypeId(), but this code is very hotpath and we can do it
467
+ // faster without it.
468
+ int getDispatchTableIndexForDispatchKeySet() const {
469
+ auto functionality_idx =
470
+ DispatchKeySet(repr_ >> num_backends).indexOfHighestBit();
471
+ auto offset_and_mask = offsetsAndMasks()[functionality_idx];
472
+ // Mask the functionality bits out first, then right-shift by 1.
473
+ // right-shifting by 1 because everything is zero-indexed.
474
+ // E.g. 000001 (CPU) should give us an offset of 0, 000010 (CUDA) should
475
+ // give us an offset of 1, etc.
476
+ auto backend_idx =
477
+ DispatchKeySet((repr_ & offset_and_mask.mask) >> 1).indexOfHighestBit();
478
+ return offset_and_mask.offset + backend_idx;
479
+ }
480
+ #endif
481
+
482
+ // returns the "index" of the highest priority backend in the keyset.
483
+ // This is pretty similar to getBackendKey(), but:
484
+ // - It's hotpath code (part of the runtime bitset calculation)
485
+ // - It returns an integer index, not an enum value
486
+ // - Everything is shifted to the right by 1.
487
+ // BackendComponent::InvalidBit is technically the lowest enum value,
488
+ // but it isn't included in the runtime table. So CPUBit = 1, CUDABit = 2,
489
+ // etc.
490
+ uint64_t getBackendIndex() const {
491
+ return DispatchKeySet((repr_ & full_backend_mask) >> 1).indexOfHighestBit();
492
+ }
493
+
494
+ private:
495
+ constexpr DispatchKeySet(uint64_t repr) : repr_(repr) {}
496
+ uint64_t repr_ = 0;
497
+
498
+ public:
499
+ // STL iterator for DispatchKeySet. Iterates through all runtime DispatchKeys
500
+ // in the set. The iterator is only invalidated by the destruction of the
501
+ // underlying DispatchKeySet as the iterator stores a pointer to the raw
502
+ // representation of the DispatchKeySet. Note: When we encounter a per-backend
503
+ // functionality (e.g. Dense or Sparse), we will iterate through EVERY backend
504
+ // in the keyset, for that functionality. For example, if the next
505
+ // functionality key to iterate over is Autograd, and the backend bits in the
506
+ // keyset correspond to [BackendComponent::CPUBit, BackendComponent::CUDABit],
507
+ // then the next two keys we return will be DispatchKey::AutogradCPU,
508
+ // DispatchKey::AutogradCUDA (CPU first because it has lower precedence than
509
+ // CUDA in DispatchKey.h).
510
+ class iterator {
511
+ public:
512
+ using self_type = iterator;
513
+ using iterator_category = std::input_iterator_tag;
514
+ using value_type = DispatchKey;
515
+ using difference_type = ptrdiff_t;
516
+ using reference = value_type&;
517
+ using pointer = value_type*;
518
+ // final mask value should mask out the entire keyset
519
+ static const uint8_t end_iter_mask_val =
520
+ num_backends + num_functionality_keys;
521
+ // final key value should be the last DispatchKey
522
+ static const uint8_t end_iter_key_val = num_functionality_keys;
523
+
524
+ // current_dispatchkey_idx_ will iterate through all functionality bits.
525
+ // current_backendcomponent_idx_ will iterate through all backend bits.
526
+ explicit iterator(
527
+ const uint64_t* data_ptr,
528
+ uint8_t next_functionality = num_backends,
529
+ uint8_t next_backend = 0)
530
+ : data_ptr_(data_ptr),
531
+ next_functionality_(next_functionality),
532
+ next_backend_(next_backend),
533
+ // These are in an invalid state at construction time, and set by the
534
+ // first increment call
535
+ current_dispatchkey_idx_(end_iter_key_val),
536
+ current_backendcomponent_idx_(end_iter_key_val) {
537
+ // Go to the first key in the set
538
+ TORCH_INTERNAL_ASSERT(
539
+ next_functionality_ >= num_backends,
540
+ "num_backends=",
541
+ static_cast<uint32_t>(num_backends),
542
+ "next_functionality_=",
543
+ static_cast<uint32_t>(next_functionality_));
544
+ ++(*this);
545
+ }
546
+
547
+ C10_API self_type& operator++();
548
+
549
+ self_type operator++(int) {
550
+ self_type previous_iterator = *this;
551
+ ++(*this);
552
+ return previous_iterator;
553
+ }
554
+
555
+ bool operator==(const self_type& rhs) const {
556
+ return next_functionality_ == rhs.next_functionality_ &&
557
+ current_dispatchkey_idx_ == rhs.current_dispatchkey_idx_ &&
558
+ next_backend_ == rhs.next_backend_ &&
559
+ current_backendcomponent_idx_ == rhs.current_backendcomponent_idx_;
560
+ }
561
+ bool operator!=(const self_type& rhs) const {
562
+ return next_functionality_ != rhs.next_functionality_ ||
563
+ current_dispatchkey_idx_ != rhs.current_dispatchkey_idx_ ||
564
+ next_backend_ != rhs.next_backend_ ||
565
+ current_backendcomponent_idx_ != rhs.current_backendcomponent_idx_;
566
+ }
567
+ DispatchKey operator*() const {
568
+ auto functionality_key =
569
+ static_cast<DispatchKey>(current_dispatchkey_idx_);
570
+ if (isPerBackendFunctionalityKey(functionality_key)) {
571
+ auto next_key = toRuntimePerBackendFunctionalityKey(
572
+ functionality_key,
573
+ static_cast<BackendComponent>(current_backendcomponent_idx_));
574
+ // We expect all of the Dense, Sparse, Quantized, and Autograd keys to
575
+ // be ordered the same way with respect to their backends
576
+ TORCH_INTERNAL_ASSERT(
577
+ toBackendComponent(next_key) ==
578
+ static_cast<BackendComponent>(current_backendcomponent_idx_),
579
+ "Tried to map functionality key ",
580
+ toString(functionality_key),
581
+ " and backend bit ",
582
+ toString(
583
+ static_cast<BackendComponent>(current_backendcomponent_idx_)),
584
+ " to a runtime key, but ended up with ",
585
+ toString(next_key),
586
+ ". This can happen if the order of the backend dispatch keys in DispatchKey.h isn't consistent.",
587
+ " Please double check that enum for inconsistencies.");
588
+ return next_key;
589
+ } else {
590
+ return functionality_key;
591
+ }
592
+ }
593
+
594
+ private:
595
+ const uint64_t* data_ptr_;
596
+ uint8_t next_functionality_;
597
+ uint8_t next_backend_;
598
+ uint8_t current_dispatchkey_idx_;
599
+ uint8_t current_backendcomponent_idx_;
600
+ };
601
+
602
+ public:
603
+ // Returns iterator to the first key in the set. If no keys are in the
604
+ // set, then will return the end iterator.
605
+ iterator begin() const {
606
+ return iterator(&repr_);
607
+ }
608
+
609
+ // We do not need to iterate beyond EndOfFunctionalityKeys so we will treat
610
+ // this as the end iterator.
611
+ iterator end() const {
612
+ return iterator(&repr_, iterator::end_iter_mask_val);
613
+ }
614
+ };
615
+
616
+ C10_API std::string toString(DispatchKeySet);
617
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
618
+
619
+ C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
620
+ return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
621
+ }
622
+
623
+ // Alias key DispatchKey::Autograd maps to
624
+ // (autograd_dispatch_keyset x full_backend_mask)
625
+ // NB: keys in this set also get associated with CompositeImplicitAutograd
626
+ //
627
+ // Note [autograd_dispatch_keyset Does Not Include Backend Bits]
628
+ // We don't want to include any backend bits (BackendComponent::CPUBit, etc)
629
+ // directly in autograd_dispatch_keyset.
630
+ // Why? keysets like autograd_dispatch_keyset are commonly used to remove
631
+ // autograd keys from a DispatchKeySet throughout the code base. However, you
632
+ // are only allowed to remove functionality bits from a keyset, not backend
633
+ // bits. See Note [Removing keys from DispatchKeySet Only Affects Functionality
634
+ // Keys] for details. To be consistent and avoid confusion, we're explicitly
635
+ // setting up autograd_dispatch_keyset to not have any backend bits.
636
+ constexpr DispatchKeySet autograd_dispatch_keyset = DispatchKeySet({
637
+ DispatchKey::AutogradFunctionality,
638
+ DispatchKey::AutogradOther,
639
+ DispatchKey::AutogradNestedTensor,
640
+ });
641
+
642
+ constexpr DispatchKeySet autocast_dispatch_keyset = DispatchKeySet({
643
+ DispatchKey::AutocastCPU,
644
+ DispatchKey::AutocastCUDA,
645
+ DispatchKey::AutocastXPU,
646
+ DispatchKey::AutocastIPU,
647
+ DispatchKey::AutocastHPU,
648
+ DispatchKey::AutocastXLA,
649
+ DispatchKey::AutocastPrivateUse1,
650
+ });
651
+
652
+ // See Note [TLS Initialization]
653
+ constexpr DispatchKeySet default_included_set = DispatchKeySet({
654
+ DispatchKey::BackendSelect,
655
+ DispatchKey::ADInplaceOrView,
656
+ });
657
+
658
+ constexpr DispatchKeySet default_excluded_set = DispatchKeySet({
659
+ DispatchKey::AutocastCPU,
660
+ DispatchKey::AutocastCUDA,
661
+ DispatchKey::AutocastXPU,
662
+ DispatchKey::AutocastIPU,
663
+ DispatchKey::AutocastHPU,
664
+ DispatchKey::AutocastXLA,
665
+ DispatchKey::AutocastPrivateUse1,
666
+ });
667
+
668
+ constexpr DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView =
669
+ autograd_dispatch_keyset | DispatchKeySet(DispatchKey::ADInplaceOrView);
670
+
671
+ constexpr DispatchKeySet python_ks = DispatchKeySet({
672
+ DispatchKey::Python,
673
+ DispatchKey::PythonTLSSnapshot,
674
+ });
675
+
676
+ constexpr DispatchKeySet sparse_ks = DispatchKeySet(DispatchKey::Sparse);
677
+
678
+ constexpr DispatchKeySet sparse_csr_ks =
679
+ DispatchKeySet({DispatchKey::SparseCsrCPU, DispatchKey::SparseCsrCUDA});
680
+
681
+ constexpr DispatchKeySet mkldnn_ks = DispatchKeySet(DispatchKey::MkldnnCPU);
682
+
683
+ // backend dispatch keys that map to DispatchKey::AutogradOther
684
+ // NB: keys in this set also get associated with CompositeImplicitAutograd
685
+ constexpr DispatchKeySet autogradother_backends =
686
+ DispatchKeySet(
687
+ // HIP and VE aren't in this list: they now have their own backend bits
688
+ // which means that they can now have their own Autograd keys.
689
+ // Technically, HIP will now redispatch to its own custom AutogradHIP
690
+ // slot in the runtime table.
691
+ {DispatchKey::FPGA,
692
+ DispatchKey::ORT,
693
+ DispatchKey::Vulkan,
694
+ DispatchKey::Metal,
695
+ DispatchKey::SparseCsrCPU,
696
+ DispatchKey::SparseCsrCUDA,
697
+ DispatchKey::CustomRNGKeyId,
698
+ DispatchKey::MkldnnCPU,
699
+ // Sparse and Quantized backends also live here.
700
+ DispatchKey::Sparse,
701
+ DispatchKey::Quantized})
702
+ // Including the backend bits because this keyset is used during op
703
+ // registration, which requires looping over all runtime autogradother
704
+ // backend keys.
705
+ | DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
706
+
707
+ // The set of dispatch keys that come after autograd
708
+ // n.b. this relies on the fact that AutogradOther is currently the lowest
709
+ // Autograd key
710
+ constexpr DispatchKeySet after_autograd_keyset =
711
+ DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::AutogradOther);
712
+
713
+ // The set of dispatch keys that come after ADInplaceOrView
714
+ constexpr DispatchKeySet after_ADInplaceOrView_keyset = DispatchKeySet(
715
+ DispatchKeySet::FULL_AFTER,
716
+ c10::DispatchKey::ADInplaceOrView);
717
+
718
+ // The set of dispatch keys that come after Functionalize
719
+ constexpr DispatchKeySet after_func_keyset =
720
+ DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::Functionalize)
721
+ .remove(
722
+ // NOTE: we also need to remove ADInplaceOrView from the keyset when
723
+ // redispatching after the func kernels. This is because we're not
724
+ // calling the same op; we originally called an inplace op, and now
725
+ // we aren't. The original key calculation figured out which keys
726
+ // were Fallthrough based on the inplace op. That means that it did
727
+ // not include the ADInPlaceOrView kernel as a fallthrough key.
728
+ // However, we WANT the ADInPlaceOrView kernel to be ignored now
729
+ // that we're calling an out-of-place op. Re-invoking
730
+ // Dispatcher::call would re-run the Fallthrough key calculation and
731
+ // get us that, but at::redispatch is more performant. We can get
732
+ // away with it by explicitly removing the key here.
733
+ c10::DispatchKey::ADInplaceOrView);
734
+
735
+ constexpr DispatchKeySet backend_bitset_mask =
736
+ DispatchKeySet(DispatchKeySet::RAW, (1ULL << num_backends) - 1);
737
+
738
+ constexpr auto inplace_or_view_ks =
739
+ DispatchKeySet(DispatchKey::ADInplaceOrView);
740
+ constexpr auto autograd_cpu_ks = DispatchKeySet(DispatchKey::AutogradCPU);
741
+ constexpr auto autograd_ipu_ks = DispatchKeySet(DispatchKey::AutogradIPU);
742
+ constexpr auto autograd_xpu_ks = DispatchKeySet(DispatchKey::AutogradXPU);
743
+ constexpr auto autograd_cuda_ks = DispatchKeySet(DispatchKey::AutogradCUDA);
744
+ constexpr auto autograd_xla_ks = DispatchKeySet(DispatchKey::AutogradXLA);
745
+ constexpr auto autograd_lazy_ks = DispatchKeySet(DispatchKey::AutogradLazy);
746
+ constexpr auto autograd_meta_ks = DispatchKeySet(DispatchKey::AutogradMeta);
747
+ constexpr auto autograd_mps_ks = DispatchKeySet(DispatchKey::AutogradMPS);
748
+ constexpr auto autograd_hpu_ks = DispatchKeySet(DispatchKey::AutogradHPU);
749
+ constexpr auto autograd_privateuse1_ks =
750
+ DispatchKeySet(DispatchKey::AutogradPrivateUse1);
751
+ constexpr auto autograd_privateuse2_ks =
752
+ DispatchKeySet(DispatchKey::AutogradPrivateUse2);
753
+ constexpr auto autograd_privateuse3_ks =
754
+ DispatchKeySet(DispatchKey::AutogradPrivateUse3);
755
+ constexpr auto autograd_other_ks = DispatchKeySet(DispatchKey::AutogradOther);
756
+ constexpr auto autograd_nested =
757
+ DispatchKeySet(DispatchKey::AutogradNestedTensor);
758
+ // keyset corresponding to functorch keys that have their own dedicated
759
+ // TensorImpl subclass.
760
+ constexpr auto functorch_transforms_ks = DispatchKeySet(
761
+ {DispatchKey::FuncTorchBatched,
762
+ DispatchKey::FuncTorchVmapMode,
763
+ DispatchKey::Batched,
764
+ DispatchKey::VmapMode,
765
+ DispatchKey::FuncTorchGradWrapper});
766
+
767
+ constexpr auto functorch_batched_ks =
768
+ DispatchKeySet({DispatchKey::FuncTorchBatched});
769
+
770
+ // This keyset has:
771
+ // (1) the functionality bits corresponding to backends (dense, sparse,
772
+ // quantized)
+ // (2) all of the backend bits set
773
+ constexpr DispatchKeySet backend_functionality_keys =
774
+ DispatchKeySet({
775
+ DispatchKey::Dense,
776
+ DispatchKey::Quantized,
777
+ DispatchKey::Sparse,
778
+ }) |
779
+ DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
780
+
781
+ struct OpTableOffsetAndMask {
782
+ uint16_t offset;
783
+ uint16_t backend_mask;
784
+ };
785
+
786
+ static_assert(
787
+ num_backends <= 16,
788
+ "Right now we expect the number of backends not to exceed 16. In the (unlikely) event"
789
+ " that this changes, the size of OpTableOffsetAndMask::backend_mask needs to be increased too.");
790
+
791
+ // true if t is a backend dispatch key
792
+ C10_API bool isBackendDispatchKey(DispatchKey t);
793
+
794
+ // Resolve alias dispatch key to DispatchKeySet if applicable
795
+ C10_API DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t);
796
+
797
+ // Resolve alias dispatch key to DispatchKeySet if applicable,
798
+ // and check if k is a part of that set
799
+ C10_API bool runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k);
800
+
801
+ // Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch key
802
+ // t; the DispatchKeySet is empty if t is not an alias of DispatchKey::Autograd.
803
+ C10_API DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t);
804
+
805
+ // Returns a DispatchKeySet of autograd-related keys mapped to a backend.
+ // For a given backend key, use the associated autograd key.
+ // For non-backend keys, use AutogradOther as a default.
808
+ // Note: it's convenient and fast to return a default here rather than (say)
809
+ // returning an optional<DispatchKey>, or throwing. But it makes callers
810
+ // responsible for either a) enforcing the invariant that only backend keys
811
+ // be passed as arguments, or b) interpreting our return value carefully.
812
+ inline DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t) {
813
+ switch (t) {
814
+ case BackendComponent::CPUBit:
815
+ return inplace_or_view_ks | autograd_cpu_ks;
816
+ case BackendComponent::IPUBit:
817
+ return inplace_or_view_ks | autograd_ipu_ks;
818
+ case BackendComponent::XPUBit:
819
+ return inplace_or_view_ks | autograd_xpu_ks;
820
+ case BackendComponent::CUDABit:
821
+ return inplace_or_view_ks | autograd_cuda_ks;
822
+ case BackendComponent::XLABit:
823
+ return inplace_or_view_ks | autograd_xla_ks;
824
+ case BackendComponent::LazyBit:
825
+ return inplace_or_view_ks | autograd_lazy_ks;
826
+ case BackendComponent::MetaBit:
827
+ return inplace_or_view_ks | autograd_meta_ks;
828
+ case BackendComponent::MPSBit:
829
+ return inplace_or_view_ks | autograd_mps_ks;
830
+ case BackendComponent::HPUBit:
831
+ return inplace_or_view_ks | autograd_hpu_ks;
832
+ case BackendComponent::PrivateUse1Bit:
833
+ return inplace_or_view_ks | autograd_privateuse1_ks;
834
+ case BackendComponent::PrivateUse2Bit:
835
+ return inplace_or_view_ks | autograd_privateuse2_ks;
836
+ case BackendComponent::PrivateUse3Bit:
837
+ return inplace_or_view_ks | autograd_privateuse3_ks;
838
+ default:
839
+ return inplace_or_view_ks | autograd_other_ks;
840
+ }
841
+ }
842
+
843
+ // Returns a DispatchKeySet of autocast related keys mapped to backend.
844
+ inline DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t) {
845
+ constexpr auto autocast_cpu_ks = DispatchKeySet(DispatchKey::AutocastCPU);
846
+ constexpr auto autocast_xpu_ks = DispatchKeySet(DispatchKey::AutocastXPU);
847
+ constexpr auto autocast_ipu_ks = DispatchKeySet(DispatchKey::AutocastIPU);
848
+ constexpr auto autocast_hpu_ks = DispatchKeySet(DispatchKey::AutocastHPU);
849
+ constexpr auto autocast_cuda_ks = DispatchKeySet(DispatchKey::AutocastCUDA);
850
+ constexpr auto autocast_xla_ks = DispatchKeySet(DispatchKey::AutocastXLA);
851
+ constexpr auto autocast_privateuse1_ks =
852
+ DispatchKeySet(DispatchKey::AutocastPrivateUse1);
853
+ switch (t) {
854
+ case BackendComponent::CPUBit:
855
+ return autocast_cpu_ks;
856
+ case BackendComponent::XPUBit:
857
+ return autocast_xpu_ks;
858
+ case BackendComponent::IPUBit:
859
+ return autocast_ipu_ks;
860
+ case BackendComponent::HPUBit:
861
+ return autocast_hpu_ks;
862
+ case BackendComponent::CUDABit:
863
+ return autocast_cuda_ks;
864
+ case BackendComponent::XLABit:
865
+ return autocast_xla_ks;
866
+ case BackendComponent::PrivateUse1Bit:
867
+ return autocast_privateuse1_ks;
868
+ default:
869
+ return DispatchKeySet();
870
+ }
871
+ }
872
+
873
+ // returns the "backend" DispatchKey of highest priority in the set.
874
+ // This is basically like highestBackendKey(), except that we have some
875
+ // "functionality" bits that correspond to backends (Sparse, Quantized)
876
+ inline DispatchKey highestPriorityBackendTypeId(DispatchKeySet ks) {
877
+ return (ks & backend_functionality_keys).highestPriorityTypeId();
878
+ }
879
+
880
+ // This API exists because we have a use case for checking
881
+ // getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined)
882
+ // in OperatorEntry.cpp but we disallow it in has() API.
883
+ C10_API bool isIncludedInAlias(DispatchKey k, DispatchKey alias);
884
+
885
+ // Historically, every tensor only had a single DispatchKey, and it was always
886
+ // something like CPU, and there wasn't any of this business where TLS
887
+ // could cause the DispatchKey of a tensor to change. But we still have some
888
+ // legacy code that is still using DispatchKey for things like instanceof
889
+ // checks; if at all possible, refactor the code to stop using DispatchKey in
890
+ // those cases.
891
+ static inline DispatchKey legacyExtractDispatchKey(DispatchKeySet s) {
892
+ // NB: If you add any extra keys that can be stored in TensorImpl on
893
+ // top of existing "backend" keys like CPU/CUDA, you need to add it
894
+ // here. At the moment, autograd keys and ADInplaceOrView key need this
895
+ // treatment;
896
+ return (s - autograd_dispatch_keyset_with_ADInplaceOrView -
897
+ autocast_dispatch_keyset -
898
+ DispatchKeySet(
899
+ {DispatchKey::Functionalize,
900
+ DispatchKey::PythonTLSSnapshot,
901
+ DispatchKey::Python}))
902
+ .highestPriorityTypeId();
903
+ }
904
+
905
+ template <class T>
906
+ using is_not_DispatchKeySet = guts::negation<std::is_same<DispatchKeySet, T>>;
907
+
908
+ // Given a function type, constructs a function_traits type that drops the first
909
+ // parameter type if the first parameter is of type DispatchKeySet. NB:
910
+ // DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid
911
+ // pushing unnecessary arguments on the stack - see Note [ Plumbing Keys Through
912
+ // the Dispatcher] for details). If at any point in the future we need to expose
913
+ // this type to JIT, revisit the usage of this type alias.
914
+ template <class FuncType>
915
+ using remove_DispatchKeySet_arg_from_func = guts::make_function_traits_t<
916
+ typename guts::infer_function_traits_t<FuncType>::return_type,
917
+ typename std::conditional_t<
918
+ std::is_same<
919
+ DispatchKeySet,
920
+ typename guts::typelist::head_with_default_t<
921
+ void,
922
+ typename guts::infer_function_traits_t<
923
+ FuncType>::parameter_types>>::value,
924
+ guts::typelist::drop_if_nonempty_t<
925
+ typename guts::infer_function_traits_t<FuncType>::parameter_types,
926
+ 1>,
927
+ typename guts::infer_function_traits_t<FuncType>::parameter_types>>;
928
+ } // namespace c10
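
The keyset algebra, iterator, and helper functions declared above can be exercised roughly as follows. This is an illustrative sketch only: it assumes a program compiled and linked against c10, and the particular keys chosen carry no special meaning.

    #include <c10/core/DispatchKeySet.h>
    #include <iostream>

    int main() {
      using namespace c10;
      // Runtime per-backend keys decompose into a functionality bit plus a
      // backend bit inside the 64-bit representation, so the two keys below
      // contribute two functionality bits and two backend bits.
      DispatchKeySet ks({DispatchKey::CPU, DispatchKey::AutogradCUDA});
      // Alias keys resolve to a full runtime keyset.
      DispatchKeySet autograd_keys = getRuntimeDispatchKeySet(DispatchKey::Autograd);
      // Set algebra mirrors the constexpr keysets defined above.
      DispatchKeySet without_autograd =
          ks - autograd_dispatch_keyset_with_ADInplaceOrView;
      // Iteration expands each per-backend functionality over every backend
      // bit present, so this prints CPU, CUDA, AutogradCPU, AutogradCUDA.
      for (DispatchKey k : ks) {
        std::cout << toString(k) << "\n";
      }
      std::cout << without_autograd << "\n";
      std::cout << getDispatchTableIndexForDispatchKey(DispatchKey::CPU) << "\n";
      (void)autograd_keys;
      return 0;
    }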
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h ADDED
@@ -0,0 +1,119 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Load.h>
6
+ #include <c10/util/TypeCast.h>
7
+
8
+ namespace c10 {
9
+
10
+ // Dynamic type casting utils:
11
+ // - fetch_and_cast
12
+ // - cast_and_store
13
+ //
14
+ // fetch_and_cast fetches a value with dynamic type specified by a ScalarType
+ // from a void pointer and casts it to a static type.
+ //
+ // cast_and_store casts a statically typed value into the dynamic type specified
+ // by a ScalarType, and stores it into a void pointer.
19
+ //
20
+ // NOTE:
21
+ //
22
+ // Dynamic casting allows us to support type promotion without blowing up
23
+ // the combination space: For example, without dynamic cast, in order to
24
+ // implement `add_` with type promotion, we would need something like
25
+ //
26
+ // AT_DISPATCH_ALL_TYPES(output.dtype(),
27
+ // AT_DISPATCH_ALL_TYPES(input1.dtype(),
28
+ // AT_DISPATCH_ALL_TYPES(input2.dtype(),
29
+ // [](arg0_t a, arg1_t b) -> out_t { return a + b; }
30
+ // )
31
+ // )
32
+ // )
33
+ //
34
+ // If we support N dtypes, the above code would generate the a+b kernel for
+ // all N * N * N combinations of supported types, and the compilation time and
+ // binary size would become horrible.
37
+ //
38
+ // Dynamic casting might sound like a bad idea in terms of performance.
39
+ // Especially if you ever do it in a loop, you are going to do a billion tests.
40
+ // But in practice it is not as bad as it might look:
41
+ //
42
+ // - on CPU, this is a branch that always has the same outcome, therefore
43
+ // hopefully the branch predictor could do the job pretty well
44
+ // - on GPU, these branches will not diverge, so we could still have the same
45
+ // warp executing the same line of code
46
+ // - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to
47
+ // check an integer does not hurt the performance much because the ALUs would
48
+ // wait for load instructions anyway.
49
+ //
50
+ // For the discussion and benchmark, refer to:
51
+ // - https://github.com/pytorch/pytorch/pull/28343
52
+ // - https://github.com/pytorch/pytorch/pull/28344
53
+ // - https://github.com/pytorch/pytorch/pull/28345
54
+ //
55
+
56
+ #ifdef C10_HOST_DEVICE
57
+ #define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false);
58
+ #else
59
+ #define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
60
+ #endif
61
+
62
+ // Fetch a value with dynamic type src_type from ptr, and cast it to static type
63
+ // dest_t.
64
+ #define FETCH_AND_CAST_CASE(type, scalartype) \
65
+ case ScalarType::scalartype: \
66
+ return c10::convert<dest_t>(c10::load<type>(ptr));
67
+
68
+ template <typename dest_t>
69
+ C10_HOST_DEVICE inline dest_t fetch_and_cast(
70
+ const ScalarType src_type,
71
+ const void* ptr) {
72
+ switch (src_type) {
73
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE)
74
+ default:
75
+ ERROR_UNSUPPORTED_CAST
76
+ }
77
+ return dest_t(0); // just to avoid compiler warning
78
+ }
79
+
80
+ // Cast a value with static type src_t into dynamic dest_type, and store it to
81
+ // ptr.
82
+ #define CAST_AND_STORE_CASE(type, scalartype) \
83
+ case ScalarType::scalartype: \
84
+ *(type*)ptr = c10::convert<type>(value); \
85
+ return;
86
+ template <typename src_t>
87
+ C10_HOST_DEVICE inline void cast_and_store(
88
+ const ScalarType dest_type,
89
+ void* ptr,
90
+ src_t value) {
91
+ switch (dest_type) {
92
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE)
93
+ default:;
94
+ }
95
+ ERROR_UNSUPPORTED_CAST
96
+ }
97
+
98
+ #define DEFINE_UNCASTABLE(T, scalartype_) \
99
+ template <> \
100
+ C10_HOST_DEVICE inline T fetch_and_cast<T>( \
101
+ const ScalarType src_type, const void* ptr) { \
102
+ CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \
103
+ return c10::load<T>(ptr); \
104
+ } \
105
+ template <> \
106
+ C10_HOST_DEVICE inline void cast_and_store<T>( \
107
+ const ScalarType dest_type, void* ptr, T value) { \
108
+ CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
109
+ *(T*)ptr = value; \
110
+ }
111
+
112
+ AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
113
+
114
+ #undef FETCH_AND_CAST_CASE
115
+ #undef CAST_AND_STORE_CASE
116
+ #undef DEFINE_UNCASTABLE
117
+ #undef ERROR_UNSUPPORTED_CAST
118
+
119
+ } // namespace c10
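
A small illustration of the two utilities, under the assumption that the buffer's dtype is only known at runtime:

    #include <c10/core/DynamicCast.h>
    #include <c10/core/ScalarType.h>
    #include <iostream>

    int main() {
      float buffer = 3.5f;
      // Read a value whose dtype is carried at runtime as ScalarType::Float
      // and convert it to a statically typed double.
      double d = c10::fetch_and_cast<double>(c10::ScalarType::Float, &buffer);
      // Convert a statically typed int64_t and store it back into the
      // dynamically typed float slot.
      c10::cast_and_store<int64_t>(c10::ScalarType::Float, &buffer, 7);
      std::cout << d << " " << buffer << "\n"; // 3.5 7
      return 0;
    }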
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h ADDED
@@ -0,0 +1,107 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <mutex>
5
+
6
+ #include <c10/core/Device.h>
7
+ #include <c10/core/DispatchKeySet.h>
8
+ #include <c10/core/TensorImpl.h>
9
+ #include <c10/macros/Export.h>
10
+ #include <c10/util/intrusive_ptr.h>
11
+ #include <c10/util/python_stub.h>
12
+
13
+ /**
14
+ * Note [Generator]
15
+ * ~~~~~~~~~~~~~~~~
16
+ * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm
17
+ * to generate a seemingly random sequence of numbers, which may later be used
+ * in creating a random distribution. Such an engine almost always maintains a
+ * state and requires a seed to start off the creation of random numbers. Often,
+ * users have found it beneficial to be able to explicitly create,
21
+ * retain, and destroy PRNG states and also be able to have control over the
22
+ * seed value.
23
+ *
24
+ * A Generator in ATen gives users the ability to read, write and modify a PRNG
25
+ * engine. For instance, it does so by letting users seed a PRNG engine, fork
26
+ * the state of the engine, etc.
27
+ *
28
+ * By default, there is one generator per device, and a device's generator is
29
+ * lazily created. A user can use the torch.Generator() api to create their own
30
+ * generator. Currently torch.Generator() can only create a CPUGeneratorImpl.
31
+ */
32
+
33
+ /**
34
+ * Note [Acquire lock when using random generators]
35
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
36
+ * Generator and its derived classes are NOT thread-safe. Please note that most
37
+ * of the places where we have inserted locking for generators are historically
38
+ * based, and we haven't actually checked that everything is truly thread safe
39
+ * (and it probably isn't). Please use the public mutex_ when using any methods
40
+ * from these classes, except for the read-only methods. You can learn about the
41
+ * usage by looking into the unittests (aten/src/ATen/cpu_generator_test.cpp)
42
+ * and other places where we have used lock_guard.
43
+ *
44
+ * TODO: Look into changing the threading semantics of Generators in ATen (e.g.,
45
+ * making them non-thread safe and instead making the generator state
46
+ * splittable, to accommodate forks into other threads).
47
+ */
48
+
49
+ namespace c10 {
50
+
51
+ // The default seed is selected to be a large number
52
+ // with good distribution of 0s and 1s in bit representation
53
+ constexpr uint64_t default_rng_seed_val = 67280421310721;
54
+
55
+ struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {
56
+ // Constructors
57
+ GeneratorImpl(Device device_in, DispatchKeySet key_set);
58
+
59
+ // Delete all copy and move assignment in favor of clone()
60
+ // method
61
+ GeneratorImpl(const GeneratorImpl& other) = delete;
62
+ GeneratorImpl(GeneratorImpl&& other) = delete;
63
+ GeneratorImpl& operator=(const GeneratorImpl& other) = delete;
64
+
65
+ ~GeneratorImpl() override = default;
66
+ c10::intrusive_ptr<GeneratorImpl> clone() const;
67
+
68
+ // Common methods for all generators
69
+ virtual void set_current_seed(uint64_t seed) = 0;
70
+ virtual void set_offset(uint64_t offset) = 0;
71
+ virtual uint64_t get_offset() const = 0;
72
+ virtual uint64_t current_seed() const = 0;
73
+ virtual uint64_t seed() = 0;
74
+ virtual void set_state(const c10::TensorImpl& new_state) = 0;
75
+ virtual c10::intrusive_ptr<c10::TensorImpl> get_state() const = 0;
76
+ Device device() const;
77
+
78
+ // See Note [Acquire lock when using random generators]
79
+ std::mutex mutex_;
80
+
81
+ DispatchKeySet key_set() const {
82
+ return key_set_;
83
+ }
84
+
85
+ inline void set_pyobj(PyObject* pyobj) noexcept {
86
+ pyobj_ = pyobj;
87
+ }
88
+
89
+ inline PyObject* pyobj() const noexcept {
90
+ return pyobj_;
91
+ }
92
+
93
+ protected:
94
+ Device device_;
95
+ DispatchKeySet key_set_;
96
+ PyObject* pyobj_ = nullptr;
97
+
98
+ virtual GeneratorImpl* clone_impl() const = 0;
99
+ };
100
+
101
+ namespace detail {
102
+
103
+ C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
104
+
105
+ } // namespace detail
106
+
107
+ } // namespace c10
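
A sketch of the locking discipline described in Note [Acquire lock when using random generators]. It assumes some concrete GeneratorImpl subclass (for example ATen's CPUGeneratorImpl) is available at the call site:

    #include <c10/core/GeneratorImpl.h>
    #include <mutex>

    // Mutating calls are guarded by the generator's public mutex_.
    void reseed(c10::GeneratorImpl& gen, uint64_t seed) {
      std::lock_guard<std::mutex> guard(gen.mutex_);
      gen.set_current_seed(seed);
    }

    // Obtaining fresh entropy for seeding uses the helper declared above;
    // read-only queries on the generator itself do not need the lock.
    uint64_t entropy() {
      return c10::detail::getNonDeterministicRandom(/*is_cuda=*/false);
    }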
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h ADDED
@@ -0,0 +1,44 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/AutogradState.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace c10 {
7
+
8
+ struct C10_API GradMode {
9
+ static bool is_enabled();
10
+ static void set_enabled(bool enabled);
11
+ };
12
+
13
+ // A RAII, thread local (!) guard that enables or disables grad mode upon
14
+ // construction, and sets it back to the original value upon destruction.
15
+ struct C10_API AutoGradMode {
16
+ AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
17
+ GradMode::set_enabled(enabled);
18
+ }
19
+ ~AutoGradMode() {
20
+ GradMode::set_enabled(prev_mode);
21
+ }
22
+ bool prev_mode;
23
+ };
24
+
25
+ // A RAII, thread local (!) guard that stops future operations from building
26
+ // gradients.
27
+ struct C10_API NoGradGuard : public AutoGradMode {
28
+ NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
29
+ };
30
+
31
+ // A RAII, thread local (!) guard that enables or disables forward grad mode
32
+ // upon construction, and sets it back to the original value upon destruction.
33
+ struct C10_API AutoFwGradMode {
34
+ AutoFwGradMode(bool enabled)
35
+ : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
36
+ AutogradState::get_tls_state().set_fw_grad_mode(enabled);
37
+ }
38
+ ~AutoFwGradMode() {
39
+ AutogradState::get_tls_state().set_fw_grad_mode(prev_mode);
40
+ }
41
+ bool prev_mode;
42
+ };
43
+
44
+ } // namespace c10
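
The guards are intended to be used as scoped RAII objects, for example:

    #include <c10/core/GradMode.h>

    void run_inference_then_training_step() {
      {
        // Disable gradient tracking for this scope only; the previous mode is
        // restored when the guard is destroyed.
        c10::NoGradGuard no_grad;
        // ... inference-only work ...
      }
      // Outside that scope the original thread-local setting is back in effect.
      c10::AutoGradMode enable(/*enabled=*/true);
      bool on = c10::GradMode::is_enabled(); // true within this scope
      (void)on;
    }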
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Layout.h ADDED
@@ -0,0 +1,73 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ #include <ostream>
7
+
8
+ namespace c10 {
9
+ enum class Layout : int8_t {
10
+ Strided,
11
+ Sparse,
12
+ SparseCsr,
13
+ Mkldnn,
14
+ SparseCsc,
15
+ SparseBsr,
16
+ SparseBsc,
17
+ Jagged,
18
+ NumOptions
19
+ };
20
+
21
+ constexpr auto kStrided = Layout::Strided;
22
+ constexpr auto kSparse = Layout::Sparse;
23
+ constexpr auto kSparseCsr = Layout::SparseCsr;
24
+ constexpr auto kMkldnn = Layout::Mkldnn;
25
+ constexpr auto kSparseCsc = Layout::SparseCsc;
26
+ constexpr auto kSparseBsr = Layout::SparseBsr;
27
+ constexpr auto kSparseBsc = Layout::SparseBsc;
28
+ constexpr auto kJagged = Layout::Jagged;
29
+
30
+ inline Layout layout_from_backend(Backend backend) {
31
+ switch (backend) {
32
+ case Backend::SparseCPU:
33
+ case Backend::SparseCUDA:
34
+ case Backend::SparseHIP:
35
+ case Backend::SparseVE:
36
+ case Backend::SparseXPU:
37
+ return Layout::Sparse;
38
+ case Backend::MkldnnCPU:
39
+ return Layout::Mkldnn;
40
+ case Backend::SparseCsrCPU:
41
+ case Backend::SparseCsrCUDA:
42
+ TORCH_CHECK(
43
+ false,
44
+ "Cannot map Backend SparseCsrCPU|SparseCsrCUDA to a unique layout.");
45
+ default:
46
+ return Layout::Strided;
47
+ }
48
+ }
49
+
50
+ inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
51
+ switch (layout) {
52
+ case at::kStrided:
53
+ return stream << "Strided";
54
+ case at::kSparse:
55
+ return stream << "Sparse";
56
+ case at::kSparseCsr:
57
+ return stream << "SparseCsr";
58
+ case at::kSparseCsc:
59
+ return stream << "SparseCsc";
60
+ case at::kSparseBsr:
61
+ return stream << "SparseBsr";
62
+ case at::kSparseBsc:
63
+ return stream << "SparseBsc";
64
+ case at::kMkldnn:
65
+ return stream << "Mkldnn";
66
+ case at::kJagged:
67
+ return stream << "Jagged";
68
+ default:
69
+ TORCH_CHECK(false, "Unknown layout");
70
+ }
71
+ }
72
+
73
+ } // namespace c10
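
A brief illustration of the backend-to-layout mapping and the stream operator (illustrative only; it assumes linking against c10):

    #include <c10/core/Layout.h>
    #include <iostream>

    int main() {
      // Sparse backends map to Layout::Sparse; dense backends fall through to
      // Layout::Strided. SparseCsrCPU/CUDA deliberately have no unique layout.
      c10::Layout a = c10::layout_from_backend(c10::Backend::SparseCPU);
      c10::Layout b = c10::layout_from_backend(c10::Backend::CPU);
      std::cout << a << " " << b << " " << c10::kMkldnn << "\n"; // Sparse Strided Mkldnn
      return 0;
    }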
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h ADDED
@@ -0,0 +1,287 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <ostream>
8
+
9
+ // Memory format is not a property of a Tensor. It is a way to tell an
+ // operator how the result should be organized in memory and nothing more. That
+ // means memory format should never be used as a return value for any tensor state
12
+ // interrogation functions (internally and externally).
13
+ //
14
+ // Possible options are:
15
+ // Preserve:
16
+ // If any of the input tensors is in channels_last format, operator output
17
+ // should be in channels_last format
18
+ //
19
+ // Contiguous:
20
+ // Regardless of input tensors format, the output should be contiguous
21
+ // Tensor.
22
+ //
23
+ // ChannelsLast:
24
+ // Regardless of input tensors format, the output should be in channels_last
25
+ // format.
26
+
27
+ namespace c10 {
28
+ enum class MemoryFormat : int8_t {
29
+ Contiguous,
30
+ Preserve,
31
+ ChannelsLast,
32
+ ChannelsLast3d,
33
+ NumOptions
34
+ };
35
+
36
+ // If you are seeing this, it means that this call site was not checked if
37
+ // the memory format could be preserved, and it was switched to old default
38
+ // behaviour of contiguous
39
+ #define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format()
40
+
41
+ inline MemoryFormat get_contiguous_memory_format() {
42
+ return MemoryFormat::Contiguous;
43
+ }
44
+
45
+ inline std::ostream& operator<<(
46
+ std::ostream& stream,
47
+ at::MemoryFormat memory_format) {
48
+ switch (memory_format) {
49
+ case MemoryFormat::Preserve:
50
+ return stream << "Preserve";
51
+ case MemoryFormat::Contiguous:
52
+ return stream << "Contiguous";
53
+ case MemoryFormat::ChannelsLast:
54
+ return stream << "ChannelsLast";
55
+ case MemoryFormat::ChannelsLast3d:
56
+ return stream << "ChannelsLast3d";
57
+ default:
58
+ TORCH_CHECK(false, "Unknown memory format ", memory_format);
59
+ }
60
+ }
61
+
62
+ // Note: Hardcoded the channel last stride indices here to get better
63
+ // performance
64
+ template <typename T>
65
+ inline std::vector<T> get_channels_last_strides_2d(ArrayRef<T> sizes) {
66
+ std::vector<T> strides(sizes.size());
67
+ switch (sizes.size()) {
68
+ case 4:
69
+ strides[1] = 1;
70
+ strides[3] = sizes[1];
71
+ strides[2] = strides[3] * sizes[3];
72
+ strides[0] = strides[2] * sizes[2];
73
+ return strides;
74
+ case 3:
75
+ strides[0] = 1;
76
+ strides[2] = sizes[0];
77
+ strides[1] = strides[2] * sizes[2];
78
+ return strides;
79
+ default:
80
+ TORCH_INTERNAL_ASSERT(
81
+ false, "ChannelsLast2d doesn't support size ", sizes.size());
82
+ }
83
+ }
84
+
85
+ inline std::vector<int64_t> get_channels_last_strides_2d(IntArrayRef sizes) {
86
+ return get_channels_last_strides_2d<int64_t>(sizes);
87
+ }
88
+
89
+ template <typename T>
90
+ std::vector<T> get_channels_last_strides_3d(ArrayRef<T> sizes) {
91
+ std::vector<T> strides(sizes.size());
92
+ switch (sizes.size()) {
93
+ case 5:
94
+ strides[1] = 1;
95
+ strides[4] = sizes[1];
96
+ strides[3] = strides[4] * sizes[4];
97
+ strides[2] = strides[3] * sizes[3];
98
+ strides[0] = strides[2] * sizes[2];
99
+ return strides;
100
+ case 4:
101
+ strides[0] = 1;
102
+ strides[3] = sizes[0];
103
+ strides[2] = strides[3] * sizes[3];
104
+ strides[1] = strides[2] * sizes[2];
105
+ return strides;
106
+ default:
107
+ TORCH_INTERNAL_ASSERT(
108
+ false, "ChannelsLast3d doesn't support size ", sizes.size());
109
+ }
110
+ }
111
+
112
+ inline std::vector<int64_t> get_channels_last_strides_3d(IntArrayRef sizes) {
113
+ return get_channels_last_strides_3d<int64_t>(sizes);
114
+ }
115
+
116
+ // NOTE:
117
+ // Below are Helper functions for is_channels_last_strides_xd.
118
+ // 1. Please do not combine these helper functions, each helper function handles
119
+ // exactly one case of sizes + memory_format, by doing this, the strides indices
120
+ // will be a constant array and we can access it using constant index number,
121
+ // the compiler will fully unroll the loop on strides indices to gain a better
122
+ // performance.
123
+ // 2. No error check in helper function, caller ensures the correctness of the
124
+ // input
125
+ // 3. All helper functions have similar comments, only 1st helper function is
126
+ // commented here.
127
+ template <typename T>
128
+ inline bool is_channels_last_strides_2d_s4(
129
+ const ArrayRef<T> sizes,
130
+ const ArrayRef<T> strides) {
131
+ T min = 0;
132
+ // special case for trivial C dimension. default to NCHW
133
+ if (strides[1] == 0) {
134
+ return false;
135
+ }
136
+ // loop strides indices
137
+ for (auto& d : {1, 3, 2, 0}) {
138
+ if (sizes[d] == 0) {
139
+ return false;
140
+ }
141
+ if (strides[d] < min) {
142
+ return false;
143
+ }
144
+ // Fallback to NCHW as default layout for ambiguous cases
145
+ // This is the flaw of implicit memory_format from strides.
146
+ // N111 tensor with identical strides for size 1 dimension;
147
+ // Two cases could lead us here:
148
+ // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
149
+ // b. N11W contiguous Tensor sliced on the W-dimension.
150
+ // ([N,1,1,1]@[W,W,W,W])
151
+ if (d == 0 && min == strides[1]) {
152
+ return false;
153
+ }
154
+ // This is necessary to:
155
+ // 1. distinguish the memory_format of N1H1;
156
+ // [H, 1, 1, 1] channels_last stride
157
+ // [H, H, 1, 1] contiguous stride
158
+ // 2. permutation of 1C1W:
159
+ // [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
160
+ // [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last
161
+ min = strides[d];
162
+ if (sizes[d] > 1) {
163
+ min *= sizes[d];
164
+ }
165
+ }
166
+ return true;
167
+ }
168
+
169
+ template <typename T>
170
+ inline bool is_channels_last_strides_3d_s5(
171
+ const ArrayRef<T> sizes,
172
+ const ArrayRef<T> strides) {
173
+ T min = 0;
174
+ if (strides[1] == 0) {
175
+ return false;
176
+ }
177
+ for (auto& d : {1, 4, 3, 2, 0}) {
178
+ if (sizes[d] == 0) {
179
+ return false;
180
+ }
181
+ if (strides[d] < min) {
182
+ return false;
183
+ }
184
+ if (d == 0 && min == strides[1]) {
185
+ return false;
186
+ }
187
+ min = strides[d];
188
+ if (sizes[d] > 1) {
189
+ min *= sizes[d];
190
+ }
191
+ }
192
+ return true;
193
+ }
194
+
195
+ // Note [Ambiguous is_channels_last_strides_xd]
196
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
197
+ // The flaw of carrying memory_format implicitly through strides is very hard
198
+ // to work around (WAR) properly; see issue #24090.
199
+ // Without the history of permutation, we can't infer the memory_format of a
200
+ // tensor from the snapshot of its size & stride
201
+ // e.g.
202
+ //
203
+ // 1. We can NOT specify the memory_format of N111 tensor through strides in a
204
+ // meaningful way;
205
+ //
206
+ // 2. Two paths that end up with identical size/stride:
207
+ // N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W]
208
+ // NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C]
209
+ // So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer
210
+ // the memory_format of the original tensor.
211
+ //
212
+ // Due to these limitations, our temporary workaround `is_channels_last_strides`
+ // makes a best effort to infer whether the original memory_format of a tensor is
214
+ // at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered
215
+ // by their importance):
216
+ // 1. Ensure that normal shape manipulation does not accidentally change the
217
+ // MemoryFormat of an existing tensor.
218
+ // 2. Allow users to mark MemoryFormat::ChannelsLast on tensors;
219
+ //
220
+ // The function does so by checking the strides of the tensor, including the
+ // strides of size-1 dimensions, even though conventionally PyTorch places no
+ // restriction on trivial strides (strides for size-1 dimensions).
223
+ //
224
+ // Note that this approach is a compromise. We did not solve the problem
+ // completely. In many cases we will not be able to infer the correct memory
+ // format.
+ // The implementation of `is_channels_last_strides` serves these objectives:
+ // MemoryFormat::ChannelsLast has to be explicitly opted in (no accidental
+ // conversion); best effort is made to maintain the ChannelsLast flag.
230
+ //
231
+ // Due to the fact that this is not a bulletproof solution, through testing
232
+ // (aten/src/ATen/test/memory_format_test.cpp)
233
+ // a. we ensure that the common tasks are supported;
+ // b. we identify corner cases where the implementation compromises.
235
+ //
236
+ // By the time accumulated permutation is enabled to replace implicit
237
+ // memory_format through strides, we should be updating our tests and fix the
238
+ // issues in our tests.
239
+ //
240
+ // We use Channels Last 2d as an example above.
241
+ // This is a general problem for all the is_channels_last_strides_xd
242
+ // implementation. Please check the helper functions
243
+ // (is_channels_last_strides_*d_s*) for more details.
244
+
245
+ template <typename T>
246
+ inline bool is_channels_last_strides_2d(
247
+ const ArrayRef<T> sizes,
248
+ const ArrayRef<T> strides) {
249
+ switch (sizes.size()) {
250
+ case 4:
251
+ return is_channels_last_strides_2d_s4(sizes, strides);
252
+ case 3:
253
+ // TODO dim == 3 case will be enabled once it is fully tested
254
+ return false;
255
+ default:
256
+ return false;
257
+ }
258
+ }
259
+
260
+ template <typename T>
261
+ inline bool is_channels_last_strides_3d(
262
+ const ArrayRef<T> sizes,
263
+ const ArrayRef<T> strides) {
264
+ switch (sizes.size()) {
265
+ case 5:
266
+ return is_channels_last_strides_3d_s5(sizes, strides);
267
+ case 4:
268
+ // TODO dim == 4 case will be enabled once it is fully tested
269
+ return false;
270
+ default:
271
+ return false;
272
+ }
273
+ }
274
+
275
+ inline bool is_channels_last_strides_2d(
276
+ const IntArrayRef sizes,
277
+ const IntArrayRef strides) {
278
+ return is_channels_last_strides_2d<int64_t>(sizes, strides);
279
+ }
280
+
281
+ inline bool is_channels_last_strides_3d(
282
+ const IntArrayRef sizes,
283
+ const IntArrayRef strides) {
284
+ return is_channels_last_strides_3d<int64_t>(sizes, strides);
285
+ }
286
+
287
+ } // namespace c10
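
A short sketch of the stride helpers for a 4-d NCHW shape; the printed values follow directly from the formulas above:

    #include <c10/core/MemoryFormat.h>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> sizes = {2, 3, 4, 5}; // N, C, H, W
      // Strides that lay the data out channels-last (NHWC order in memory).
      std::vector<int64_t> strides = c10::get_channels_last_strides_2d(sizes);
      for (auto s : strides) {
        std::cout << s << " "; // 60 1 15 3
      }
      std::cout << "\n";
      // Best-effort inverse: infer channels_last-ness from sizes + strides.
      std::cout << std::boolalpha
                << c10::is_channels_last_strides_2d(sizes, strides) << "\n"; // true
      return 0;
    }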
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ namespace c10 {
4
+
5
+ template <typename T>
6
+ class OptionalRef {
7
+ public:
8
+ OptionalRef() : data_(nullptr) {}
9
+ OptionalRef(const T* data) : data_(data) {
10
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_);
11
+ }
12
+ OptionalRef(const T& data) : data_(&data) {}
13
+
14
+ bool has_value() const {
15
+ return data_ != nullptr;
16
+ }
17
+
18
+ const T& get() const {
19
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_);
20
+ return *data_;
21
+ }
22
+
23
+ operator bool() const {
24
+ return has_value();
25
+ }
26
+
27
+ private:
28
+ const T* data_;
29
+ };
30
+
31
+ } // namespace c10
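
OptionalRef is a non-owning "maybe reference"; a minimal sketch:

    #include <c10/core/OptionalRef.h>
    #include <c10/util/Exception.h> // for the debug-only assert used by get()
    #include <iostream>

    int main() {
      int x = 42;
      c10::OptionalRef<int> empty;  // holds nothing
      c10::OptionalRef<int> ref(x); // refers to x, does not copy or own it
      if (ref) {
        std::cout << ref.get() << "\n"; // 42
      }
      std::cout << std::boolalpha << empty.has_value() << "\n"; // false
      return 0;
    }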
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h ADDED
@@ -0,0 +1,49 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ namespace c10 {
7
+
8
+ /**
9
+ * QScheme is an enum that specifies the type of quantization. This has a one
10
+ * to one correspondence with Quantizer.
11
+ * Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes.
12
+ * Keep this file in sync with torch/nn/_qscheme.py
13
+ */
14
+ enum class QScheme : uint8_t {
15
+ PER_TENSOR_AFFINE = 0,
16
+ PER_CHANNEL_AFFINE = 1,
17
+ PER_TENSOR_SYMMETRIC = 2,
18
+ PER_CHANNEL_SYMMETRIC = 3,
19
+ PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4,
20
+ COMPILE_TIME_NUM_QSCHEMES = 5,
21
+ };
22
+
23
+ constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE;
24
+ constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE;
25
+ constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC;
26
+ constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC;
27
+ constexpr auto kPerChannelAffineFloatQParams =
28
+ QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS;
29
+ constexpr int COMPILE_TIME_NUM_QSCHEMES =
30
+ static_cast<int>(QScheme::COMPILE_TIME_NUM_QSCHEMES);
31
+
32
+ inline std::string toString(QScheme qscheme) {
33
+ switch (qscheme) {
34
+ case kPerTensorAffine:
35
+ return "per_tensor_affine";
36
+ case kPerChannelAffine:
37
+ return "per_channel_affine";
38
+ case kPerTensorSymmetric:
39
+ return "per_tensor_symmetric";
40
+ case kPerChannelSymmetric:
41
+ return "per_channel_symmetric";
42
+ case kPerChannelAffineFloatQParams:
43
+ return "per_channel_affine_float_qparams";
44
+ default:
45
+ TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast<int>(qscheme));
46
+ }
47
+ }
48
+
49
+ } // namespace c10
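
Usage is straightforward; the k* constants mirror the Python-side qscheme objects:

    #include <c10/core/QScheme.h>
    #include <iostream>

    int main() {
      c10::QScheme q = c10::kPerChannelAffine;
      std::cout << c10::toString(q) << "\n";               // per_channel_affine
      std::cout << c10::COMPILE_TIME_NUM_QSCHEMES << "\n"; // 5
      return 0;
    }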
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h ADDED
@@ -0,0 +1,383 @@
1
+ #pragma once
2
+
3
+ #include <stdint.h>
4
+ #include <stdexcept>
5
+ #include <type_traits>
6
+ #include <utility>
7
+
8
+ #include <c10/core/OptionalRef.h>
9
+ #include <c10/core/ScalarType.h>
10
+ #include <c10/core/SymFloat.h>
11
+ #include <c10/core/SymInt.h>
12
+ #include <c10/macros/Macros.h>
13
+ #include <c10/util/Exception.h>
14
+ #include <c10/util/Half.h>
15
+ #include <c10/util/TypeCast.h>
16
+ #include <c10/util/intrusive_ptr.h>
17
+
18
+ C10_CLANG_DIAGNOSTIC_PUSH()
19
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
20
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
21
+ #endif
22
+
23
+ namespace c10 {
24
+
25
+ /**
26
+ * Scalar represents a 0-dimensional tensor which contains a single element.
27
+ * Unlike a tensor, numeric literals (in C++) are implicitly convertible to
28
+ * Scalar (which is why, for example, we provide both add(Tensor) and
29
+ * add(Scalar) overloads for many operations). It may also be used in
30
+ * circumstances where you statically know a tensor is 0-dim and single size,
31
+ * but don't know its type.
32
+ */
33
+ class C10_API Scalar {
34
+ public:
35
+ Scalar() : Scalar(int64_t(0)) {}
36
+
37
+ void destroy() {
38
+ if (Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag) {
39
+ raw::intrusive_ptr::decref(v.p);
40
+ v.p = nullptr;
41
+ }
42
+ }
43
+
44
+ ~Scalar() {
45
+ destroy();
46
+ }
47
+
48
+ #define DEFINE_IMPLICIT_CTOR(type, name) \
49
+ Scalar(type vv) : Scalar(vv, true) {}
50
+
51
+ AT_FORALL_SCALAR_TYPES_AND7(
52
+ Half,
53
+ BFloat16,
54
+ Float8_e5m2,
55
+ Float8_e4m3fn,
56
+ Float8_e5m2fnuz,
57
+ Float8_e4m3fnuz,
58
+ ComplexHalf,
59
+ DEFINE_IMPLICIT_CTOR)
60
+ AT_FORALL_COMPLEX_TYPES(DEFINE_IMPLICIT_CTOR)
61
+
62
+ #undef DEFINE_IMPLICIT_CTOR
63
+
64
+ // Value* is both implicitly convertible to SymbolicVariable and bool which
65
+ // causes an ambiguity error. A specialized constructor for bool resolves this
66
+ // problem.
67
+ template <
68
+ typename T,
69
+ typename std::enable_if<std::is_same<T, bool>::value, bool>::type* =
70
+ nullptr>
71
+ Scalar(T vv) : tag(Tag::HAS_b) {
72
+ v.i = convert<int64_t, bool>(vv);
73
+ }
74
+
75
+ template <
76
+ typename T,
77
+ typename std::enable_if<std::is_same<T, c10::SymBool>::value, bool>::
78
+ type* = nullptr>
79
+ Scalar(T vv) : tag(Tag::HAS_sb) {
80
+ v.i = convert<int64_t, c10::SymBool>(vv);
81
+ }
82
+
83
+ #define DEFINE_ACCESSOR(type, name) \
84
+ type to##name() const { \
85
+ if (Tag::HAS_d == tag) { \
86
+ return checked_convert<type, double>(v.d, #type); \
87
+ } else if (Tag::HAS_z == tag) { \
88
+ return checked_convert<type, c10::complex<double>>(v.z, #type); \
89
+ } \
90
+ if (Tag::HAS_b == tag) { \
91
+ return checked_convert<type, bool>(v.i, #type); \
92
+ } else if (Tag::HAS_i == tag) { \
93
+ return checked_convert<type, int64_t>(v.i, #type); \
94
+ } else if (Tag::HAS_si == tag) { \
95
+ return checked_convert<type, int64_t>( \
96
+ toSymInt().guard_int(__FILE__, __LINE__), #type); \
97
+ } else if (Tag::HAS_sd == tag) { \
98
+ return checked_convert<type, int64_t>( \
99
+ toSymFloat().guard_float(__FILE__, __LINE__), #type); \
100
+ } else if (Tag::HAS_sb == tag) { \
101
+ return checked_convert<type, int64_t>( \
102
+ toSymBool().guard_bool(__FILE__, __LINE__), #type); \
103
+ } \
104
+ TORCH_CHECK(false) \
105
+ }
106
+
107
+ // TODO: Support ComplexHalf accessor
108
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ACCESSOR)
109
+
110
+ #undef DEFINE_ACCESSOR
111
+
112
+ SymInt toSymInt() const {
113
+ if (Tag::HAS_si == tag) {
114
+ return c10::SymInt(intrusive_ptr<SymNodeImpl>::reclaim_copy(
115
+ static_cast<SymNodeImpl*>(v.p)));
116
+ } else {
117
+ return toLong();
118
+ }
119
+ }
120
+
121
+ SymFloat toSymFloat() const {
122
+ if (Tag::HAS_sd == tag) {
123
+ return c10::SymFloat(intrusive_ptr<SymNodeImpl>::reclaim_copy(
124
+ static_cast<SymNodeImpl*>(v.p)));
125
+ } else {
126
+ return toDouble();
127
+ }
128
+ }
129
+
130
+ SymBool toSymBool() const {
131
+ if (Tag::HAS_sb == tag) {
132
+ return c10::SymBool(intrusive_ptr<SymNodeImpl>::reclaim_copy(
133
+ static_cast<SymNodeImpl*>(v.p)));
134
+ } else {
135
+ return toBool();
136
+ }
137
+ }
138
+
139
+ // also support scalar.to<int64_t>();
140
+ // Deleted for unsupported types, but specialized below for supported types
141
+ template <typename T>
142
+ T to() const = delete;
143
+
144
+ // audit uses of data_ptr
145
+ const void* data_ptr() const {
146
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
147
+ return static_cast<const void*>(&v);
148
+ }
149
+
150
+ bool isFloatingPoint() const {
151
+ return Tag::HAS_d == tag || Tag::HAS_sd == tag;
152
+ }
153
+
154
+ C10_DEPRECATED_MESSAGE(
155
+ "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")
156
+ bool isIntegral() const {
157
+ return Tag::HAS_i == tag || Tag::HAS_si == tag;
158
+ }
159
+ bool isIntegral(bool includeBool) const {
160
+ return Tag::HAS_i == tag || Tag::HAS_si == tag ||
161
+ (includeBool && isBoolean());
162
+ }
163
+
164
+ bool isComplex() const {
165
+ return Tag::HAS_z == tag;
166
+ }
167
+ bool isBoolean() const {
168
+ return Tag::HAS_b == tag || Tag::HAS_sb == tag;
169
+ }
170
+
171
+ // you probably don't actually want these; they're mostly for testing
172
+ bool isSymInt() const {
173
+ return Tag::HAS_si == tag;
174
+ }
175
+ bool isSymFloat() const {
176
+ return Tag::HAS_sd == tag;
177
+ }
178
+ bool isSymBool() const {
179
+ return Tag::HAS_sb == tag;
180
+ }
181
+
182
+ bool isSymbolic() const {
183
+ return Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag;
184
+ }
185
+
186
+ C10_ALWAYS_INLINE Scalar& operator=(Scalar&& other) noexcept {
187
+ if (&other == this) {
188
+ return *this;
189
+ }
190
+
191
+ destroy();
192
+ moveFrom(std::move(other));
193
+ return *this;
194
+ }
195
+
196
+ C10_ALWAYS_INLINE Scalar& operator=(const Scalar& other) {
197
+ if (&other == this) {
198
+ return *this;
199
+ }
200
+
201
+ *this = Scalar(other);
202
+ return *this;
203
+ }
204
+
205
+ Scalar operator-() const;
206
+ Scalar conj() const;
207
+ Scalar log() const;
208
+
209
+ template <
210
+ typename T,
211
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
212
+ bool equal(T num) const {
213
+ if (isComplex()) {
214
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
215
+ auto val = v.z;
216
+ return (val.real() == num) && (val.imag() == T());
217
+ } else if (isFloatingPoint()) {
218
+ TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
219
+ return v.d == num;
220
+ } else if (isIntegral(/*includeBool=*/false)) {
221
+ TORCH_CHECK(!isSymbolic(), "NYI SymInt equality");
222
+ return v.i == num;
223
+ } else if (isBoolean()) {
224
+ // boolean scalar does not equal to a non boolean value
225
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
226
+ return false;
227
+ } else {
228
+ TORCH_INTERNAL_ASSERT(false);
229
+ }
230
+ }
231
+
232
+ template <
233
+ typename T,
234
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
235
+ bool equal(T num) const {
236
+ if (isComplex()) {
237
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
238
+ return v.z == num;
239
+ } else if (isFloatingPoint()) {
240
+ TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
241
+ return (v.d == num.real()) && (num.imag() == T());
242
+ } else if (isIntegral(/*includeBool=*/false)) {
243
+ TORCH_CHECK(!isSymbolic(), "NYI SymInt equality");
244
+ return (v.i == num.real()) && (num.imag() == T());
245
+ } else if (isBoolean()) {
246
+ // boolean scalar does not equal to a non boolean value
247
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
248
+ return false;
249
+ } else {
250
+ TORCH_INTERNAL_ASSERT(false);
251
+ }
252
+ }
253
+
254
+ bool equal(bool num) const {
255
+ if (isBoolean()) {
256
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
257
+ return static_cast<bool>(v.i) == num;
258
+ } else {
259
+ return false;
260
+ }
261
+ }
262
+
263
+ ScalarType type() const {
264
+ if (isComplex()) {
265
+ return ScalarType::ComplexDouble;
266
+ } else if (isFloatingPoint()) {
267
+ return ScalarType::Double;
268
+ } else if (isIntegral(/*includeBool=*/false)) {
269
+ return ScalarType::Long;
270
+ } else if (isBoolean()) {
271
+ return ScalarType::Bool;
272
+ } else {
273
+ throw std::runtime_error("Unknown scalar type.");
274
+ }
275
+ }
276
+
277
+ Scalar(Scalar&& rhs) noexcept : tag(rhs.tag) {
278
+ moveFrom(std::move(rhs));
279
+ }
280
+
281
+ Scalar(const Scalar& rhs) : tag(rhs.tag), v(rhs.v) {
282
+ if (isSymbolic()) {
283
+ c10::raw::intrusive_ptr::incref(v.p);
284
+ }
285
+ }
286
+
287
+ Scalar(c10::SymInt si) {
288
+ if (auto m = si.maybe_as_int()) {
289
+ tag = Tag::HAS_i;
290
+ v.i = *m;
291
+ } else {
292
+ tag = Tag::HAS_si;
293
+ v.p = std::move(si).release();
294
+ }
295
+ }
296
+
297
+ Scalar(c10::SymFloat sd) {
298
+ if (sd.is_symbolic()) {
299
+ tag = Tag::HAS_sd;
300
+ v.p = std::move(sd).release();
301
+ } else {
302
+ tag = Tag::HAS_d;
303
+ v.d = sd.as_float_unchecked();
304
+ }
305
+ }
306
+
307
+ Scalar(c10::SymBool sb) {
308
+ if (auto m = sb.maybe_as_bool()) {
309
+ tag = Tag::HAS_b;
310
+ v.i = *m;
311
+ } else {
312
+ tag = Tag::HAS_sb;
313
+ v.p = std::move(sb).release();
314
+ }
315
+ }
316
+
317
+ // We can't set v in the initializer list using the
318
+ // syntax v{ .member = ... } because it doesn't work on MSVC
319
+ private:
320
+ enum class Tag { HAS_d, HAS_i, HAS_z, HAS_b, HAS_sd, HAS_si, HAS_sb };
321
+
322
+ // NB: assumes that self has already been cleared
323
+ C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept {
324
+ v = rhs.v;
325
+ tag = rhs.tag;
326
+ if (rhs.tag == Tag::HAS_si || rhs.tag == Tag::HAS_sd ||
327
+ rhs.tag == Tag::HAS_sb) {
328
+ // Move out of scalar
329
+ rhs.tag = Tag::HAS_i;
330
+ rhs.v.i = 0;
331
+ }
332
+ }
333
+
334
+ Tag tag;
335
+
336
+ union v_t {
337
+ double d{};
338
+ int64_t i;
339
+ c10::complex<double> z;
340
+ c10::intrusive_ptr_target* p;
341
+ v_t() {} // default constructor
342
+ } v;
343
+
344
+ template <
345
+ typename T,
346
+ typename std::enable_if<
347
+ std::is_integral<T>::value && !std::is_same<T, bool>::value,
348
+ bool>::type* = nullptr>
349
+ Scalar(T vv, bool) : tag(Tag::HAS_i) {
350
+ v.i = convert<decltype(v.i), T>(vv);
351
+ }
352
+
353
+ template <
354
+ typename T,
355
+ typename std::enable_if<
356
+ !std::is_integral<T>::value && !c10::is_complex<T>::value,
357
+ bool>::type* = nullptr>
358
+ Scalar(T vv, bool) : tag(Tag::HAS_d) {
359
+ v.d = convert<decltype(v.d), T>(vv);
360
+ }
361
+
362
+ template <
363
+ typename T,
364
+ typename std::enable_if<c10::is_complex<T>::value, bool>::type* = nullptr>
365
+ Scalar(T vv, bool) : tag(Tag::HAS_z) {
366
+ v.z = convert<decltype(v.z), T>(vv);
367
+ }
368
+ };
369
+
370
+ using OptionalScalarRef = c10::OptionalRef<Scalar>;
371
+
372
+ // define the scalar.to<int64_t>() specializations
373
+ #define DEFINE_TO(T, name) \
374
+ template <> \
375
+ inline T Scalar::to<T>() const { \
376
+ return to##name(); \
377
+ }
378
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_TO)
379
+ #undef DEFINE_TO
380
+
381
+ } // namespace c10
382
+
383
+ C10_CLANG_DIAGNOSTIC_POP()
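
A small sketch of how Scalar is typically constructed and read back (assumes linking against c10):

    #include <c10/core/Scalar.h>
    #include <iostream>

    int main() {
      c10::Scalar i = 3;    // stored with Tag::HAS_i (int64_t payload)
      c10::Scalar d = 2.5;  // stored with Tag::HAS_d (double payload)
      c10::Scalar b = true; // stored with Tag::HAS_b
      // Accessors convert with checked_convert; to<T>() forwards to them.
      std::cout << i.to<int64_t>() << " " << d.toDouble() << "\n"; // 3 2.5
      std::cout << std::boolalpha
                << (i.type() == c10::ScalarType::Long) << " "
                << b.isBoolean() << " "
                << d.equal(2.5) << "\n"; // true true true
      return 0;
    }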
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/typeid.h>
6
+
7
+ // these just expose TypeMeta/ScalarType bridge functions in c10
8
+ // TODO move to typeid.h (or codemod away) when TypeMeta et al
9
+ // are moved from caffe2 to c10 (see note at top of typeid.h)
10
+
11
+ namespace c10 {
12
+
13
+ /**
14
+ * convert ScalarType enum values to TypeMeta handles
15
+ */
16
+ static inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) {
17
+ return caffe2::TypeMeta::fromScalarType(scalar_type);
18
+ }
19
+
20
+ /**
21
+ * convert TypeMeta handles to ScalarType enum values
22
+ */
23
+ static inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
24
+ return dtype.toScalarType();
25
+ }
26
+
27
+ /**
28
+ * typeMetaToScalarType(), lifted to optional
29
+ */
30
+ static inline optional<at::ScalarType> optTypeMetaToScalarType(
31
+ optional<caffe2::TypeMeta> type_meta) {
32
+ if (!type_meta.has_value()) {
33
+ return c10::nullopt;
34
+ }
35
+ return type_meta->toScalarType();
36
+ }
37
+
38
+ /**
39
+ * convenience: equality across TypeMeta/ScalarType conversion
40
+ */
41
+ static inline bool operator==(ScalarType t, caffe2::TypeMeta m) {
42
+ return m.isScalarType(t);
43
+ }
44
+
45
+ static inline bool operator==(caffe2::TypeMeta m, ScalarType t) {
46
+ return t == m;
47
+ }
48
+
49
+ static inline bool operator!=(ScalarType t, caffe2::TypeMeta m) {
50
+ return !(t == m);
51
+ }
52
+
53
+ static inline bool operator!=(caffe2::TypeMeta m, ScalarType t) {
54
+ return !(t == m);
55
+ }
56
+
57
+ } // namespace c10
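
The bridge functions compose as one would expect; a minimal round-trip sketch:

    #include <c10/core/ScalarTypeToTypeMeta.h>
    #include <iostream>

    int main() {
      caffe2::TypeMeta meta = c10::scalarTypeToTypeMeta(c10::ScalarType::Float);
      c10::ScalarType st = c10::typeMetaToScalarType(meta);
      // Mixed comparisons are defined in both argument orders.
      std::cout << std::boolalpha << (st == meta) << " "
                << (meta != c10::ScalarType::Double) << "\n"; // true true
      // The optional-lifted variant simply propagates nullopt.
      auto none = c10::optTypeMetaToScalarType(c10::nullopt);
      std::cout << none.has_value() << "\n"; // false
      return 0;
    }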
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SingletonSymNodeImpl.h ADDED
@@ -0,0 +1,176 @@
1
+ #include <c10/core/ConstantSymNodeImpl.h>
2
+ #include <c10/core/SymBool.h>
3
+ #include <c10/core/SymNodeImpl.h>
4
+ #include <iostream>
5
+
6
+ namespace c10 {
7
+
8
+ // The motivating use case for this is to represent the ragged size structure
9
+ // of a jagged tensor [B, [s_0, s_1, s_2], D] as a single integer j0. This
10
+ // allows us to simply return [B, j0, D] if someone queries for the size of our
11
+ // tensor.
12
+ //
13
+ // Morally we define comparison between two singleton ints to return true if
14
+ // that comparison holds for all corresponding elements of the arrays they
15
+ // represent. Comparison between a singleton int and a plain int is defined
16
+ // similarly.
17
+ //
18
+ // To simulate this desired behavior but also avoid the O(N) cost of checking,
19
+ // we associate each raggedness pattern with an integer "id" that can be used as
20
+ // a proxy to evaluate equality. We also constrain the range of values for this
21
+ // so as to enable inequality checks.
22
+ //
23
+ // We also support a positive integer scalar "coeff" that is used for computing
24
+ // strides. For example, given a [B, j0, D] tensor, it can be strided in two
25
+ // different ways: [D * j0, D, 1] and [j0, 1, sum(j0)]. The coeff is used to
26
+ // differentiate the two cases.
27
+ //
28
+ // During tracing the strides of the outputs need to be a function of the size
29
+ // and strides of the inputs so it is important that SingletonSymNode itself is
30
+ // able to express this.
31
+ class C10_API SingletonSymNodeImpl : public SymNodeImpl {
32
+ public:
33
+ // CAUTION: you should probably not be constructing these directly; please use
34
+ // the higher-level API in python instead (TODO: actually introduce that).
35
+ explicit SingletonSymNodeImpl(int64_t val, int64_t coeff)
36
+ : val_(val), coeff_(coeff) {}
37
+
38
+ bool bool_() override {
39
+ return false;
40
+ }
41
+
42
+ bool is_int() override {
43
+ return true;
44
+ }
45
+
46
+ bool is_float() override {
47
+ return false;
48
+ }
49
+
50
+ bool is_bool() override {
51
+ return false;
52
+ }
53
+
54
+ bool has_hint() override {
55
+ return true;
56
+ }
57
+
58
+ c10::SymNode wrap_int(int64_t num) override {
59
+ return SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(num));
60
+ };
61
+
62
+ int64_t guard_int(const char* file, int64_t line) override {
63
+ TORCH_CHECK(false);
64
+ }
65
+
66
+ double guard_float(const char* file, int64_t line) override {
67
+ TORCH_CHECK(false, "not a float");
68
+ }
69
+
70
+ bool guard_bool(const char* file, int64_t line) override {
71
+ TORCH_CHECK(false, "not a bool");
72
+ }
73
+
74
+ int64_t int_() override {
75
+ TORCH_CHECK(false);
76
+ }
77
+
78
+ std::string str() override {
79
+ if (coeff_ == 1) {
80
+ return "j" + std::to_string(val_);
81
+ }
82
+ return std::to_string(coeff_) + "*j" + std::to_string(val_);
83
+ }
84
+
85
+ // NOTE [ Inequalities with SingletonInt ]
86
+ //
87
+ // The semantics of SingletonInt when it comes to relations is that it is
88
+ // treated as an integer known to be within a certain range,
89
+ //
90
+ // j0 \in [2, int64_t::max]
91
+ //
92
+ // allowing us to answer queries like j0 >= 1 (True), and j0 == 0 (False).
93
+ // This is a useful default range for the raggedness pattern of a jagged
94
+ // tensor (1) since sizes are non-negative, and (2) we need to get past 0/1
95
+ // specialization checks.
96
+ //
97
+ // [ Indeterminate inequalities error out ]
98
+ //
99
+ // Given the semantic defined above, certain relations like j0 < 3 are thus
100
+ // indeterminable. In our impl today, evaluating such relations errors out.
101
+ //
102
+ // It may seem convenient to just define indeterminate relations to return
103
+ // False, but the implementation we maintain in parallel using sympy does not
104
+ // allow this.
105
+ //
106
+ // Sympy only allows overriding of Ge. The other relations (Lt, Gt, Le) are,
107
+ // by consequence, all derived from Ge e.g., Lt(a, b) := !Ge(a, b). This
108
+ // would mean that if we define the indeterminate j0 >= 3 to be
109
+ // False, the also indeterminate j0 < 3 will be evaluated to be True!
110
+ //
111
+ // [ Coefficients are assumed positive ]
112
+ //
113
+ // For the purpose of computing inequalities, we consider the coefficient of
114
+ // the SingletonInt to be a positive integer.
115
+ //
116
+ // Thus, no modifications are needed to the logic since
117
+ // j0 >= k implies coeff * j0 >= k
118
+ //
119
+ c10::SymNode eq(const c10::SymNode& other) override;
120
+ c10::SymNode ne(const c10::SymNode& other) override;
121
+ c10::SymNode ge(const c10::SymNode& other) override;
122
+ c10::SymNode gt(const c10::SymNode& other) override;
123
+ c10::SymNode lt(const c10::SymNode& other) override;
124
+ c10::SymNode le(const c10::SymNode& other) override;
125
+ c10::SymNode mul(const c10::SymNode& other) override;
126
+
127
+ c10::optional<int64_t> singleton_int() override {
128
+ return val_;
129
+ }
130
+
131
+ c10::optional<int64_t> singleton_coeff() override {
132
+ return coeff_;
133
+ }
134
+
135
+ bool is_symbolic() override {
136
+ return false;
137
+ }
138
+
139
+ #define DEFINE_BINARY_NOT_SUPPORTED(name) \
140
+ c10::SymNode name(const c10::SymNode& other) override { \
141
+ TORCH_CHECK(false, #name " not supported by SingletonSymNode"); \
142
+ }
143
+
144
+ DEFINE_BINARY_NOT_SUPPORTED(add)
145
+ DEFINE_BINARY_NOT_SUPPORTED(sub)
146
+ DEFINE_BINARY_NOT_SUPPORTED(truediv)
147
+ DEFINE_BINARY_NOT_SUPPORTED(pow)
148
+ DEFINE_BINARY_NOT_SUPPORTED(floordiv)
149
+ DEFINE_BINARY_NOT_SUPPORTED(mod)
150
+ DEFINE_BINARY_NOT_SUPPORTED(sym_min)
151
+ DEFINE_BINARY_NOT_SUPPORTED(sym_max)
152
+ DEFINE_BINARY_NOT_SUPPORTED(sym_and)
153
+ DEFINE_BINARY_NOT_SUPPORTED(sym_or)
154
+
155
+ #undef DEFINE_BINARY_NOT_SUPPORTED
156
+
157
+ #define DEFINE_NOT_SUPPORTED(name) \
158
+ c10::SymNode name() override { \
159
+ TORCH_CHECK(false, #name " is not supported by SingletonSymNode"); \
160
+ }
161
+
162
+ DEFINE_NOT_SUPPORTED(sym_not)
163
+ DEFINE_NOT_SUPPORTED(ceil)
164
+ DEFINE_NOT_SUPPORTED(floor)
165
+ DEFINE_NOT_SUPPORTED(neg)
166
+ DEFINE_NOT_SUPPORTED(clone)
167
+ DEFINE_NOT_SUPPORTED(sym_float)
168
+
169
+ #undef DEFINE_NOT_SUPPORTED
170
+
171
+ private:
172
+ int64_t val_;
173
+ int64_t coeff_;
174
+ };
175
+
176
+ } // namespace c10
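The header above cautions against constructing these nodes directly, so the following is only an illustrative sketch of the val/coeff encoding and the str() format, assuming a c10/libtorch build:

#include <c10/core/SingletonSymNodeImpl.h>
#include <c10/util/intrusive_ptr.h>
#include <iostream>

int main() {
  // Two nodes sharing raggedness id 5; the second carries a stride coefficient.
  auto j5   = c10::make_intrusive<c10::SingletonSymNodeImpl>(5, 1);
  auto j5x3 = c10::make_intrusive<c10::SingletonSymNodeImpl>(5, 3);

  std::cout << j5->str() << "\n";    // "j5"
  std::cout << j5x3->str() << "\n";  // "3*j5"
  std::cout << *j5->singleton_int() << " " << *j5x3->singleton_coeff() << "\n";
  return 0;
}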
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Storage.h ADDED
@@ -0,0 +1,261 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/StorageImpl.h>
4
+ #include <c10/util/ExclusivelyOwned.h>
5
+
6
+ namespace c10 {
7
+
8
+ struct Storage;
9
+
10
+ C10_API bool isSharedStorageAlias(
11
+ const Storage& storage0,
12
+ const Storage& storage1);
13
+
14
+ struct C10_API Storage {
15
+ public:
16
+ struct use_byte_size_t {};
17
+ struct unsafe_borrow_t {
18
+ explicit unsafe_borrow_t() = default;
19
+ };
20
+
21
+ Storage() = default;
22
+ Storage(c10::intrusive_ptr<StorageImpl> ptr)
23
+ : storage_impl_(std::move(ptr)) {}
24
+
25
+ // Allocates memory buffer using given allocator and creates a storage with it
26
+ Storage(
27
+ use_byte_size_t /*use_byte_size*/,
28
+ SymInt size_bytes,
29
+ Allocator* allocator = nullptr,
30
+ bool resizable = false)
31
+ : storage_impl_(c10::make_intrusive<StorageImpl>(
32
+ StorageImpl::use_byte_size_t(),
33
+ std::move(size_bytes),
34
+ allocator,
35
+ resizable)) {}
36
+
37
+ // Creates storage with pre-allocated memory buffer. Allocator is given for
38
+ // potential future reallocations, however it can be nullptr if the storage
39
+ // is non-resizable
40
+ Storage(
41
+ use_byte_size_t /*use_byte_size*/,
42
+ size_t size_bytes,
43
+ at::DataPtr data_ptr,
44
+ at::Allocator* allocator = nullptr,
45
+ bool resizable = false)
46
+ : storage_impl_(c10::make_intrusive<StorageImpl>(
47
+ StorageImpl::use_byte_size_t(),
48
+ size_bytes,
49
+ std::move(data_ptr),
50
+ allocator,
51
+ resizable)) {}
52
+
53
+ protected:
54
+ explicit Storage(unsafe_borrow_t, const Storage& rhs)
55
+ : storage_impl_(c10::intrusive_ptr<c10::StorageImpl>::reclaim(
56
+ rhs.storage_impl_.get())) {}
57
+
58
+ friend MaybeOwnedTraits<Storage>;
59
+
60
+ public:
61
+ // Legacy constructor for partially initialized (dtype or memory) storages
62
+ // that can be temporarily created with Caffe2 APIs. See the note on top of
63
+ // TensorImpl.h for details.
64
+ static Storage create_legacy(at::Device device) {
65
+ auto allocator = GetAllocator(device.type());
66
+ return Storage(c10::make_intrusive<StorageImpl>(
67
+ StorageImpl::use_byte_size_t(),
68
+ 0,
69
+ allocator->allocate(0), // materialize a non-default Device.
70
+ allocator,
71
+ true));
72
+ }
73
+
74
+ // Mimic create_legacy, but without requiring a newly-created StorageImpl.
75
+ void reset_legacy() {
76
+ TORCH_CHECK(resizable() && allocator());
77
+ set_nbytes(0);
78
+ set_data_ptr_noswap(allocator()->allocate(0));
79
+ }
80
+
81
+ // TODO: remove later
82
+ void set_nbytes(size_t size_bytes) const {
83
+ storage_impl_.get()->set_nbytes(size_bytes);
84
+ }
85
+
86
+ void set_nbytes(c10::SymInt size_bytes) const {
87
+ storage_impl_.get()->set_nbytes(std::move(size_bytes));
88
+ }
89
+
90
+ bool resizable() const {
91
+ return storage_impl_->resizable();
92
+ }
93
+
94
+ size_t nbytes() const {
95
+ return storage_impl_->nbytes();
96
+ }
97
+
98
+ SymInt sym_nbytes() const {
99
+ return storage_impl_->sym_nbytes();
100
+ }
101
+ // get() use here is to get const-correctness
102
+
103
+ const void* data() const {
104
+ return storage_impl_->data();
105
+ }
106
+
107
+ void* mutable_data() const {
108
+ return storage_impl_->mutable_data();
109
+ }
110
+
111
+ at::DataPtr& mutable_data_ptr() const {
112
+ return storage_impl_->mutable_data_ptr();
113
+ }
114
+
115
+ const at::DataPtr& data_ptr() const {
116
+ return storage_impl_->data_ptr();
117
+ }
118
+
119
+ // Returns the previous data_ptr
120
+ at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const {
121
+ return storage_impl_.get()->set_data_ptr(std::move(data_ptr));
122
+ }
123
+
124
+ void set_data_ptr_noswap(at::DataPtr&& data_ptr) const {
125
+ return storage_impl_.get()->set_data_ptr_noswap(std::move(data_ptr));
126
+ }
127
+
128
+ DeviceType device_type() const {
129
+ return storage_impl_->device_type();
130
+ }
131
+
132
+ at::Allocator* allocator() const {
133
+ return storage_impl_.get()->allocator();
134
+ }
135
+
136
+ at::Device device() const {
137
+ return storage_impl_->device();
138
+ }
139
+
140
+ StorageImpl* unsafeReleaseStorageImpl() {
141
+ return storage_impl_.release();
142
+ }
143
+
144
+ StorageImpl* unsafeGetStorageImpl() const noexcept {
145
+ return storage_impl_.get();
146
+ }
147
+
148
+ c10::weak_intrusive_ptr<StorageImpl> getWeakStorageImpl() const {
149
+ return c10::weak_intrusive_ptr<StorageImpl>(storage_impl_);
150
+ }
151
+
152
+ operator bool() const {
153
+ return storage_impl_;
154
+ }
155
+
156
+ size_t use_count() const {
157
+ return storage_impl_.use_count();
158
+ }
159
+
160
+ inline bool unique() const {
161
+ return storage_impl_.unique();
162
+ }
163
+
164
+ bool is_alias_of(const Storage& other) const {
165
+ return (
166
+ storage_impl_ == other.storage_impl_ ||
167
+ isSharedStorageAlias(*this, other));
168
+ }
169
+
170
+ void UniqueStorageShareExternalPointer(
171
+ void* src,
172
+ size_t capacity,
173
+ DeleterFnPtr d = nullptr) {
174
+ if (!storage_impl_.unique()) {
175
+ TORCH_CHECK(
176
+ false,
177
+ "UniqueStorageShareExternalPointer can only be called when use_count == 1");
178
+ }
179
+ storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d);
180
+ }
181
+
182
+ void UniqueStorageShareExternalPointer(
183
+ at::DataPtr&& data_ptr,
184
+ size_t capacity) {
185
+ if (!storage_impl_.unique()) {
186
+ TORCH_CHECK(
187
+ false,
188
+ "UniqueStorageShareExternalPointer can only be called when use_count == 1");
189
+ }
190
+ storage_impl_->UniqueStorageShareExternalPointer(
191
+ std::move(data_ptr), capacity);
192
+ }
193
+
194
+ protected:
195
+ c10::intrusive_ptr<StorageImpl> storage_impl_;
196
+ };
197
+
198
+ template <>
199
+ struct MaybeOwnedTraits<c10::Storage> {
200
+ using owned_type = c10::Storage;
201
+ using borrow_type = c10::Storage;
202
+
203
+ static borrow_type createBorrow(const owned_type& from) {
204
+ return borrow_type(borrow_type::unsafe_borrow_t{}, from);
205
+ }
206
+
207
+ static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
208
+ lhs.unsafeReleaseStorageImpl();
209
+ lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
210
+ }
211
+
212
+ static void destroyBorrow(borrow_type& toDestroy) {
213
+ toDestroy.unsafeReleaseStorageImpl(); // "leak" it, but it was already +0.
214
+ }
215
+
216
+ static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
217
+ return borrow;
218
+ }
219
+
220
+ static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
221
+ return &borrow;
222
+ }
223
+
224
+ static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
225
+ return true;
226
+ }
227
+ };
228
+
229
+ template <>
230
+ struct ExclusivelyOwnedTraits<c10::Storage> {
231
+ using repr_type = c10::Storage;
232
+ using pointer_type = c10::Storage*;
233
+ using const_pointer_type = const c10::Storage*;
234
+
235
+ static repr_type nullRepr() {
236
+ return c10::Storage();
237
+ }
238
+
239
+ template <class... Args>
240
+ static repr_type createInPlace(Args&&... args) {
241
+ return c10::Storage(std::forward<Args>(args)...);
242
+ }
243
+
244
+ static repr_type moveToRepr(c10::Storage&& x) {
245
+ return std::move(x);
246
+ }
247
+
248
+ static c10::Storage take(c10::Storage& x) {
249
+ return std::move(x);
250
+ }
251
+
252
+ static pointer_type getImpl(repr_type& x) {
253
+ return &x;
254
+ }
255
+
256
+ static const_pointer_type getImpl(const repr_type& x) {
257
+ return &x;
258
+ }
259
+ };
260
+
261
+ } // namespace c10
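A minimal sketch of creating a resizable CPU Storage through the use_byte_size_t constructor above. It assumes the program links against libtorch/c10 so that the CPU allocator returned by c10::GetAllocator() is registered.

#include <c10/core/Storage.h>
#include <iostream>

int main() {
  // 64 bytes, CPU allocator, resizable.
  c10::Storage storage(
      c10::Storage::use_byte_size_t(),
      64,
      c10::GetAllocator(c10::DeviceType::CPU),
      /*resizable=*/true);

  std::cout << storage.nbytes() << " bytes on " << storage.device() << "\n";
  std::cout << "resizable: " << std::boolalpha << storage.resizable() << "\n";
  return 0;
}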
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h ADDED
@@ -0,0 +1,237 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/SymInt.h>
5
+ #include <c10/core/impl/PyObjectSlot.h>
6
+
7
+ #include <c10/util/intrusive_ptr.h>
8
+
9
+ namespace c10 {
10
+
11
+ // A storage represents the underlying backing data buffer for a
12
+ // tensor. This concept was inherited from the original Torch7
13
+ // codebase; we'd kind of like to get rid of the concept
14
+ // (see https://github.com/pytorch/pytorch/issues/14797) but
15
+ // it's hard work and no one has gotten around to doing it.
16
+ //
17
+ // NB: storage is supposed to uniquely own a data pointer; e.g.,
18
+ // two non-null data pointers alias if and only if they are from
19
+ // the same storage. Technically you can violate this invariant
20
+ // (e.g., you can create a non-owning StorageImpl with at::from_blob)
21
+ // but a lot of things won't work correctly, including:
22
+ //
23
+ // - An ordinary deleter on such a storage is wrong, because normal deleters
24
+ // assume unique ownership, but if you have two storages at the same data,
25
+ // that implies there is some sort of shared ownership. So your deleter would
26
+ // have to actually be internally doing some sort of refcount thing
27
+ // - Deepcopy in Python side relies on storage equality and not data pointer
28
+ // equality; so if there are two separate storages pointing to the same data,
29
+ // the data will actually get duplicated in that case (one data ptr before,
30
+ // two data ptrs after)
31
+ // - Version counts won't work correctly, because we do all VC tracking at the
32
+ // level of storages (unless you explicitly disconnect the VC with detach);
33
+ // mutations through the aliasing data pointer are totally untracked
34
+ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
35
+ public:
36
+ struct use_byte_size_t {};
37
+
38
+ StorageImpl(
39
+ use_byte_size_t /*use_byte_size*/,
40
+ SymInt size_bytes,
41
+ at::DataPtr data_ptr,
42
+ at::Allocator* allocator,
43
+ bool resizable)
44
+ : data_ptr_(std::move(data_ptr)),
45
+ size_bytes_(std::move(size_bytes)),
46
+ size_bytes_is_heap_allocated_(size_bytes_.is_heap_allocated()),
47
+ resizable_(resizable),
48
+ received_cuda_(false),
49
+ allocator_(allocator) {
50
+ if (resizable) {
51
+ TORCH_INTERNAL_ASSERT(
52
+ allocator_, "For resizable storage, allocator must be provided");
53
+ }
54
+ }
55
+
56
+ StorageImpl(
57
+ use_byte_size_t /*use_byte_size*/,
58
+ const SymInt& size_bytes,
59
+ at::Allocator* allocator,
60
+ bool resizable)
61
+ : StorageImpl(
62
+ use_byte_size_t(),
63
+ size_bytes,
64
+ size_bytes.is_heap_allocated()
65
+ ? allocator->allocate(0)
66
+ : allocator->allocate(size_bytes.as_int_unchecked()),
67
+ allocator,
68
+ resizable) {}
69
+
70
+ StorageImpl& operator=(StorageImpl&& other) = delete;
71
+ StorageImpl& operator=(const StorageImpl&) = delete;
72
+ StorageImpl() = delete;
73
+ StorageImpl(StorageImpl&& other) = delete;
74
+ StorageImpl(const StorageImpl&) = delete;
75
+ ~StorageImpl() override = default;
76
+
77
+ void reset() {
78
+ data_ptr_.clear();
79
+ size_bytes_ = 0;
80
+ size_bytes_is_heap_allocated_ = false;
81
+ }
82
+
83
+ // Destructor doesn't call release_resources because it's
84
+ // unnecessary; don't forget to change that if needed!
85
+ void release_resources() override {
86
+ data_ptr_.clear();
87
+ }
88
+
89
+ size_t nbytes() const {
90
+ // OK to do this instead of maybe_as_int as nbytes is guaranteed positive
91
+ TORCH_CHECK(!size_bytes_is_heap_allocated_);
92
+ return size_bytes_.as_int_unchecked();
93
+ }
94
+
95
+ SymInt sym_nbytes() const {
96
+ return size_bytes_;
97
+ }
98
+
99
+ // TODO: remove later
100
+ void set_nbytes(size_t size_bytes) {
101
+ size_bytes_ = size_bytes;
102
+ size_bytes_is_heap_allocated_ = false;
103
+ }
104
+
105
+ void set_nbytes(c10::SymInt size_bytes) {
106
+ size_bytes_ = std::move(size_bytes);
107
+ }
108
+
109
+ bool resizable() const {
110
+ return resizable_;
111
+ }
112
+
113
+ at::DataPtr& mutable_data_ptr() {
114
+ return data_ptr_;
115
+ }
116
+
117
+ const at::DataPtr& data_ptr() const {
118
+ return data_ptr_;
119
+ }
120
+
121
+ // Returns the previous data_ptr
122
+ at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) {
123
+ at::DataPtr old_data_ptr(std::move(data_ptr_));
124
+ data_ptr_ = std::move(data_ptr);
125
+ return old_data_ptr;
126
+ }
127
+
128
+ void set_data_ptr_noswap(at::DataPtr&& data_ptr) {
129
+ data_ptr_ = std::move(data_ptr);
130
+ }
131
+
132
+ const void* data() const {
133
+ return data_ptr_.get();
134
+ }
135
+
136
+ void* mutable_data() {
137
+ return data_ptr_.mutable_get();
138
+ }
139
+
140
+ at::DeviceType device_type() const {
141
+ return data_ptr_.device().type();
142
+ }
143
+
144
+ at::Allocator* allocator() {
145
+ return allocator_;
146
+ }
147
+
148
+ const at::Allocator* allocator() const {
149
+ return allocator_;
150
+ }
151
+
152
+ // You generally shouldn't use this method, but it is occasionally
153
+ // useful if you want to override how a tensor will be reallocated,
154
+ // after it was already allocated (and its initial allocator was
155
+ // set)
156
+ void set_allocator(at::Allocator* allocator) {
157
+ allocator_ = allocator;
158
+ }
159
+
160
+ Device device() const {
161
+ return data_ptr_.device();
162
+ }
163
+
164
+ void set_resizable(bool resizable) {
165
+ if (resizable) {
166
+ // We need an allocator to be resizable
167
+ AT_ASSERT(allocator_);
168
+ }
169
+ resizable_ = resizable;
170
+ }
171
+
172
+ /**
173
+ * Can only be called when use_count is 1
174
+ */
175
+ void UniqueStorageShareExternalPointer(
176
+ void* src,
177
+ size_t size_bytes,
178
+ DeleterFnPtr d = nullptr) {
179
+ UniqueStorageShareExternalPointer(
180
+ at::DataPtr(src, src, d, data_ptr_.device()), size_bytes);
181
+ }
182
+
183
+ /**
184
+ * Can only be called when use_count is 1
185
+ */
186
+ void UniqueStorageShareExternalPointer(
187
+ at::DataPtr&& data_ptr,
188
+ size_t size_bytes) {
189
+ data_ptr_ = std::move(data_ptr);
190
+ size_bytes_ = size_bytes;
191
+ size_bytes_is_heap_allocated_ = false;
192
+ allocator_ = nullptr;
193
+ resizable_ = false;
194
+ }
195
+
196
+ // This method can be used only after storage construction and cannot be used
197
+ // to modify storage status
198
+ void set_received_cuda(bool received_cuda) {
199
+ received_cuda_ = received_cuda;
200
+ }
201
+
202
+ bool received_cuda() {
203
+ return received_cuda_;
204
+ }
205
+
206
+ impl::PyObjectSlot* pyobj_slot() {
207
+ return &pyobj_slot_;
208
+ }
209
+
210
+ const impl::PyObjectSlot* pyobj_slot() const {
211
+ return &pyobj_slot_;
212
+ }
213
+
214
+ private:
215
+ DataPtr data_ptr_;
216
+ SymInt size_bytes_;
217
+ bool size_bytes_is_heap_allocated_;
218
+ bool resizable_;
219
+ // Identifies that Storage was received from another process and doesn't have
220
+ // local to process cuda memory allocation
221
+ bool received_cuda_;
222
+ Allocator* allocator_;
223
+ impl::PyObjectSlot pyobj_slot_;
224
+ };
225
+
226
+ // Declare StorageImpl create function pointer types.
227
+ using StorageImplCreateHelper = intrusive_ptr<StorageImpl> (*)(
228
+ StorageImpl::use_byte_size_t,
229
+ SymInt size_bytes,
230
+ Allocator* allocator,
231
+ bool resizable);
232
+
233
+ C10_API void SetStorageImplCreate(DeviceType t, StorageImplCreateHelper fptr);
234
+
235
+ C10_API StorageImplCreateHelper GetStorageImplCreate(DeviceType t);
236
+
237
+ } // namespace c10
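A sketch of wrapping an externally owned buffer in a non-resizable StorageImpl, in the spirit of the at::from_blob caveat discussed in the comment above; `buffer` is just an illustrative array and the whole snippet assumes a c10/libtorch build.

#include <c10/core/StorageImpl.h>
#include <iostream>

int main() {
  // The buffer is owned elsewhere (static storage here), so the DataPtr gets a
  // no-op deleter and the storage is created as non-resizable.
  static float buffer[16] = {};
  at::DataPtr data_ptr(
      buffer, buffer, /*ctx_deleter=*/nullptr, c10::Device(c10::DeviceType::CPU));

  auto impl = c10::make_intrusive<c10::StorageImpl>(
      c10::StorageImpl::use_byte_size_t(),
      sizeof(buffer),
      std::move(data_ptr),
      /*allocator=*/nullptr,
      /*resizable=*/false);

  std::cout << impl->nbytes() << " bytes, device " << impl->device() << "\n";
  return 0;
}

As the NB comment above warns, creating a second storage over the same pointer would break the unique-ownership invariant, so this pattern should wrap each buffer at most once.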
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Stream.h ADDED
@@ -0,0 +1,169 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+
5
+ namespace c10 {
6
+
7
+ /// An index representing a specific stream. A StreamId is not independently
8
+ /// meaningful without knowing the Device it is associated with; try to
9
+ /// use Stream rather than StreamId directly.
10
+ ///
11
+ /// StreamIds are opaque; they are assigned by some DeviceType-specific
12
+ /// numbering system which is not visible to the user. HOWEVER, we
13
+ /// guarantee that StreamId 0 is always a valid stream, and corresponds
14
+ /// to some sort of "default" stream.
15
+ using StreamId = int64_t;
16
+
17
+ struct C10_API StreamData3 {
18
+ StreamId stream_id;
19
+ DeviceIndex device_index;
20
+ DeviceType device_type;
21
+ };
22
+
23
+ // NB: I decided not to call the above StreamIndex to avoid confusion with
24
+ // DeviceIndex. This way, you access device index with index(), and stream id
25
+ // with id()
26
+
27
+ /**
28
+ * A stream is a software mechanism used to synchronize launched kernels
29
+ * without requiring explicit synchronizations between kernels. The basic
30
+ * model is that every kernel launch is associated with a stream: every
31
+ * kernel on the same stream is implicitly synchronized so that if I launch
32
+ * kernels A and B on the same stream, A is guaranteed to finish before B
33
+ * launches. If I want B to run concurrently with A, I must schedule
34
+ * it on a different stream.
35
+ *
36
+ * The Stream class is a backend agnostic value class representing a stream
37
+ * which I may schedule a kernel on. Every stream is associated with a device,
38
+ * which is recorded in the stream and is used to avoid confusion about which
39
+ * device a stream refers to.
40
+ *
41
+ * Streams are explicitly thread-safe, in the sense that it is OK to pass
42
+ * a Stream from one thread to another, and kernels queued from two different
43
+ * threads will still get serialized appropriately. (Of course, the
44
+ * time when the kernels get queued is undetermined unless you synchronize
45
+ * host side ;)
46
+ *
47
+ * Stream does NOT have a default constructor. Streams are for expert
48
+ * users; if you want to use Streams, we're going to assume you know
49
+ * how to deal with C++ template error messages if you try to
50
+ * resize() a vector of Streams.
51
+ *
52
+ * Known instances of streams in backends:
53
+ *
54
+ * - cudaStream_t (CUDA)
55
+ * - hipStream_t (HIP)
56
+ * - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration
57
+ * does NOT support command queues.)
58
+ *
59
+ * Because this class is device agnostic, it cannot provide backend-specific
60
+ * functionality (e.g., get the cudaStream_t of a CUDA stream.) There are
61
+ * wrapper classes which provide this functionality, e.g., CUDAStream.
62
+ */
63
+ class C10_API Stream final {
64
+ private:
65
+ Device device_;
66
+ StreamId id_;
67
+
68
+ public:
69
+ enum Unsafe { UNSAFE };
70
+ enum Default { DEFAULT };
71
+
72
+ /// Unsafely construct a stream from a Device and a StreamId. In
73
+ /// general, only specific implementations of streams for a
74
+ /// backend should manufacture Stream directly in this way; other users
75
+ /// should use the provided APIs to get a stream. In particular,
76
+ /// we don't require backends to give any guarantees about non-zero
77
+ /// StreamIds; they are welcome to allocate in whatever way they like.
78
+ explicit Stream(Unsafe, Device device, StreamId id)
79
+ : device_(device), id_(id) {}
80
+
81
+ /// Construct the default stream of a Device. The default stream is
82
+ /// NOT the same as the current stream; default stream is a fixed stream
83
+ /// that never changes, whereas the current stream may be changed by
84
+ /// StreamGuard.
85
+ explicit Stream(Default, Device device) : device_(device), id_(0) {}
86
+
87
+ bool operator==(const Stream& other) const noexcept {
88
+ return this->device_ == other.device_ && this->id_ == other.id_;
89
+ }
90
+ bool operator!=(const Stream& other) const noexcept {
91
+ return !(*this == other);
92
+ }
93
+
94
+ Device device() const noexcept {
95
+ return device_;
96
+ }
97
+ DeviceType device_type() const noexcept {
98
+ return device_.type();
99
+ }
100
+ DeviceIndex device_index() const noexcept {
101
+ return device_.index();
102
+ }
103
+ StreamId id() const noexcept {
104
+ return id_;
105
+ }
106
+
107
+ // Enqueues a wait instruction in the stream's work queue.
108
+ // This instruction is a no-op unless the event is marked
109
+ // for recording. In that case the stream stops processing
110
+ // until the event is recorded.
111
+ template <typename T>
112
+ void wait(const T& event) const {
113
+ event.block(*this);
114
+ }
115
+
116
+ // Return whether all asynchronous work previously enqueued on this stream
117
+ // has completed running on the device.
118
+ bool query() const;
119
+
120
+ // Wait (by blocking the calling thread) until all asynchronous work enqueued
121
+ // on this stream has completed running on the device.
122
+ void synchronize() const;
123
+
124
+ // The purpose of this function is to more conveniently permit binding
125
+ // of Stream to and from Python. Without packing, I have to setup a whole
126
+ // class with two fields (device and stream id); with packing I can just
127
+ // store a single uint64_t.
128
+ //
129
+ // The particular way we pack streams into a uint64_t is considered an
130
+ // implementation detail and should not be relied upon.
131
+ uint64_t hash() const noexcept {
132
+ // Concat these together into a 64-bit integer
133
+ uint64_t bits = static_cast<uint64_t>(device_type()) << 56 |
134
+ static_cast<uint64_t>(device_index()) << 48 |
135
+ // Remove the sign extension part of the 64-bit address because
136
+ // the id might be used to hold a pointer.
137
+ (static_cast<uint64_t>(id()) & ((1ull << 48) - 1));
138
+ return bits;
139
+ }
140
+
141
+ struct StreamData3 pack3() const {
142
+ return {id(), device_index(), device_type()};
143
+ }
144
+
145
+ static Stream unpack3(
146
+ StreamId stream_id,
147
+ DeviceIndex device_index,
148
+ DeviceType device_type) {
149
+ TORCH_CHECK(isValidDeviceType(device_type));
150
+ return Stream(UNSAFE, Device(device_type, device_index), stream_id);
151
+ }
152
+
153
+ // I decided NOT to provide setters on this class, because really,
154
+ // why would you change the device of a stream? Just construct
155
+ // it correctly from the beginning dude.
156
+ };
157
+
158
+ C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s);
159
+
160
+ } // namespace c10
161
+
162
+ namespace std {
163
+ template <>
164
+ struct hash<c10::Stream> {
165
+ size_t operator()(c10::Stream s) const noexcept {
166
+ return std::hash<uint64_t>{}(s.hash());
167
+ }
168
+ };
169
+ } // namespace std
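Since Stream is a plain value type, it can be constructed and packed without touching any device; a small sketch (assuming a c10/libtorch build, no GPU required):

#include <c10/core/Stream.h>
#include <iostream>
#include <unordered_set>

int main() {
  c10::Device dev(c10::DeviceType::CUDA, 0);
  c10::Stream s(c10::Stream::DEFAULT, dev);  // StreamId 0 is the default stream

  // Round-trip through the StreamData3 packing.
  c10::StreamData3 packed = s.pack3();
  c10::Stream s2 = c10::Stream::unpack3(
      packed.stream_id, packed.device_index, packed.device_type);
  std::cout << std::boolalpha << (s == s2) << "\n";  // true

  // The std::hash<c10::Stream> specialization lets streams be unordered keys.
  std::unordered_set<c10::Stream> streams{s, s2};
  std::cout << streams.size() << "\n";  // 1
  return 0;
}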
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h ADDED
@@ -0,0 +1,109 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymBool.h>
4
+ #include <c10/core/SymNodeImpl.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+
9
+ #include <limits>
10
+
11
+ namespace c10 {
12
+
13
+ // NB: this is actually double precision; we're using the Python naming here
14
+ class C10_API SymFloat {
15
+ public:
16
+ /*implicit*/ SymFloat(double d) : data_(d){};
17
+ SymFloat(SymNode ptr)
18
+ : data_(std::numeric_limits<double>::quiet_NaN()), ptr_(std::move(ptr)) {
19
+ TORCH_CHECK(ptr_->is_float());
20
+ };
21
+ SymFloat() : data_(0.0) {}
22
+
23
+ SymNodeImpl* toSymNodeImplUnowned() const {
24
+ return ptr_.get();
25
+ }
26
+
27
+ SymNodeImpl* release() && {
28
+ return std::move(ptr_).release();
29
+ }
30
+
31
+ // Only valid if is_symbolic()
32
+ SymNode toSymNodeImpl() const;
33
+
34
+ // Guaranteed to return a SymNode, wrapping using base if necessary
35
+ SymNode wrap_node(const SymNode& base) const;
36
+
37
+ double expect_float() const {
38
+ TORCH_CHECK(!is_symbolic());
39
+ return data_;
40
+ }
41
+
42
+ SymFloat operator+(const SymFloat&) const;
43
+ SymFloat operator-(const SymFloat&) const;
44
+ SymFloat operator*(const SymFloat&) const;
45
+ SymFloat operator/(const SymFloat&) const;
46
+
47
+ SymBool sym_eq(const SymFloat&) const;
48
+ SymBool sym_ne(const SymFloat&) const;
49
+ SymBool sym_lt(const SymFloat&) const;
50
+ SymBool sym_le(const SymFloat&) const;
51
+ SymBool sym_gt(const SymFloat&) const;
52
+ SymBool sym_ge(const SymFloat&) const;
53
+
54
+ bool operator==(const SymFloat& o) const {
55
+ return sym_eq(o).guard_bool(__FILE__, __LINE__);
56
+ }
57
+ bool operator!=(const SymFloat& o) const {
58
+ return sym_ne(o).guard_bool(__FILE__, __LINE__);
59
+ }
60
+ bool operator<(const SymFloat& o) const {
61
+ return sym_lt(o).guard_bool(__FILE__, __LINE__);
62
+ }
63
+ bool operator<=(const SymFloat& o) const {
64
+ return sym_le(o).guard_bool(__FILE__, __LINE__);
65
+ }
66
+ bool operator>(const SymFloat& o) const {
67
+ return sym_gt(o).guard_bool(__FILE__, __LINE__);
68
+ }
69
+ bool operator>=(const SymFloat& o) const {
70
+ return sym_ge(o).guard_bool(__FILE__, __LINE__);
71
+ }
72
+
73
+ SymFloat min(const SymFloat& sci) const;
74
+ SymFloat max(const SymFloat& sci) const;
75
+
76
+ // Need guidance on where to put this code
77
+ SymFloat sqrt() const;
78
+
79
+ // Insert a guard for the float to be its concrete value, and then return
80
+ // that value. This operation always works, even if the float is symbolic,
81
+ // so long as we know what the underlying value is. Don't blindly put this
82
+ // everywhere; you can cause overspecialization of PyTorch programs with
83
+ // this method.
84
+ //
85
+ // It should be called as guard_float(__FILE__, __LINE__). The file and line
86
+ // number can be used to diagnose overspecialization.
87
+ double guard_float(const char* file, int64_t line) const;
88
+
89
+ bool has_hint() const;
90
+
91
+ // N.B. It's important to keep this definition in the header
92
+ // as we expect if checks to be folded for mobile builds
93
+ // where `is_symbolic` is always false
94
+ C10_ALWAYS_INLINE bool is_symbolic() const {
95
+ return ptr_;
96
+ }
97
+
98
+ double as_float_unchecked() const {
99
+ return data_;
100
+ }
101
+
102
+ private:
103
+ // TODO: optimize to union
104
+ double data_;
105
+ SymNode ptr_;
106
+ };
107
+
108
+ C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s);
109
+ } // namespace c10
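For non-symbolic values a SymFloat behaves like a plain double; a minimal sketch (assuming a c10/libtorch build):

#include <c10/core/SymFloat.h>
#include <iostream>

int main() {
  c10::SymFloat a(3.0), b(4.0);  // plain doubles, no SymNode attached

  c10::SymFloat hyp = (a * a + b * b).sqrt();
  std::cout << hyp.guard_float(__FILE__, __LINE__) << "\n";  // 5

  std::cout << std::boolalpha << hyp.is_symbolic() << " "
            << a.expect_float() << "\n";  // false 3
  return 0;
}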
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h ADDED
@@ -0,0 +1,362 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymBool.h>
4
+ #include <c10/core/SymNodeImpl.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/Optional.h>
8
+
9
+ #include <numeric>
10
+ #include <type_traits>
11
+
12
+ namespace c10 {
13
+
14
+ class SymFloat;
15
+
16
+ // SymInt represents either a regular int64_t, or a symbolic integer
17
+ // (represented in a type erased way as SymNode). The intention is for SymInt
18
+ // to represent symbolic sizes that arise when doing shape computation in
19
+ // operator kernels. This allows for tracing through programs without baking in
20
+ // concrete sizes into kernel calls.
21
+ //
22
+ // SymInt has an API equivalent to int64_t. In particular, it is a value type.
23
+ // Internally, SymInt is represented in a clever packed way, so that it only
24
+ // occupies one word of space; but morally, it is a union between an int64_t
25
+ // and an intrusive pointer to SymNodeImpl.
26
+ //
27
+ // Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where
28
+ // is_int() returns true
29
+
30
+ class C10_API SymInt {
31
+ public:
32
+ enum Unchecked {
33
+ UNCHECKED,
34
+ };
35
+
36
+ /*implicit*/ SymInt(int64_t d) : data_(d) {
37
+ if (is_heap_allocated()) {
38
+ // Large negative number, heap allocate it
39
+ promote_to_negative();
40
+ }
41
+ };
42
+ SymInt() : data_(0) {}
43
+ SymInt(SymNode n);
44
+
45
+ // unchecked c-tor accepting raw `data_`
46
+ // One appropriate use for this is when you are constructing a symint
47
+ // in a situation where you know it is non-negative (or, if it is negative,
48
+ // the negative value is -1; i.e., not user controlled)
49
+ SymInt(Unchecked, int64_t d) : data_(d) {}
50
+
51
+ // TODO: these implementations are not optimal because they allocate a
52
+ // temporary and then use the move constructor/assignment
53
+ SymInt(const SymInt& s) : data_(0) {
54
+ if (s.is_heap_allocated()) {
55
+ *this = SymInt(s.toSymNode());
56
+ } else {
57
+ data_ = s.data_;
58
+ }
59
+ }
60
+ SymInt(SymInt&& s) noexcept : data_(s.data_) {
61
+ s.data_ = 0;
62
+ }
63
+
64
+ SymInt& operator=(const SymInt& s) {
65
+ if (this != &s) {
66
+ if (s.is_heap_allocated()) {
67
+ *this = SymInt(s.toSymNode());
68
+ } else {
69
+ data_ = s.data_;
70
+ }
71
+ }
72
+ return *this;
73
+ }
74
+ SymInt& operator=(SymInt&& s) noexcept {
75
+ if (this != &s) {
76
+ release_(); // release the current SymNode if any
77
+ data_ = s.data_;
78
+ if (s.is_heap_allocated())
79
+ s.data_ = 0;
80
+ };
81
+ return *this;
82
+ }
83
+
84
+ SymNodeImpl* toSymNodeImplUnowned() const {
85
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated());
86
+ uint64_t unextended_bits = static_cast<uint64_t>(data_) & ~MASK;
87
+ uint64_t sign_bit_mask = 1ULL << (62 - 1);
88
+ // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c
89
+ uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask;
90
+ return static_cast<SymNodeImpl*>(
91
+ reinterpret_cast<void*>(static_cast<uintptr_t>(extended_bits)));
92
+ }
93
+
94
+ void release_() {
95
+ if (is_heap_allocated()) {
96
+ SymNode::reclaim(toSymNodeImplUnowned()); // steal
97
+ }
98
+ }
99
+
100
+ SymNodeImpl* release() && {
101
+ #ifndef C10_MOBILE
102
+ TORCH_INTERNAL_ASSERT(is_heap_allocated());
103
+ auto* r = toSymNodeImplUnowned();
104
+ data_ = 0; // transfer ownership
105
+ return r;
106
+ #else
107
+ TORCH_INTERNAL_ASSERT(false);
108
+ #endif
109
+ }
110
+
111
+ // Only valid if is_heap_allocated()
112
+ SymNode toSymNode() const;
113
+
114
+ // Guaranteed to return a SymNode, wrapping using base if necessary
115
+ SymNode wrap_node(const SymNode& base) const;
116
+
117
+ ~SymInt() {
118
+ release_();
119
+ }
120
+
121
+ // Require the int to be non-symbolic, and if it is symbolic raise an
122
+ // error. This is safe to use for C++ code that doesn't work for symbolic
123
+ // shapes, and you don't have time to fix it immediately, as if we
124
+ // try to trigger the path in C++ you'll appropriately get an error
125
+ int64_t expect_int() const {
126
+ if (auto r = maybe_as_int()) {
127
+ return *r;
128
+ }
129
+ TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(
130
+ false, "when unpacking SymInt, expected int but got ", *this);
131
+ }
132
+
133
+ // Test if we have a hint for this int (e.g., guard_int would work).
134
+ // Most of the time this is true; it is only false when you have
135
+ // an unbacked SymInt.
136
+ bool has_hint() const;
137
+
138
+ // Insert a guard for the int to be its concrete value, and then return
139
+ // that value. This operation always works, even if the int is symbolic,
140
+ // so long as we know what the underlying value is (e.g., this won't work
141
+ // if you call it on the size of nonzero output). Don't blindly put this
142
+ // everywhere; you can cause overspecialization of PyTorch programs with
143
+ // this method.
144
+ //
145
+ // It should be called as guard_int(__FILE__, __LINE__). The file and line
146
+ // number can be used to diagnose overspecialization.
147
+ int64_t guard_int(const char* file, int64_t line) const;
148
+
149
+ // Insert a guard that this SymInt must be size-like, returning true if
150
+ // the integer actually is >= 0. Unlike manually performing a >= 0 test,
151
+ // if the SymInt in question is an unbacked SymInt (or, potentially in the
152
+ // future, if it contains unbacked SymInts), we will also treat the
153
+ // unbacked SymInt as statically testing >= 2 (which will prevent us from
154
+ // choking on, e.g., contiguity checks.)
155
+ bool expect_size(const char* file, int64_t line) const;
156
+
157
+ // Distinguish actual symbolic values from constants stored on the heap
158
+ bool is_symbolic() const {
159
+ return is_heap_allocated() &&
160
+ !toSymNodeImplUnowned()->constant_int().has_value();
161
+ }
162
+
163
+ // N.B. It's important to keep this definition in the header
164
+ // as we expect if checks to be folded for mobile builds
165
+ // where `is_heap_allocated` is always false and optimize dead code paths
166
+ C10_ALWAYS_INLINE bool is_heap_allocated() const {
167
+ #ifdef C10_MOBILE
168
+ return false;
169
+ #else
170
+ return !check_range(data_);
171
+ #endif
172
+ }
173
+
174
+ SymInt operator+(const SymInt& sci) const;
175
+ SymInt operator-(const SymInt& sci) const;
176
+ SymInt operator*(const SymInt& sci) const;
177
+ SymInt operator/(const SymInt& sci) const;
178
+ SymInt operator%(const SymInt& sci) const;
179
+ void operator*=(const SymInt& sci);
180
+ void operator+=(const SymInt& sci);
181
+ void operator/=(const SymInt& sci);
182
+
183
+ SymInt clone() const;
184
+
185
+ SymBool sym_eq(const SymInt&) const;
186
+ SymBool sym_ne(const SymInt&) const;
187
+ SymBool sym_lt(const SymInt&) const;
188
+ SymBool sym_le(const SymInt&) const;
189
+ SymBool sym_gt(const SymInt&) const;
190
+ SymBool sym_ge(const SymInt&) const;
191
+
192
+ bool operator==(const SymInt& o) const {
193
+ return sym_eq(o).guard_bool(__FILE__, __LINE__);
194
+ }
195
+ bool operator!=(const SymInt& o) const {
196
+ return sym_ne(o).guard_bool(__FILE__, __LINE__);
197
+ }
198
+ bool operator<(const SymInt& o) const {
199
+ return sym_lt(o).guard_bool(__FILE__, __LINE__);
200
+ }
201
+ bool operator<=(const SymInt& o) const {
202
+ return sym_le(o).guard_bool(__FILE__, __LINE__);
203
+ }
204
+ bool operator>(const SymInt& o) const {
205
+ return sym_gt(o).guard_bool(__FILE__, __LINE__);
206
+ }
207
+ bool operator>=(const SymInt& o) const {
208
+ return sym_ge(o).guard_bool(__FILE__, __LINE__);
209
+ }
210
+
211
+ SymInt min(const SymInt& sci) const;
212
+ SymInt max(const SymInt& sci) const;
213
+
214
+ // If both are symbolic, this checks if
215
+ // they share the same node.
216
+ // If both are not symbolic this just checks normal equality.
217
+ bool is_same(const SymInt& other) const;
218
+
219
+ operator SymFloat() const;
220
+
221
+ // Don't use this. Prefer maybe_as_int instead
222
+ int64_t as_int_unchecked() const {
223
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
224
+ return data_;
225
+ }
226
+
227
+ c10::optional<int64_t> maybe_as_int() const {
228
+ if (!is_heap_allocated()) {
229
+ return c10::make_optional(data_);
230
+ }
231
+ auto* node = toSymNodeImplUnowned();
232
+ if (auto c = node->constant_int()) {
233
+ return c;
234
+ }
235
+ return node->maybe_as_int();
236
+ }
237
+
238
+ // Return whether the integer is directly coercible to a SymInt
239
+ // without requiring heap allocation. You don't need to use this
240
+ // to check if you can pass an integer to SymInt; this is guaranteed
241
+ // to work (it just might heap allocate!)
242
+ static bool check_range(int64_t i) {
243
+ return i > MAX_UNREPRESENTABLE_INT;
244
+ }
245
+
246
+ // Return the min representable integer as a SymInt without
247
+ // heap allocation. For quantities that count bytes (or larger),
248
+ // this is still much larger than you need, so you may consider
249
+ // using this as a more efficient version of MIN_INT
250
+ static constexpr int64_t min_representable_int() {
251
+ return MAX_UNREPRESENTABLE_INT + 1;
252
+ }
253
+
254
+ private:
255
+ void promote_to_negative();
256
+
257
+ // Constraints on the internal representation:
258
+ //
259
+ // - Should represent positive and small negative ints
260
+ // - No conversion necessary for operations on ints
261
+ // - Must represent valid 64-bit pointers
262
+ // - Is symbolic test should be FAST (two arithmetic instructions is too
263
+ // much).
264
+ // This code being a hotpath is based on Strobelight profiles of
265
+ // is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd
266
+ // (you will need to change the time window).
267
+ //
268
+ // So, the scheme is to reserve large negative numbers (assuming
269
+ // two's complement):
270
+ //
271
+ // - 0b0.... means we are a positive int
272
+ // - 0b11... means we are a small negative int
273
+ // - 0b10... means we are a pointer. This means that
274
+ // [-2^63, -2^62-1] are not representable as ints.
275
+ // We don't actually need all of this space on x86_64
276
+ // as the top 16bits aren't used for anything
277
+ static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61;
278
+ static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61;
279
+ // We must manually translate the bit pattern test into a greater
280
+ // than test because compiler doesn't figure it out:
281
+ // https://godbolt.org/z/356aferaW
282
+ static constexpr int64_t MAX_UNREPRESENTABLE_INT =
283
+ -1LL & static_cast<int64_t>(~(1ULL << 62));
284
+ int64_t data_;
285
+ };
286
+
287
+ /// Sum of a list of SymInt; accumulates into the c10::SymInt expression
288
+ template <
289
+ typename C,
290
+ typename std::enable_if<
291
+ std::is_same<typename C::value_type, c10::SymInt>::value,
292
+ int>::type = 0>
293
+ inline c10::SymInt multiply_integers(const C& container) {
294
+ return std::accumulate(
295
+ container.begin(),
296
+ container.end(),
297
+ c10::SymInt(1),
298
+ [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
299
+ }
300
+
301
+ template <
302
+ typename Iter,
303
+ typename = std::enable_if_t<std::is_same<
304
+ typename std::iterator_traits<Iter>::value_type,
305
+ c10::SymInt>::value>>
306
+ inline c10::SymInt multiply_integers(Iter begin, Iter end) {
307
+ return std::accumulate(
308
+ begin,
309
+ end,
310
+ c10::SymInt(1),
311
+ [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
312
+ }
313
+
314
+ #define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy) \
315
+ C10_API RetTy operator%(const SymInt& a, scalar_t b); \
316
+ C10_API RetTy operator%(scalar_t a, const SymInt& b);
317
+
318
+ #define DECLARE_SYMINT_OP(scalar_t, RetTy) \
319
+ C10_API RetTy operator+(const SymInt& a, scalar_t b); \
320
+ C10_API RetTy operator-(const SymInt& a, scalar_t b); \
321
+ C10_API RetTy operator*(const SymInt& a, scalar_t b); \
322
+ C10_API RetTy operator/(const SymInt& a, scalar_t b); \
323
+ C10_API RetTy operator+(scalar_t a, const SymInt& b); \
324
+ C10_API RetTy operator-(scalar_t a, const SymInt& b); \
325
+ C10_API RetTy operator*(scalar_t a, const SymInt& b); \
326
+ C10_API RetTy operator/(scalar_t a, const SymInt& b); \
327
+ C10_API bool operator==(const SymInt& a, scalar_t b); \
328
+ C10_API bool operator!=(const SymInt& a, scalar_t b); \
329
+ C10_API bool operator<(const SymInt& a, scalar_t b); \
330
+ C10_API bool operator<=(const SymInt& a, scalar_t b); \
331
+ C10_API bool operator>(const SymInt& a, scalar_t b); \
332
+ C10_API bool operator>=(const SymInt& a, scalar_t b); \
333
+ C10_API bool operator==(scalar_t a, const SymInt& b); \
334
+ C10_API bool operator!=(scalar_t a, const SymInt& b); \
335
+ C10_API bool operator<(scalar_t a, const SymInt& b); \
336
+ C10_API bool operator<=(scalar_t a, const SymInt& b); \
337
+ C10_API bool operator>(scalar_t a, const SymInt& b); \
338
+ C10_API bool operator>=(scalar_t a, const SymInt& b);
339
+
340
+ DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt)
341
+ DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt)
342
+ DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt)
343
+ DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt)
344
+ DECLARE_SYMINT_OP(int64_t, SymInt)
345
+ DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work
346
+ DECLARE_SYMINT_OP(uint64_t, SymInt)
347
+ DECLARE_SYMINT_OP(uint32_t, SymInt)
348
+ DECLARE_SYMINT_OP(double, SymFloat)
349
+ DECLARE_SYMINT_OP(float, SymFloat) // just for completeness
350
+
351
+ // On OSX size_t is different than uint64_t so we have to
352
+ // define it separately
353
+ #if defined(__APPLE__)
354
+ DECLARE_SYMINT_OP_INTONLY(size_t, SymInt)
355
+ DECLARE_SYMINT_OP(size_t, SymInt)
356
+ #endif
357
+
358
+ #undef DECLARE_SYMINT_OP
359
+
360
+ C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s);
361
+ C10_API SymInt operator-(const SymInt& s);
362
+ } // namespace c10
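A minimal sketch of the value-type API for plain (non-symbolic) ints, including the multiply_integers helper defined above; it assumes a c10/libtorch build.

#include <c10/core/SymInt.h>
#include <iostream>
#include <vector>

int main() {
  std::vector<c10::SymInt> sizes{c10::SymInt(2), c10::SymInt(3), c10::SymInt(4)};

  // Fold the sizes into a single SymInt, as a kernel computing numel might.
  c10::SymInt numel = c10::multiply_integers(sizes);
  std::cout << numel << "\n";  // 24

  // Small non-negative ints are never heap-allocated, so maybe_as_int is set.
  if (auto v = numel.maybe_as_int()) {
    std::cout << *v + 1 << "\n";  // 25
  }
  return 0;
}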
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Optional.h>
7
+
8
+ namespace c10 {
9
+ using SymIntArrayRef = ArrayRef<SymInt>;
10
+
11
+ inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) {
12
+ return IntArrayRef(reinterpret_cast<const int64_t*>(ar.data()), ar.size());
13
+ }
14
+
15
+ // TODO: a SymIntArrayRef containing a heap allocated large negative integer
16
+ // can actually technically be converted to an IntArrayRef... but not with
17
+ // the non-owning API we have here. We can't reinterpret cast; we have to
18
+ // allocate another buffer and write the integers into it. If you need it,
19
+ // we can do it. But I don't think you need it.
20
+
21
+ inline c10::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
22
+ c10::SymIntArrayRef ar) {
23
+ for (const c10::SymInt& sci : ar) {
24
+ if (sci.is_heap_allocated()) {
25
+ return c10::nullopt;
26
+ }
27
+ }
28
+
29
+ return {asIntArrayRefUnchecked(ar)};
30
+ }
31
+
32
+ inline at::IntArrayRef asIntArrayRefSlow(
33
+ c10::SymIntArrayRef ar,
34
+ const char* file,
35
+ int64_t line) {
36
+ for (const c10::SymInt& sci : ar) {
37
+ TORCH_CHECK(
38
+ !sci.is_heap_allocated(),
39
+ file,
40
+ ":",
41
+ line,
42
+ ": SymIntArrayRef expected to contain only concrete integers");
43
+ }
44
+ return asIntArrayRefUnchecked(ar);
45
+ }
46
+
47
+ #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__)
48
+
49
+ // Prefer using a more semantic constructor, like
50
+ // fromIntArrayRefKnownNonNegative
51
+ inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) {
52
+ return SymIntArrayRef(
53
+ reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
54
+ }
55
+
56
+ inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) {
57
+ return fromIntArrayRefUnchecked(array_ref);
58
+ }
59
+
60
+ inline SymIntArrayRef fromIntArrayRefSlow(IntArrayRef array_ref) {
61
+ for (long i : array_ref) {
62
+ TORCH_CHECK(
63
+ SymInt::check_range(i),
64
+ "IntArrayRef contains an int that cannot be represented as a SymInt: ",
65
+ i);
66
+ }
67
+ return SymIntArrayRef(
68
+ reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
69
+ }
70
+
71
+ } // namespace c10
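A small sketch of converting back and forth between IntArrayRef and SymIntArrayRef with the helpers above; since the values are small non-negative ints, both checked conversions succeed (assuming a c10/libtorch build).

#include <c10/core/SymIntArrayRef.h>
#include <iostream>

int main() {
  int64_t dims[] = {4, 8, 16};
  c10::IntArrayRef int_ref(dims, 3);

  // Checked int -> SymInt view (no copy; the same buffer is reinterpreted).
  c10::SymIntArrayRef sym_ref = c10::fromIntArrayRefSlow(int_ref);

  // Checked SymInt -> int view, via the convenience macro defined above.
  c10::IntArrayRef back = C10_AS_INTARRAYREF_SLOW(sym_ref);
  std::cout << back[0] << " " << back[1] << " " << back[2] << "\n";
  return 0;
}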
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h ADDED
@@ -0,0 +1,210 @@
1
+ #pragma once
2
+ #include <c10/core/SymBool.h>
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/util/DimVector.h>
5
+
6
+ #include <atomic>
7
+ #include <mutex>
8
+
9
+ namespace c10 {
10
+
11
+ class C10_API SymbolicShapeMeta {
12
+ public:
13
+ // Basic metadata from which other quantities are derived
14
+ SymDimVector sizes_ = {0};
15
+ SymDimVector strides_ = {1};
16
+ SymInt storage_offset_ = 0;
17
+
18
+ bool strides_valid_ = true; // e.g. for sparse where there are no strides
19
+
20
+ SymbolicShapeMeta() = default;
21
+ SymbolicShapeMeta(const SymbolicShapeMeta& other);
22
+
23
+ void refresh_numel() {
24
+ // Non-const, don't need to hold mutables_ lock
25
+ available_.fetch_and(~numel_avail);
26
+ numel_ = 1;
27
+ }
28
+
29
+ void refresh_contiguous() {
30
+ // Non-const, don't need to hold mutables_ lock
31
+ available_.fetch_and(numel_avail);
32
+ is_contiguous_ = false;
33
+ is_channels_last_contiguous_ = false;
34
+ is_channels_last_3d_contiguous_ = false;
35
+ is_channels_last_ = false;
36
+ is_channels_last_3d_ = false;
37
+ is_non_overlapping_and_dense_ = false;
38
+ }
39
+
40
+ int64_t dim() const {
41
+ return static_cast<int64_t>(sizes_.size());
42
+ }
43
+
44
+ // Accessors for derived quantities, computed lazily on first access
45
+
46
+ bool has_numel() const {
47
+ return available_.load() & numel_avail;
48
+ }
49
+ bool has_is_contiguous() const {
50
+ return available_.load() & is_contiguous_avail;
51
+ }
52
+ bool has_is_channels_last_contiguous() const {
53
+ return available_.load() & is_channels_last_contiguous_avail;
54
+ }
55
+ bool has_is_channels_last_3d_contiguous() const {
56
+ return available_.load() & is_channels_last_3d_contiguous_avail;
57
+ }
58
+ bool has_is_channels_last() const {
59
+ return available_.load() & is_channels_last_avail;
60
+ }
61
+ bool has_is_channels_last_3d() const {
62
+ return available_.load() & is_channels_last_3d_avail;
63
+ }
64
+ bool has_is_non_overlapping_and_dense() const {
65
+ return available_.load() & is_non_overlapping_and_dense_avail;
66
+ }
67
+
68
+ // Accessors to cached derived properties
69
+ // DO NOT call with mutables_ lock held
70
+ const SymInt& numel() const {
71
+ if (C10_UNLIKELY(!has_numel())) {
72
+ init_numel();
73
+ }
74
+ return numel_;
75
+ }
76
+
77
+ const SymBool& is_contiguous() const {
78
+ if (C10_UNLIKELY(!has_is_contiguous())) {
79
+ init_is_contiguous();
80
+ }
81
+ return is_contiguous_;
82
+ }
83
+
84
+ const SymBool& is_channels_last_contiguous() const {
85
+ if (C10_UNLIKELY(!has_is_channels_last_contiguous())) {
86
+ init_is_channels_last_contiguous();
87
+ }
88
+ return is_channels_last_contiguous_;
89
+ }
90
+
91
+ const SymBool& is_channels_last_3d_contiguous() const {
92
+ if (C10_UNLIKELY(!has_is_channels_last_3d_contiguous())) {
93
+ init_is_channels_last_3d_contiguous();
94
+ }
95
+ return is_channels_last_3d_contiguous_;
96
+ }
97
+
98
+ const SymBool& is_channels_last() const {
99
+ if (C10_UNLIKELY(!has_is_channels_last())) {
100
+ init_is_channels_last();
101
+ }
102
+ return is_channels_last_;
103
+ }
104
+
105
+ const SymBool& is_channels_last_3d() const {
106
+ if (C10_UNLIKELY(!has_is_channels_last_3d())) {
107
+ init_is_channels_last_3d();
108
+ }
109
+ return is_channels_last_3d_;
110
+ }
111
+
112
+ const SymBool& is_non_overlapping_and_dense() const {
113
+ if (C10_UNLIKELY(!has_is_non_overlapping_and_dense())) {
114
+ init_is_non_overlapping_and_dense();
115
+ }
116
+ return is_non_overlapping_and_dense_;
117
+ }
118
+
119
+ // Assumptions so we can short-circuit computation
120
+ // NOTE: Don't need to lock mutables_ since these aren't const
121
+ void assume_contiguous(SymBool val = true) {
122
+ is_contiguous_ = std::move(val);
123
+ available_.fetch_or(is_contiguous_avail);
124
+ }
125
+ void assume_channels_last_contiguous(SymBool val = true) {
+ is_channels_last_contiguous_ = std::move(val);
+ available_.fetch_or(is_channels_last_contiguous_avail);
128
+ }
129
+ void assume_channels_last_3d_contiguous(SymBool val = true) {
+ is_channels_last_3d_contiguous_ = std::move(val);
+ available_.fetch_or(is_channels_last_3d_contiguous_avail);
+ }
+ void assume_channels_last(SymBool val = true) {
+ is_channels_last_ = std::move(val);
+ available_.fetch_or(is_channels_last_avail);
+ }
+ void assume_channels_last_3d(SymBool val = true) {
+ is_channels_last_3d_ = std::move(val);
+ available_.fetch_or(is_channels_last_3d_avail);
+ }
+ void assume_non_overlapping_and_dense(SymBool val = true) {
+ is_non_overlapping_and_dense_ = std::move(val);
+ available_.fetch_or(is_non_overlapping_and_dense_avail);
+ }
+
+ private:
+ SymBool compute_contiguous() const;
+ SymBool compute_channels_last_contiguous_2d() const;
+ SymBool compute_channels_last_contiguous_3d() const;
+ SymBool compute_strides_like_channels_last_2d() const;
+ SymBool compute_strides_like_channels_last_3d() const;
+ SymBool compute_non_overlapping_and_dense() const;
+
+ // These are little wrappers over the real compute_ functions that
+ // can make use of other contiguity fields to short circuit.
+ // They need to be implemented separately for SymBool, as SymBool does
+ // not short circuit.
+ // TODO: should the SymBool cases avoid the short circuit? Need to reason
+ // if its correct, and reason if the simpler expressions are better for
+ // analysis (maybe not!)
+
+ SymBool compute_channels_last_contiguous_3d_dim5() const;
+ SymBool compute_channels_last_2d_dim5() const;
+ SymBool compute_channels_last_3d_dim5() const;
+ SymBool compute_is_non_overlapping_and_dense_dim4() const;
+ SymBool compute_is_non_overlapping_and_dense_dim5() const;
+ SymBool compute_is_non_overlapping_and_dense_anydim() const;
+
+ void init_numel() const;
+ void init_is_contiguous() const;
+ void init_is_channels_last_contiguous() const;
+ void init_is_channels_last_3d_contiguous() const;
+ void init_is_channels_last() const;
+ void init_is_channels_last_3d() const;
+ void init_is_non_overlapping_and_dense() const;
+
+ // NOTE: These only set if !has_foo()
+ void set_numel(SymInt val) const;
+ void set_is_contiguous(SymBool val) const;
+ void set_is_channels_last_contiguous(SymBool val) const;
+ void set_is_channels_last_3d_contiguous(SymBool val) const;
+ void set_is_channels_last(SymBool val) const;
+ void set_is_channels_last_3d(SymBool val) const;
+ void set_is_non_overlapping_and_dense(SymBool val) const;
+
+ // Lazily initialized variables, with the corresponding available_ flag
+ // indicating whether the value has been initialized
+ mutable std::atomic<int> available_{0};
+ enum avail {
+ numel_avail = 1 << 0,
+ is_contiguous_avail = 1 << 1,
+ is_channels_last_contiguous_avail = 1 << 2,
+ is_channels_last_3d_contiguous_avail = 1 << 3,
+ is_channels_last_avail = 1 << 4,
+ is_channels_last_3d_avail = 1 << 5,
+ is_non_overlapping_and_dense_avail = 1 << 6,
+ };
+
+ // Mutex to prevent races when initializing the variable from const accessors
+ mutable std::mutex mutables_;
+ mutable SymInt numel_ = 1;
+ mutable SymBool is_contiguous_{true};
+ mutable SymBool is_channels_last_contiguous_{false};
+ mutable SymBool is_channels_last_3d_contiguous_{false};
+ mutable SymBool is_channels_last_{false};
+ mutable SymBool is_channels_last_3d_{false};
+ mutable SymBool is_non_overlapping_and_dense_{true};
+ };
+
+ } // namespace c10
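
The class above lazily computes and caches derived properties behind an atomic availability bitmask, with a mutex serializing first-time initialization from const accessors. Below is a minimal, self-contained sketch of that pattern for a single cached property; the names (CachedMeta, numel) are illustrative only and this is not the c10 implementation.

// Sketch of the lazy-cache pattern: an atomic bitmask records which cached
// values are available, and a mutex guards first-time initialization that
// happens inside const accessors.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <mutex>

class CachedMeta {
 public:
  CachedMeta(int64_t rows, int64_t cols) : rows_(rows), cols_(cols) {}

  bool has_numel() const {
    return available_.load() & numel_avail;
  }

  // Accessor to the cached derived property; safe to call concurrently.
  int64_t numel() const {
    if (!has_numel()) {
      init_numel();
    }
    return numel_;
  }

 private:
  void init_numel() const {
    std::lock_guard<std::mutex> lock(mutables_);
    if (!has_numel()) {                  // re-check under the lock
      numel_ = rows_ * cols_;            // the "expensive" derived computation
      available_.fetch_or(numel_avail);  // publish only after the value is set
    }
  }

  enum avail : int { numel_avail = 1 << 0 };

  int64_t rows_;
  int64_t cols_;
  mutable std::atomic<int> available_{0};
  mutable std::mutex mutables_;
  mutable int64_t numel_ = 0;
};

int main() {
  CachedMeta m(3, 4);
  std::cout << m.numel() << "\n";  // prints 12; computed once, then cached
}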
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h ADDED
@@ -0,0 +1,773 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/core/DefaultDtype.h>
5
+ #include <c10/core/Device.h>
6
+ #include <c10/core/Layout.h>
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/ScalarType.h>
9
+ #include <c10/core/ScalarTypeToTypeMeta.h>
10
+
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Optional.h>
13
+
14
+ #include <iosfwd>
15
+ #include <utility>
16
+
17
+ namespace c10 {
18
+
19
+ DispatchKey computeDispatchKey(
20
+ c10::optional<ScalarType> dtype,
21
+ c10::optional<Layout> layout,
22
+ c10::optional<Device> device);
23
+
24
+ inline ScalarType dtype_or_default(c10::optional<ScalarType> dtype) {
25
+ return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); });
26
+ }
27
+
28
+ inline caffe2::TypeMeta dtype_or_default(
29
+ c10::optional<caffe2::TypeMeta> dtype) {
30
+ return value_or_else(dtype, [] { return get_default_dtype(); });
31
+ }
32
+
33
+ inline Layout layout_or_default(c10::optional<Layout> layout) {
34
+ return layout.value_or(kStrided);
35
+ }
36
+
37
+ inline Device device_or_default(c10::optional<Device> device) {
38
+ return value_or_else(device, [] { return Device(kCPU); });
39
+ }
40
+
41
+ inline bool pinned_memory_or_default(c10::optional<bool> pinned_memory) {
42
+ return pinned_memory.value_or(false);
43
+ }
44
+
45
+ /// A class to encapsulate construction axes of a Tensor. TensorOptions was
46
+ /// designed to support the Python style API for specifying construction options
47
+ /// on factory functions, e.g.,
48
+ ///
49
+ /// torch.zeros(2, 3, dtype=torch.int32)
50
+ ///
51
+ /// Because C++ doesn't natively support keyword arguments, there must be
52
+ /// another way of specifying keyword-like arguments. TensorOptions is a
53
+ /// builder class which can be used to construct this "dictionary" of keyword
54
+ /// arguments: functions which support TensorOptions conventionally take this
55
+ /// argument optionally as their last argument.
56
+ ///
57
+ /// WARNING: In PyTorch, there are `torch::` variants of factory functions,
58
+ /// e.g., torch::zeros for at::zeros. These return Variables (while the
59
+ /// stock ATen functions return plain Tensors). If you mix these functions
60
+ /// up, you WILL BE SAD.
61
+ ///
62
+ /// Rather than use the constructor of this class directly, you should prefer to
63
+ /// use the constructor functions, and then chain setter methods on top of them.
64
+ ///
65
+ /// at::device(at::kCUDA).dtype(kInt)
66
+ /// at::dtype(at::kInt)
67
+ ///
68
+ /// Additionally, anywhere a TensorOptions is expected, you can directly
69
+ /// pass at::kCUDA / at::kInt, and it will implicitly convert to a
70
+ /// TensorOptions.
71
+ ///
72
+ /// Here are some recommended ways to create a 2x2 tensor of zeros
73
+ /// with certain properties. These all *implicitly* make use of
74
+ /// TensorOptions, even if they don't mention the class explicitly:
75
+ ///
76
+ /// at::zeros({2,2}, at::kCUDA);
77
+ /// at::zeros({2,2}, at::kLong);
78
+ /// at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong()));
79
+ /// at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1
80
+ /// at::zeros({2,2}, at::requires_grad());
81
+ ///
82
+
83
+ /// NOTE [ TensorOptions Constructors ]
84
+ ///
85
+ /// TensorOptions is like a dictionary with entries from the set:
86
+ /// {requires_grad, device, dtype, layout}, where each entry may be
87
+ /// unspecified (i.e., is optional). It is used to specify the properties of
88
+ /// tensors in many places both in C++ internal and API, e.g., tensor factory
89
+ /// methods like `at::empty({10}, options)`, tensor conversions like
90
+ /// `tensor.to(...)`, etc.
91
+ ///
92
+ /// To provide a simple API that is consistent with Python, where one can do
93
+ /// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or a
94
+ /// `torch.layout`, we want TensorOptions to be implicitly convertible from
95
+ /// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we have
96
+ /// three implicit constructors from each of these three types.
97
+ ///
98
+ /// This is sufficient for `ScalarType` and `Layout` as they are simple Enum
99
+ /// classes. However, `Device` is an ordinary class with implicit constructors
100
+ /// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be
101
+ /// consistent with Python API, where strings are treated as equivalent with a
102
+ /// `torch.device` object (e.g., "cuda:1" can be passed to everywhere a
103
+ /// `torch.device("cuda:1")` is accepted). To support the syntax
104
+ /// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure
105
+ /// that `TensorOptions` is implicitly constructible with any arguments that a
106
+ /// `Device` can constructed from. So we have,
107
+ ///
108
+ /// /* implicit */ TensorOptions(T&& device) : TensorOptions() {
109
+ /// this->set_device(device);
110
+ /// }
111
+ ///
112
+ /// template <typename... Args,
113
+ /// typename = std::enable_if_t<std::is_constructible<Device,
114
+ /// Args&&...>::value>>
115
+ /// /* implicit */ TensorOptions(Args&&... args)
116
+ /// : TensorOptions(Device(std::forward<Args>(args)...)) {}
117
+ ///
118
+ ///
119
+ /// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`.
120
+ /// Compiler will complain about ambiguity between the copy constructor and the
121
+ /// `Device` constructor because `{kCUDA, 1}` can be converted to both a
122
+ /// `TensorOption` and a `Device`.
123
+ ///
124
+ /// To get around this, we templatize the `Device` constructor. Since overload
125
+ /// resolution is done before template resolution, our problem is solved.
126
+
127
+ DispatchKey computeDispatchKey(
128
+ optional<ScalarType> dtype,
129
+ optional<Layout> layout,
130
+ optional<Device> device);
131
+
132
+ struct C10_API TensorOptions {
133
+ TensorOptions()
134
+ : requires_grad_(false),
135
+ pinned_memory_(false),
136
+ has_device_(false),
137
+ has_dtype_(false),
138
+ has_layout_(false),
139
+ has_requires_grad_(false),
140
+ has_pinned_memory_(false),
141
+ has_memory_format_(false) {}
142
+
143
+ /// Constructs a `TensorOptions` object with the given layout.
144
+ /* implicit */ TensorOptions(Layout layout) : TensorOptions() {
145
+ this->set_layout(layout);
146
+ }
147
+
148
+ /// Constructs a `TensorOptions` object with the given device.
149
+ /// See NOTE [ TensorOptions Constructors ] on why this is templatized.
150
+ template <
151
+ typename T,
152
+ typename = std::enable_if_t<std::is_same<std::decay_t<T>, Device>::value>>
153
+ /* implicit */ TensorOptions(T&& device) : TensorOptions() {
154
+ this->set_device(std::forward<T>(device));
155
+ }
156
+
157
+ /// Constructs a `TensorOptions` object from arguments allowed in `Device`
158
+ /// constructors.
159
+ ///
160
+ /// See NOTE [ TensorOptions Constructors ].
161
+ ///
162
+ /// NB: Ideally we only allow implicit constructors here. But there is no easy
163
+ /// way to detect them. So we have this one that allows explicit
164
+ /// constructors too.
165
+ template <
166
+ typename... Args,
167
+ typename =
168
+ std::enable_if_t<std::is_constructible<Device, Args&&...>::value>>
169
+ /* implicit */ TensorOptions(Args&&... args)
170
+ : TensorOptions(Device(std::forward<Args>(args)...)) {}
171
+
172
+ /// Constructs a `TensorOptions` object with the given dtype.
173
+ /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() {
174
+ this->set_dtype(dtype);
175
+ }
176
+
177
+ /// legacy constructor to support ScalarType
178
+ /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() {
179
+ this->set_dtype(dtype);
180
+ }
181
+
182
+ /// Constructs a `TensorOptions` object with the given memory format.
183
+ /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() {
184
+ set_memory_format(memory_format);
185
+ }
186
+
187
+ /// Return a copy of `TensorOptions` with `device` set to the given one, or
188
+ /// cleared if `device` is `nullopt`.
189
+ C10_NODISCARD TensorOptions
190
+ device(c10::optional<Device> device) const noexcept {
191
+ TensorOptions r = *this;
192
+ r.set_device(device);
193
+ return r;
194
+ }
195
+
196
+ /// Return a copy of `TensorOptions` with `device` set to the given one.
197
+ /// (This overload ensures that variadic template c10::optional constructor
198
+ /// for Device work correctly.)
199
+ template <typename... Args>
200
+ C10_NODISCARD TensorOptions device(Args&&... args) const noexcept {
201
+ return device(
202
+ c10::optional<Device>(c10::in_place, std::forward<Args>(args)...));
203
+ }
204
+
205
+ /// Return a copy of `TensorOptions`, but with device set to CUDA, and the
206
+ /// device index set to the given one.
207
+ ///
208
+ /// TODO: This function encourages bad behavior (assuming CUDA is
209
+ /// the only device that matters). Get rid of it / rename it.
210
+ C10_NODISCARD TensorOptions
211
+ device_index(c10::DeviceIndex device_index) const noexcept {
212
+ return device(Device::Type::CUDA, device_index);
213
+ }
214
+
215
+ /// Return a copy of `TensorOptions` with `dtype` set to the given one.
216
+ C10_NODISCARD TensorOptions
217
+ dtype(c10::optional<caffe2::TypeMeta> dtype) const noexcept {
218
+ TensorOptions r = *this;
219
+ r.set_dtype(dtype);
220
+ return r;
221
+ }
222
+
223
+ // legacy function to support ScalarType
224
+ C10_NODISCARD TensorOptions
225
+ dtype(c10::optional<ScalarType> dtype) const noexcept {
226
+ TensorOptions r = *this;
227
+ r.set_dtype(dtype);
228
+ return r;
229
+ }
230
+
231
+ // Since dtype is taken...
232
+ template <typename T>
233
+ TensorOptions& dtype() {
234
+ dtype_ = caffe2::TypeMeta::Make<T>();
235
+ has_dtype_ = true;
236
+ return *this;
237
+ }
238
+
239
+ /// Sets the layout of the `TensorOptions`.
240
+ C10_NODISCARD TensorOptions
241
+ layout(c10::optional<Layout> layout) const noexcept {
242
+ TensorOptions r = *this;
243
+ r.set_layout(layout);
244
+ return r;
245
+ }
246
+
247
+ /// Sets the `requires_grad` property of the `TensorOptions`.
248
+ C10_NODISCARD TensorOptions
249
+ requires_grad(c10::optional<bool> requires_grad) const noexcept {
250
+ TensorOptions r = *this;
251
+ r.set_requires_grad(requires_grad);
252
+ return r;
253
+ }
254
+
255
+ /// Sets the `pinned_memory` property on the `TensorOptions`.
256
+ C10_NODISCARD TensorOptions
257
+ pinned_memory(c10::optional<bool> pinned_memory) const noexcept {
258
+ TensorOptions r = *this;
259
+ r.set_pinned_memory(pinned_memory);
260
+ return r;
261
+ }
262
+
263
+ /// Sets the `memory_format` property on `TensorOptions`.
264
+ C10_NODISCARD TensorOptions
265
+ memory_format(c10::optional<MemoryFormat> memory_format) const noexcept {
266
+ TensorOptions r = *this;
267
+ r.set_memory_format(memory_format);
268
+ return r;
269
+ }
270
+
271
+ /// Returns the device of the `TensorOptions`.
272
+ Device device() const noexcept {
273
+ return device_or_default(device_opt());
274
+ }
275
+
276
+ /// Returns whether the device is specified.
277
+ bool has_device() const noexcept {
278
+ return has_device_;
279
+ }
280
+
281
+ /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
282
+ /// device is not specified.
283
+ c10::optional<Device> device_opt() const noexcept {
284
+ return has_device_ ? c10::make_optional(device_) : c10::nullopt;
285
+ }
286
+
287
+ /// Returns the device index of the `TensorOptions`.
288
+ int32_t device_index() const noexcept {
289
+ return device().index();
290
+ }
291
+
292
+ /// Returns the dtype of the `TensorOptions`.
293
+ caffe2::TypeMeta dtype() const noexcept {
294
+ return dtype_or_default(dtype_opt());
295
+ }
296
+
297
+ /// Returns whether the dtype is specified.
298
+ bool has_dtype() const noexcept {
299
+ return has_dtype_;
300
+ }
301
+
302
+ /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
303
+ /// device is not specified.
304
+ c10::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
305
+ return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
306
+ }
307
+
308
+ /// Returns the layout of the `TensorOptions`.
309
+ Layout layout() const noexcept {
310
+ return layout_or_default(layout_opt());
311
+ }
312
+
313
+ /// Returns whether the layout is specified.
314
+ bool has_layout() const noexcept {
315
+ return has_layout_;
316
+ }
317
+
318
+ /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
319
+ /// layout is not specified.
320
+ c10::optional<Layout> layout_opt() const noexcept {
321
+ return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
322
+ }
323
+
324
+ /// Returns the `requires_grad` property of the `TensorOptions`.
325
+ bool requires_grad() const noexcept {
326
+ return has_requires_grad_ ? requires_grad_ : false;
327
+ }
328
+
329
+ /// Returns whether the `requires_grad` is specified.
330
+ bool has_requires_grad() const noexcept {
331
+ return has_requires_grad_;
332
+ }
333
+
334
+ /// Returns the `requires_grad` property of the `TensorOptions`, or
335
+ /// `c10::nullopt` if `requires_grad` is not specified.
336
+ c10::optional<bool> requires_grad_opt() const noexcept {
337
+ return has_requires_grad_ ? c10::make_optional(requires_grad_)
338
+ : c10::nullopt;
339
+ }
340
+
341
+ /// Returns the `pinned_memory` property of the `TensorOptions`.
342
+ bool pinned_memory() const noexcept {
343
+ return pinned_memory_or_default(pinned_memory_opt());
344
+ }
345
+
346
+ /// Returns whether the `pinned_memory` is specified.
347
+ bool has_pinned_memory() const noexcept {
348
+ return has_pinned_memory_;
349
+ }
350
+
351
+ /// Returns if the layout is sparse
352
+ bool is_sparse() const {
353
+ return layout_ == c10::Layout::Sparse;
354
+ }
355
+
356
+ bool is_sparse_csr() const {
357
+ return layout_ == c10::Layout::SparseCsr;
358
+ }
359
+
360
+ // For compatibility with legacy tensor.type() comparisons
361
+ bool type_equal(const TensorOptions& other) const {
362
+ return computeDispatchKey() == other.computeDispatchKey() &&
363
+ typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype());
364
+ }
365
+
366
+ /// Returns the `pinned_memory` property of the `TensorOptions`, or
367
+ /// `c10::nullopt` if `pinned_memory` is not specified.
368
+ c10::optional<bool> pinned_memory_opt() const noexcept {
369
+ return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
370
+ : c10::nullopt;
371
+ }
372
+
373
+ /// Returns whether the `memory_layout` is specified
374
+ bool has_memory_format() const noexcept {
375
+ return has_memory_format_;
376
+ }
377
+
378
+ // NB: memory_format() getter is PURPOSELY not defined, as the default
379
+ // behavior of memory_format varies from function to function.
380
+
381
+ /// Returns the `memory_layout` property of `TensorOptions, or
382
+ /// `c10::nullopt` if `memory_format` is not specified.
383
+ c10::optional<MemoryFormat> memory_format_opt() const noexcept {
384
+ return has_memory_format_ ? c10::make_optional(memory_format_)
385
+ : c10::nullopt;
386
+ }
387
+
388
+ // Resolves the ATen backend specified by the current construction axes.
389
+ // TODO: Deprecate this
390
+ Backend backend() const {
391
+ return at::dispatchKeyToBackend(computeDispatchKey());
392
+ }
393
+
394
+ /// Return the right-biased merge of two TensorOptions. This has the
395
+ /// effect of overwriting settings from self with specified options
396
+ /// of options.
397
+ ///
398
+ /// NB: This merging operation does NOT respect device merges.
399
+ /// For example, if you device({kCUDA, 1}).merge_in(kCUDA)
400
+ /// you will get kCUDA in the end! Functions like Tensor.new_empty
401
+ /// ensure the right device is selected anyway by way of a
402
+ /// device guard.
403
+ ///
404
+ TensorOptions merge_in(TensorOptions options) const noexcept {
405
+ TensorOptions merged = *this;
406
+ if (options.has_device())
407
+ merged.set_device(options.device_opt());
408
+ if (options.has_dtype())
409
+ merged.set_dtype(options.dtype_opt());
410
+ if (options.has_layout())
411
+ merged.set_layout(options.layout_opt());
412
+ // NB: requires grad is right biased; not a logical AND/OR!
413
+ if (options.has_requires_grad())
414
+ merged.set_requires_grad(options.requires_grad_opt());
415
+ if (options.has_pinned_memory())
416
+ merged.set_pinned_memory(options.pinned_memory_opt());
417
+ if (options.has_memory_format())
418
+ merged.set_memory_format(options.memory_format_opt());
419
+ return merged;
420
+ }
421
+
422
+ // TODO remove after TensorOptions rationalization
423
+ TensorOptions merge_memory_format(
424
+ c10::optional<MemoryFormat> optional_memory_format) const noexcept {
425
+ TensorOptions merged = *this;
426
+ if (optional_memory_format.has_value()) {
427
+ merged.set_memory_format(*optional_memory_format);
428
+ }
429
+ return merged;
430
+ }
431
+
432
+ // INVARIANT: computeDispatchKey returns only the subset of dispatch keys for
433
+ // which dispatchKeyToBackend is injective, if it is defined at all (for
434
+ // the most part, this just means that this function never returns an
435
+ // Autograd key)
436
+ DispatchKey computeDispatchKey() const {
437
+ return c10::computeDispatchKey(
438
+ optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt());
439
+ }
440
+
441
+ private:
442
+ // These methods are currently private because I'm not sure if it's wise
443
+ // to actually publish them. They are methods because I need them in
444
+ // the constructor and the functional API implementation.
445
+ //
446
+ // If you really, really need it, you can make these public, but check if you
447
+ // couldn't just do what you need with the functional API. Similarly, these
448
+ // methods are not chainable, because if you wanted chaining, you probably
449
+ // want to use the functional API instead. (It's probably OK to make
450
+ // these chainable, because these functions are all explicitly annotated
451
+ // with a ref-qualifier, the trailing &, that makes them illegal to call
452
+ // on temporaries.)
453
+
454
+ /// Mutably set the device of `TensorOptions`.
455
+ void set_device(c10::optional<Device> device) & noexcept {
456
+ if (device) {
457
+ device_ = *device;
458
+ has_device_ = true;
459
+ } else {
460
+ has_device_ = false;
461
+ }
462
+ }
463
+
464
+ /// Mutably set the dtype of `TensorOptions`.
465
+ void set_dtype(c10::optional<caffe2::TypeMeta> dtype) & noexcept {
466
+ if (dtype) {
467
+ dtype_ = *dtype;
468
+ has_dtype_ = true;
469
+ } else {
470
+ has_dtype_ = false;
471
+ }
472
+ }
473
+
474
+ // legacy function to support ScalarType
475
+ void set_dtype(c10::optional<ScalarType> dtype) & noexcept {
476
+ if (dtype) {
477
+ dtype_ = scalarTypeToTypeMeta(*dtype);
478
+ has_dtype_ = true;
479
+ } else {
480
+ has_dtype_ = false;
481
+ }
482
+ }
483
+
484
+ /// Mutably set the layout of `TensorOptions`.
485
+ void set_layout(c10::optional<Layout> layout) & noexcept {
486
+ if (layout) {
487
+ layout_ = *layout;
488
+ has_layout_ = true;
489
+ } else {
490
+ has_layout_ = false;
491
+ }
492
+ }
493
+
494
+ /// Mutably set the `requires_grad` property of `TensorOptions`.
495
+ void set_requires_grad(c10::optional<bool> requires_grad) & noexcept {
496
+ if (requires_grad) {
497
+ requires_grad_ = *requires_grad;
498
+ has_requires_grad_ = true;
499
+ } else {
500
+ has_requires_grad_ = false;
501
+ }
502
+ }
503
+
504
+ /// Mutably set the `pinned_memory` property of `TensorOptions`.
505
+ void set_pinned_memory(c10::optional<bool> pinned_memory) & noexcept {
506
+ if (pinned_memory) {
507
+ pinned_memory_ = *pinned_memory;
508
+ has_pinned_memory_ = true;
509
+ } else {
510
+ has_pinned_memory_ = false;
511
+ }
512
+ }
513
+
514
+ /// Mutably set the `memory_Format` property of `TensorOptions`.
515
+ void set_memory_format(c10::optional<MemoryFormat> memory_format) & noexcept {
516
+ if (memory_format) {
517
+ memory_format_ = *memory_format;
518
+ has_memory_format_ = true;
519
+ } else {
520
+ has_memory_format_ = false;
521
+ }
522
+ }
523
+
524
+ // WARNING: If you edit TensorOptions to add more options, you
525
+ // may need to adjust the implementation of Tensor::options.
526
+ // The criteria for whether or not Tensor::options must be adjusted
527
+ // is whether or not the new option you added should preserved
528
+ // by functions such as empty_like(); if it should be preserved,
529
+ // you must adjust options().
530
+ //
531
+ // TODO: MemoryFormat is not implemented in this way
532
+
533
+ // NB: We didn't use c10::optional here, because then we can't pack
534
+ // the has_***_ boolean fields.
535
+
536
+ Device device_ = at::kCPU; // 16-bit
537
+ caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 16-bit
538
+ Layout layout_ = at::kStrided; // 8-bit
539
+ MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit
540
+
541
+ // Bitmask required here to get this to fit inside 32 bits (or even 64 bits,
542
+ // for that matter)
543
+
544
+ bool requires_grad_ : 1;
545
+ bool pinned_memory_ : 1;
546
+
547
+ bool has_device_ : 1;
548
+ bool has_dtype_ : 1;
549
+ bool has_layout_ : 1;
550
+ bool has_requires_grad_ : 1;
551
+ bool has_pinned_memory_ : 1;
552
+ bool has_memory_format_ : 1;
553
+ };
554
+
555
+ // We should aspire to fit in one machine-size word; but a size greater than two
556
+ // words is too much. (We are doing terribly on 32-bit archs, where we require
557
+ // three machine size words to store tensor options. Eek!)
558
+ static_assert(
559
+ sizeof(TensorOptions) <= sizeof(int64_t) * 2,
560
+ "TensorOptions must fit in 128-bits");
561
+
562
+ /// Convenience function that returns a `TensorOptions` object with the `dtype`
563
+ /// set to the given one.
564
+ inline TensorOptions dtype(caffe2::TypeMeta dtype) {
565
+ return TensorOptions().dtype(dtype);
566
+ }
567
+
568
+ // legacy function to support ScalarType
569
+ inline TensorOptions dtype(ScalarType dtype) {
570
+ return TensorOptions().dtype(scalarTypeToTypeMeta(dtype));
571
+ }
572
+
573
+ /// Convenience function that returns a `TensorOptions` object with the `layout`
574
+ /// set to the given one.
575
+ inline TensorOptions layout(Layout layout) {
576
+ return TensorOptions().layout(layout);
577
+ }
578
+
579
+ /// Convenience function that returns a `TensorOptions` object with the `device`
580
+ /// set to the given one.
581
+ inline TensorOptions device(Device device) {
582
+ return TensorOptions().device(device);
583
+ }
584
+
585
+ /// Convenience function that returns a `TensorOptions` object with the
586
+ /// `device` set to CUDA and the `device_index` set to the given one.
587
+ inline TensorOptions device_index(int16_t device_index) {
588
+ return TensorOptions().device_index(
589
+ static_cast<c10::DeviceIndex>(device_index));
590
+ }
591
+
592
+ /// Convenience function that returns a `TensorOptions` object with the
593
+ /// `requires_grad` set to the given one.
594
+ inline TensorOptions requires_grad(bool requires_grad = true) {
595
+ return TensorOptions().requires_grad(requires_grad);
596
+ }
597
+
598
+ /// Convenience function that returns a `TensorOptions` object with the
599
+ /// `memory_format` set to the given one.
600
+ inline TensorOptions memory_format(MemoryFormat memory_format) {
601
+ return TensorOptions().memory_format(memory_format);
602
+ }
603
+
604
+ C10_API std::ostream& operator<<(
605
+ std::ostream& stream,
606
+ const TensorOptions& options);
607
+
608
+ template <typename T>
609
+ inline TensorOptions dtype() {
610
+ return dtype(caffe2::TypeMeta::Make<T>());
611
+ }
612
+
613
+ inline std::string toString(const TensorOptions& options) {
614
+ std::ostringstream stream;
615
+ stream << options;
616
+ return stream.str();
617
+ }
618
+
619
+ // This is intended to be a centralized location by which we can determine
620
+ // what an appropriate DispatchKey for a tensor is.
621
+ inline DispatchKey computeDispatchKey(
622
+ c10::optional<ScalarType> dtype,
623
+ c10::optional<Layout> layout,
624
+ c10::optional<Device> device) {
625
+ const auto layout_ = layout_or_default(layout);
626
+ const auto device_ = device_or_default(device);
627
+ switch (layout_) {
628
+ case Layout::Jagged:
629
+ case Layout::Strided: {
630
+ const auto dtype_ = dtype_or_default(dtype);
631
+ switch (device_.type()) {
632
+ #define DO_CASE(device, _) \
633
+ case c10::DeviceType::device: { \
634
+ if (isQIntType(dtype_)) { \
635
+ return DispatchKey::Quantized##device; \
636
+ } \
637
+ return DispatchKey::device; \
638
+ }
639
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
640
+ #undef DO_CASE
641
+ case c10::DeviceType::FPGA:
642
+ return DispatchKey::FPGA;
643
+ case c10::DeviceType::ORT:
644
+ return DispatchKey::ORT;
645
+ case c10::DeviceType::Vulkan:
646
+ return DispatchKey::Vulkan;
647
+ case c10::DeviceType::Metal:
648
+ return DispatchKey::Metal;
649
+ case c10::DeviceType::MKLDNN:
650
+ case c10::DeviceType::OPENGL:
651
+ case c10::DeviceType::OPENCL:
652
+ case c10::DeviceType::IDEEP:
653
+ TORCH_INTERNAL_ASSERT(
654
+ 0,
655
+ "This is a grandfathered Caffe2 device type ",
656
+ device_.type(),
657
+ ", it shouldn't ever convert to a DispatchKey. File a bug describing what you were doing if you think this is in error.");
658
+ default:
659
+ TORCH_CHECK_NOT_IMPLEMENTED(
660
+ false,
661
+ "Unsupported device type for dense layout: ",
662
+ device_.type());
663
+ }
664
+ }
665
+ case Layout::Sparse:
666
+ switch (device_.type()) {
667
+ #define DO_CASE(device, _) \
668
+ case c10::DeviceType::device: { \
669
+ return DispatchKey::Sparse##device; \
670
+ }
671
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
672
+ #undef DO_CASE
673
+ default:
674
+ TORCH_CHECK_NOT_IMPLEMENTED(
675
+ false,
676
+ "Unsupported device type for sparse layout: ",
677
+ device_.type());
678
+ }
679
+ case Layout::Mkldnn:
680
+ switch (device_.type()) {
681
+ case c10::DeviceType::CPU:
682
+ return DispatchKey::MkldnnCPU;
683
+ default:
684
+ TORCH_CHECK_NOT_IMPLEMENTED(
685
+ false,
686
+ "Unsupported device type for mkldnn layout: ",
687
+ device_.type());
688
+ }
689
+ case Layout::SparseCsr:
690
+ case Layout::SparseCsc:
691
+ case Layout::SparseBsr:
692
+ case Layout::SparseBsc:
693
+ switch (device_.type()) {
694
+ case c10::DeviceType::CPU:
695
+ return DispatchKey::SparseCsrCPU;
696
+ case c10::DeviceType::CUDA:
697
+ return DispatchKey::SparseCsrCUDA;
698
+ default:
699
+ AT_ERROR(
700
+ "Unsupported device type for ",
701
+ layout_,
702
+ " layout: ",
703
+ device_.type());
704
+ }
705
+ default:
706
+ TORCH_CHECK(false, "Unsupported layout: ", layout_);
707
+ }
708
+ }
709
+
710
+ inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) {
711
+ switch (dispatch_key) {
712
+ #define DO_CASE(bc, _) case DispatchKey::Sparse##bc:
713
+ C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
714
+ #undef DO_CASE
715
+ return Layout::Sparse;
716
+ case DispatchKey::SparseCsrCPU:
717
+ case DispatchKey::SparseCsrCUDA:
718
+ TORCH_CHECK(
719
+ false,
720
+ "Cannot map DispatchKey ",
721
+ dispatch_key,
722
+ " to a unique layout.");
723
+ case DispatchKey::MkldnnCPU:
724
+ return Layout::Mkldnn;
725
+ default:
726
+ return Layout::Strided;
727
+ }
728
+ }
729
+
730
+ inline c10::DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) {
731
+ switch (dispatch_key) {
732
+ // stuff that's real
733
+ #define DO_CASE(suffix, prefix) \
734
+ case DispatchKey::prefix##suffix: \
735
+ return c10::DeviceType::suffix;
736
+ #define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix)
737
+ C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES)
738
+ #undef DO_CASES
739
+ #undef DO_CASE
740
+
741
+ case DispatchKey::MkldnnCPU:
742
+ return c10::DeviceType::CPU;
743
+ case DispatchKey::Vulkan:
744
+ return c10::DeviceType::Vulkan;
745
+
746
+ case DispatchKey::ORT:
747
+ return c10::DeviceType::ORT;
748
+ default:
749
+ TORCH_CHECK(
750
+ false,
751
+ "DispatchKey ",
752
+ dispatch_key,
753
+ " doesn't correspond to a device");
754
+ }
755
+ }
756
+
757
+ inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) {
758
+ return TensorOptions()
759
+ .layout(dispatchKeyToLayout(dispatch_key))
760
+ .device(dispatchKeyToDeviceType(dispatch_key));
761
+ }
762
+
763
+ namespace detail {
764
+ inline bool backend_supports_empty_operator(const TensorOptions& options) {
765
+ // Quantized backends don't support at::empty().
766
+ // They have separate operators like at::empty_quantized() that take in
767
+ // extra information about how to quantize the tensor.
768
+ return !isQIntType(typeMetaToScalarType(options.dtype()));
769
+ }
770
+
771
+ } // namespace detail
772
+
773
+ } // namespace c10
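
The documentation comments in this header describe a builder-style API for specifying dtype, device, layout and requires_grad. Below is a small usage sketch, assuming a working libtorch/ATen build; the torch:: factory functions and the compile command are assumptions outside this header, and torch:: (rather than at::) is used because it is the usual user-facing entry point that the header's warning about Variables refers to.

// Usage sketch for the builder-style TensorOptions API described above.
// Assumes a libtorch build, e.g.:
//   g++ -std=c++17 example.cpp -ltorch -ltorch_cpu -lc10
#include <torch/torch.h>
#include <iostream>

int main() {
  // Chain setters instead of keyword arguments (cf. torch.zeros(..., dtype=...)).
  auto opts = torch::TensorOptions()
                  .dtype(torch::kInt64)
                  .device(torch::kCPU)
                  .requires_grad(false);

  torch::Tensor t = torch::zeros({2, 2}, opts);

  // Scalar types, layouts and devices also convert to TensorOptions implicitly.
  torch::Tensor u = torch::zeros({2, 2}, torch::kFloat32);

  std::cout << t.dtype() << " " << u.dtype() << "\n";  // prints the element types
}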
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <c10/core/TensorImpl.h>
+
+ namespace c10 {
+
+ struct C10_API UndefinedTensorImpl final : public TensorImpl {
+ public:
+ // Without this, we get:
+ // error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in
+ // device code
+ // (ostensibly because the constexpr tricks MSVC into trying to compile this
+ // function for device as well).
+ #ifdef _WIN32
+ static inline TensorImpl* singleton() {
+ #else
+ static constexpr inline TensorImpl* singleton() {
+ #endif
+ return &_singleton;
+ }
+ #ifdef DEBUG
+ bool has_storage() const override;
+ #endif
+ void set_storage_offset(int64_t offset) override;
+
+ protected:
+ bool is_contiguous_custom(MemoryFormat format) const override;
+ IntArrayRef strides_custom() const override;
+ SymIntArrayRef sym_strides_custom() const override;
+
+ private:
+ UndefinedTensorImpl();
+ static UndefinedTensorImpl _singleton;
+ const char* tensorimpl_type_name() const override;
+ };
+
+ } // namespace c10
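
UndefinedTensorImpl uses a single shared static instance as a sentinel for "no tensor", and identity comparison against it answers defined/undefined queries. The following is a generic, self-contained sketch of that idea; it is illustrative only and much simpler than the real TensorImpl hierarchy (the real header keeps a static data member so that singleton() can be constexpr, whereas this sketch uses a function-local static for brevity).

// Sketch of the static-singleton sentinel pattern.
#include <iostream>

struct Impl {
  virtual ~Impl() = default;
};

struct UndefinedImpl final : Impl {
  static Impl* singleton() {
    static UndefinedImpl inst;  // one shared sentinel instance
    return &inst;
  }

 private:
  UndefinedImpl() = default;
};

bool is_defined(const Impl* p) {
  return p != UndefinedImpl::singleton();
}

int main() {
  Impl* undef = UndefinedImpl::singleton();
  std::cout << std::boolalpha << is_defined(undef) << "\n";  // false
}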
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ #include <c10/core/SymInt.h>
+
+ namespace c10 {
+
+ namespace detail {
+ // This template can only be specialized at int64_t and c10::SymInt;
+ // you'll get linker errors otherwise
+ template <typename T>
+ C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar);
+ } // namespace detail
+
+ template <typename T>
+ T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) {
+ // Inline the fast paths
+ if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) {
+ // For SymInts, we want an explicit control flow to trigger a guard, so we
+ // may as well branch too.
+ if (dim < 0) {
+ return dim + dim_post_expr;
+ }
+ return dim;
+ }
+ // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors)
+ return c10::detail::maybe_wrap_dim_slow<T>(
+ std::move(dim), std::move(dim_post_expr), wrap_scalar);
+ }
+
+ inline int64_t maybe_wrap_dim(
+ int64_t dim,
+ int64_t dim_post_expr,
+ bool wrap_scalar = true) {
+ return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar);
+ }
+
+ inline c10::SymInt maybe_wrap_dim(
+ c10::SymInt dim,
+ c10::SymInt dim_post_expr,
+ bool wrap_scalar = true) {
+ return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar);
+ }
+
+ } // namespace c10
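
A self-contained sketch of the dimension-wrapping semantics implemented above, for the plain int64_t case: a negative dim counts from the end, so for a 4-dimensional tensor, dim -1 maps to 3. This is illustrative, not the c10 code; the real slow path additionally handles 0-dim "scalar" wrapping and produces proper error messages.

#include <cstdint>
#include <iostream>
#include <stdexcept>

int64_t wrap_dim(int64_t dim, int64_t ndim) {
  if (-ndim <= dim && dim < ndim) {  // same fast-path check as above
    return dim < 0 ? dim + ndim : dim;
  }
  throw std::out_of_range("dimension out of range");
}

int main() {
  std::cout << wrap_dim(-1, 4) << "\n";  // 3
  std::cout << wrap_dim(2, 4) << "\n";   // 2
}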
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h ADDED
@@ -0,0 +1,336 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/core/Stream.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+ // Just for C10_ANONYMOUS_VARIABLE
9
+ #include <c10/util/Registry.h>
10
+
11
+ #include <atomic>
12
+
13
+ namespace c10 {
14
+
15
+ // Forward declaration
16
+ class DataPtr;
17
+
18
+ /**
19
+ * Flags defining the behavior of events.
20
+ *
21
+ * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The
22
+ * BACKEND_DEFAULT is what a particular backend would select if no
23
+ * flags were given. PYTORCH_DEFAULT is the PyTorch's framework default
24
+ * choice for events on that backend, which may not be the same. For example,
25
+ * when PyTorch creates a CUDA event it sets the flag
26
+ * CUDA_EVENT_DISABLING_TIMING by default to improve performance.
27
+ *
28
+ * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each
29
+ * backend implementation. Backend-specific flags, like CUDA_EVENT_DEFAULT,
30
+ * should map one-to-one with actual event flags for those backends.
31
+ */
32
+ enum class EventFlag {
33
+ PYTORCH_DEFAULT,
34
+ BACKEND_DEFAULT,
35
+ // CUDA flags
36
+ CUDA_EVENT_DEFAULT,
37
+ CUDA_EVENT_DISABLE_TIMING, // PyTorch-default for CUDA
38
+ // HIP flags
39
+ HIP_EVENT_DEFAULT,
40
+ HIP_EVENT_DISABLE_TIMING, // PyTorch-default for HIP
41
+ // FOR TESTING ONLY
42
+ INVALID
43
+ };
44
+
45
+ namespace impl {
46
+
47
+ /**
48
+ * DeviceGuardImplInterface represents the virtual interface which provides
49
+ * functionality to provide an RAII class for device and stream switching,
50
+ * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is
51
+ * expected to implement and register an implementation of this interface.
52
+ * All classes which inherit from DeviceGuardImplInterface should be declared
53
+ * 'final'.
54
+ *
55
+ * This class exists because we provide a unified interface for performing
56
+ * device guards via DeviceGuard, but we cannot assume that we have actually
57
+ * compiled against the, e.g., CUDA library, which actually implements
58
+ * this guard functionality. In this case, a dynamic dispatch is required
59
+ * to cross the library boundary.
60
+ *
61
+ * If possible, you should directly use implementations of this interface;
62
+ * those uses will be devirtualized.
63
+ */
64
+ struct C10_API DeviceGuardImplInterface {
65
+ DeviceGuardImplInterface() = default;
66
+ DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
67
+ DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
68
+ default;
69
+ DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
70
+ DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
71
+ default;
72
+
73
+ /**
74
+ * Return the type of device managed by this guard implementation.
75
+ */
76
+ virtual DeviceType type() const = 0;
77
+
78
+ /**
79
+ * Set the current device to Device, and return the previous Device.
80
+ */
81
+ virtual Device exchangeDevice(Device) const = 0;
82
+ // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might
83
+ // consider replacing exchangeDevice with a non-virtual function with a baked
84
+ // in implementation; however, note that this will triple the number of
85
+ // virtual calls (when you implement exchangeDevice in a final subclass,
86
+ // the compiler gets to devirtualize everything; it won't do that if you don't
87
+ // define it in the subclass!) A common way to solve this problem is to use
88
+ // some sort of CRTP; however, we can template DeviceGuardImplInterface since
89
+ // we really *do* need it to be virtual. A little boilerplate seems easiest
90
+ // to explain. (Another way around this problem is to provide inline
91
+ // functions that provide the default implementations, but this seems a little
92
+ // hard to explain. In any case, we're only going to have on order of ten
93
+ // implementations of this anyway.)
94
+
95
+ /**
96
+ * Get the current device.
97
+ */
98
+ virtual Device getDevice() const = 0;
99
+
100
+ /**
101
+ * Set the current device to Device.
102
+ */
103
+ virtual void setDevice(Device) const = 0;
104
+
105
+ /**
106
+ * Set the current device to Device, without checking for errors
107
+ * (so, e.g., this can be called from a destructor).
108
+ */
109
+ virtual void uncheckedSetDevice(Device) const noexcept = 0;
110
+
111
+ /**
112
+ * Get the current stream for a given device.
113
+ */
114
+ virtual Stream getStream(Device) const noexcept = 0;
115
+
116
+ /**
117
+ * Get the default stream for a given device.
118
+ */
119
+ virtual Stream getDefaultStream(Device) const {
120
+ TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
121
+ }
122
+
123
+ /**
124
+ * Get a stream from the global pool for a given device.
125
+ */
126
+ virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
127
+ const {
128
+ (void)isHighPriority; // Suppress unused variable warning
129
+ TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
130
+ }
131
+
132
+ /**
133
+ * Set a stream to be the thread local current stream for its device.
134
+ * Return the previous stream for that device. You are NOT required
135
+ * to set the current device to match the device of this stream.
136
+ */
137
+ virtual Stream exchangeStream(Stream) const noexcept = 0;
138
+
139
+ /**
140
+ * Destroys the given event.
141
+ */
142
+ virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
143
+ const noexcept {}
144
+
145
+ /**
146
+ * Increments the event's version and enqueues a job with this version
147
+ * in the stream's work queue. When the stream process that job
148
+ * it notifies all streams waiting on / blocked by that version of the
149
+ * event to continue and marks that version as recorded.
150
+ * */
151
+ virtual void record(
152
+ void** /*event*/,
153
+ const Stream& /*stream*/,
154
+ const DeviceIndex /*device_index*/,
155
+ const c10::EventFlag /*flag*/) const {
156
+ TORCH_CHECK(false, "Backend doesn't support events.");
157
+ }
158
+
159
+ /**
160
+ * Does nothing if the event has not been scheduled to be recorded.
161
+ * If the event was previously enqueued to be recorded, a command
162
+ * to wait for the version of the event that exists at the time of this call
163
+ * is inserted in the stream's work queue.
164
+ * When the stream reaches this command it will stop processing
165
+ * additional commands until that version of the event is marked as recorded.
166
+ */
167
+ virtual void block(void* /*event*/, const Stream& /*stream*/) const {
168
+ TORCH_CHECK(false, "Backend doesn't support events.");
169
+ }
170
+
171
+ /**
172
+ * Returns true if (and only if)
173
+ * (1) the event has never been scheduled to be recorded
174
+ * (2) the current version is marked as recorded.
175
+ * Returns false otherwise.
176
+ */
177
+ virtual bool queryEvent(void* /*event*/) const {
178
+ TORCH_CHECK(false, "Backend doesn't support events.");
179
+ }
180
+
181
+ /**
182
+ * Get the number of devices. WARNING: This is REQUIRED to not raise
183
+ * an exception. If there is some sort of problem, e.g., driver error,
184
+ * you should report that there are zero available devices.
185
+ */
186
+ virtual DeviceIndex deviceCount() const noexcept = 0;
187
+
188
+ /**
189
+ * Return true if all the work previously enqueued on the stream for
190
+ * asynchronous execution has completed running on the device.
191
+ */
192
+ virtual bool queryStream(const Stream& /*stream*/) const {
193
+ TORCH_CHECK(false, "Backend doesn't support querying streams.");
194
+ }
195
+
196
+ /**
197
+ * Wait (by blocking the calling thread) until all the work previously
198
+ * enqueued on the stream has completed running on the device.
199
+ */
200
+ virtual void synchronizeStream(const Stream& /*stream*/) const {
201
+ TORCH_CHECK(false, "Backend doesn't support synchronizing streams.");
202
+ }
203
+
204
+ /**
205
+ * Ensure the caching allocator (if any) is aware that the given DataPtr is
206
+ * being used on the given stream, and that it should thus avoid recycling the
207
+ * DataPtr until all work on that stream is done.
208
+ */
209
+ virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
210
+ }
211
+
212
+ /**
213
+ * Intended use of this class is to leak the DeviceGuardImpl at program end.
214
+ * So you better not call the destructor, buster!
215
+ */
216
+ virtual ~DeviceGuardImplInterface() = default;
217
+ };
218
+
219
+ // A no-op device guard impl that doesn't do anything interesting. Useful
220
+ // for devices that don't actually have a concept of device index. Prominent
221
+ // examples are CPU and Meta.
222
+ template <DeviceType D>
223
+ struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
224
+ NoOpDeviceGuardImpl() = default;
225
+ DeviceType type() const override {
226
+ return D;
227
+ }
228
+ Device exchangeDevice(Device) const override {
229
+ return Device(D, -1); // no-op
230
+ }
231
+ Device getDevice() const override {
232
+ return Device(D, -1);
233
+ }
234
+ void setDevice(Device) const override {
235
+ // no-op
236
+ }
237
+ void uncheckedSetDevice(Device) const noexcept override {
238
+ // no-op
239
+ }
240
+ Stream getStream(Device) const noexcept override {
241
+ // no-op
242
+ return Stream(Stream::DEFAULT, Device(D, -1));
243
+ }
244
+ // NB: These do NOT set the current device
245
+ Stream exchangeStream(Stream) const noexcept override {
246
+ // no-op
247
+ return Stream(Stream::DEFAULT, Device(D, -1));
248
+ }
249
+ DeviceIndex deviceCount() const noexcept override {
250
+ return 1;
251
+ }
252
+
253
+ // Event-related functions
254
+ void record(
255
+ void** /*event*/,
256
+ const Stream& /*stream*/,
257
+ const DeviceIndex /*device_index*/,
258
+ const EventFlag /*flag*/) const override {
259
+ TORCH_CHECK(false, D, " backend doesn't support events.");
260
+ }
261
+ void block(void* /*event*/, const Stream& /*stream*/) const override {
262
+ TORCH_CHECK(false, D, " backend doesn't support events.")
263
+ }
264
+ bool queryEvent(void* /*event*/) const override {
265
+ TORCH_CHECK(false, D, " backend doesn't support events.")
266
+ }
267
+ void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
268
+ const noexcept override {}
269
+
270
+ // Stream-related functions
271
+ bool queryStream(const Stream& /*stream*/) const override {
272
+ return true;
273
+ }
274
+ void synchronizeStream(const Stream& /*stream*/) const override {
275
+ // Don't wait for anything.
276
+ }
277
+ };
278
+
279
+ // The registry is NON-owning. Each stored pointer is std::atomic so
280
+ // that under all interleavings of registry calls the structure is
281
+ // race-free. This doesn't cost us anything on reads in X86. (An
282
+ // unsynchronized implementation probably is OK too, but I didn't want
283
+ // to prove that we never read from device_guard_impl_registry at the
284
+ // same time some registration is occurring. Shiver.)
285
+ //
286
+ // I'd like this registry to be valid even at program destruction time
287
+ // (in case someone uses a DeviceGuard in a destructor to do some cleanup
288
+ // in the CUDA API.) Since there are no direct accesses of the underlying
289
+ // owning objects which I can use to enforce initialization order (unlike
290
+ // in a Meyer singleton), it implies that you must *leak* objects when
291
+ // putting them in the registry. This is done by deleting the destructor
292
+ // on DeviceGuardImplInterface.
293
+ extern C10_API std::atomic<const DeviceGuardImplInterface*>
294
+ device_guard_impl_registry[static_cast<size_t>(
295
+ DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];
296
+
297
+ // I can't conveniently use c10/util/Registry.h for the following reason:
298
+ // c10/util/Registry.h gives me a slow way of Create'ing a object of some
299
+ // interface from the registry, but no way of quickly accessing an already
300
+ // created object. I'll be banging on getDeviceGuardImpl every time we do a
301
+ // DeviceGuard, so I really don't want to be doing an unordered_map lookup.
302
+ // Better if the registration mechanism directly drops its implementation
303
+ // into device_guard_impl_registry.
304
+
305
+ class C10_API DeviceGuardImplRegistrar {
306
+ public:
307
+ DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
308
+ };
309
+
310
+ #define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \
311
+ static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \
312
+ g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl());
313
+
314
+ inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) {
315
+ // Two adjacent int16_t fields DeviceType and DeviceIndex has field access
316
+ // miscompiled on NVCC. To workaround this issue, we apply a mask to the
317
+ // DeviceType. First check if the DeviceType is 16-bit.
318
+ // FB employees can see
319
+ // https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/
320
+ // for more details
321
+ static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit");
322
+ auto p = device_guard_impl_registry[static_cast<size_t>(type) & 0xFF].load();
323
+
324
+ // This seems to be the first place where you make use of a device
325
+ // when you pass devices to factory functions. Give a nicer error
326
+ // message in this case.
327
+ TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices");
328
+ return p;
329
+ }
330
+
331
+ inline bool hasDeviceGuardImpl(DeviceType type) {
332
+ return device_guard_impl_registry[static_cast<size_t>(type)].load();
333
+ }
334
+
335
+ } // namespace impl
336
+ } // namespace c10
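
As a sketch of how a backend hooks into the registry declared above: the C10_REGISTER_GUARD_IMPL macro from this header installs an implementation for a device type, and getDeviceGuardImpl retrieves it. This assumes a c10/libtorch build; the use of PrivateUse1 and the my_backend namespace are purely illustrative, and a real backend would normally supply its own DeviceGuardImplInterface subclass rather than the no-op helper.

#include <c10/core/impl/DeviceGuardImplInterface.h>

namespace my_backend {

// A device type with no real notion of a "current device" can reuse the
// NoOpDeviceGuardImpl helper defined above.
C10_REGISTER_GUARD_IMPL(
    PrivateUse1,
    c10::impl::NoOpDeviceGuardImpl<c10::DeviceType::PrivateUse1>);

} // namespace my_backend

// Callers can then fetch the implementation through the registry:
//   const auto* impl =
//       c10::impl::getDeviceGuardImpl(c10::DeviceType::PrivateUse1);
//   c10::Device prev = impl->exchangeDevice(
//       c10::Device(c10::DeviceType::PrivateUse1, 0));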
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h ADDED
@@ -0,0 +1,104 @@
+ #pragma once
+
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+
+ #include <array>
+
+ namespace c10 {
+ namespace impl {
+
+ // FakeGuardImpl is hardcoded to have eight devices. Not for
+ // any good reason, just to simplify code.
+ constexpr DeviceIndex kFakeGuardImplMaxDevices = 8;
+
+ /**
+ * A fake implementation of DeviceGuardImplInterface suitable for testing.
+ * The current device is modeled as a mutable field in the guard implementation
+ * class. See DeviceGuard_test.cpp for an example use.
+ */
+ template <DeviceType T>
+ struct FakeGuardImpl final : public DeviceGuardImplInterface {
+ static constexpr DeviceType static_type = T;
+ // Runtime device type is not used
+ FakeGuardImpl(DeviceType) {}
+ FakeGuardImpl() = default;
+ DeviceType type() const override {
+ return T;
+ }
+ Device exchangeDevice(Device d) const override {
+ AT_ASSERT(d.type() == type());
+ AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
+ Device old_device = getDevice();
+ if (old_device.index() != d.index()) {
+ current_device_ = d.index();
+ }
+ return old_device;
+ }
+ Device getDevice() const override {
+ return Device(type(), current_device_);
+ }
+ void setDevice(Device d) const override {
+ AT_ASSERT(d.type() == type());
+ AT_ASSERT(d.index() >= 0);
+ AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
+ current_device_ = d.index();
+ }
+ void uncheckedSetDevice(Device d) const noexcept override {
+ current_device_ = d.index();
+ }
+ Stream getStream(Device d) const noexcept override {
+ return Stream(Stream::UNSAFE, d, current_streams_[d.index()]);
+ }
+ Stream exchangeStream(Stream s) const noexcept override {
+ auto old_id = current_streams_[s.device_index()];
+ current_streams_[s.device_index()] = s.id();
+ return Stream(Stream::UNSAFE, s.device(), old_id);
+ }
+ DeviceIndex deviceCount() const noexcept override {
+ return kFakeGuardImplMaxDevices;
+ }
+
+ // Event-related functions
+ void record(
+ void** event,
+ const Stream& stream,
+ const DeviceIndex device_index,
+ const EventFlag flag) const override {}
+ void block(void* event, const Stream& stream) const override {}
+ bool queryEvent(void* event) const override {
+ return true;
+ }
+ void destroyEvent(void* event, const DeviceIndex device_index)
+ const noexcept override {}
+
+ // Convenience methods for testing
+ static DeviceIndex getDeviceIndex() {
+ return current_device_;
+ }
+ static void setDeviceIndex(DeviceIndex i) {
+ AT_ASSERT(i >= 0);
+ AT_ASSERT(i < kFakeGuardImplMaxDevices);
+ current_device_ = i;
+ }
+ static StreamId getCurrentStreamIdFor(DeviceIndex i) {
+ return current_streams_.at(i);
+ }
+ static void resetStreams() {
+ current_streams_.fill(0);
+ }
+
+ private:
+ thread_local static DeviceIndex current_device_;
+ thread_local static std::array<StreamId, kFakeGuardImplMaxDevices>
+ current_streams_;
+ };
+
+ template <DeviceType T>
+ thread_local DeviceIndex FakeGuardImpl<T>::current_device_ = 0;
+
+ template <DeviceType T>
+ thread_local std::array<StreamId, kFakeGuardImplMaxDevices>
+ FakeGuardImpl<T>::current_streams_ = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ } // namespace impl
+ } // namespace c10
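
A small usage sketch of FakeGuardImpl in a test, exercising exchangeDevice / getDevice and the static helpers above. It assumes a c10/libtorch build and uses plain assert() instead of a test framework; the choice of the CUDA device type is only for illustration, since the guard never touches a real device.

#include <c10/core/impl/FakeGuardImpl.h>
#include <cassert>

int main() {
  using c10::Device;
  using c10::DeviceType;
  using FakeCUDAGuardImpl = c10::impl::FakeGuardImpl<DeviceType::CUDA>;

  FakeCUDAGuardImpl impl;
  FakeCUDAGuardImpl::setDeviceIndex(0);

  // Switching the "current device" only mutates thread-local test state.
  Device prev = impl.exchangeDevice(Device(DeviceType::CUDA, 2));
  assert(prev.index() == 0);
  assert(FakeCUDAGuardImpl::getDeviceIndex() == 2);
  assert(impl.getDevice() == Device(DeviceType::CUDA, 2));

  impl.setDevice(Device(DeviceType::CUDA, 1));
  assert(FakeCUDAGuardImpl::getDeviceIndex() == 1);
}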
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ #include <c10/core/impl/PyInterpreter.h>
+
+ namespace c10 {
+ namespace impl {
+
+ struct C10_API GPUTrace {
+ // On the x86 architecture the atomic operations are lock-less.
+ static std::atomic<const PyInterpreter*> gpuTraceState;
+
+ // When PyTorch migrates to C++20, this should be changed to an atomic flag.
+ // Currently, the access to this variable is not synchronized, on the basis
+ // that it will only be flipped once and by the first interpreter that
+ // accesses it.
+ static bool haveState;
+
+ // This function will only register the first interpreter that tries to invoke
+ // it. For all of the next ones it will be a no-op.
+ static void set_trace(const PyInterpreter*);
+
+ static const PyInterpreter* get_trace() {
+ if (!haveState)
+ return nullptr;
+ return gpuTraceState.load(std::memory_order_acquire);
+ }
+ };
+
+ } // namespace impl
+ } // namespace c10
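
GPUTrace above registers a PyInterpreter pointer at most once and guards the acquire load with a cheap non-atomic boolean, relying on the "flipped once" assumption spelled out in its comments. Below is a self-contained sketch of that shape; the Tracer type and function names are made up for illustration, and the unsynchronized boolean mirrors the header's stated assumption rather than being a general recommendation.

#include <atomic>
#include <iostream>

struct Tracer {
  const char* name;
};

static std::atomic<const Tracer*> g_trace_state{nullptr};
static bool g_have_trace = false;  // flipped once, by the first registrant

void set_trace(const Tracer* t) {
  const Tracer* expected = nullptr;
  // Only the first caller installs its pointer; later calls are no-ops.
  if (g_trace_state.compare_exchange_strong(
          expected, t, std::memory_order_release, std::memory_order_relaxed)) {
    g_have_trace = true;
  }
}

const Tracer* get_trace() {
  if (!g_have_trace)  // cheap check before the atomic acquire load
    return nullptr;
  return g_trace_state.load(std::memory_order_acquire);
}

int main() {
  static Tracer first{"first"};
  static Tracer second{"second"};
  set_trace(&first);
  set_trace(&second);  // no-op: only the first registration wins
  std::cout << get_trace()->name << "\n";  // prints "first"
}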
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <atomic>
5
+
6
+ namespace c10 {
7
+ namespace impl {
8
+
9
+ // This TLS controls whether or not we permanently associate PyObject
10
+ // with Tensor the first time it is allocated. When hermetic PyObject
11
+ // TLS is enabled (state is true), we DO NOT save PyObjects to Tensor,
12
+ // meaning you get a distinct PyObject whenever you execute the code in
13
+ // question.
14
+ struct C10_API HermeticPyObjectTLS {
15
+ static void set_state(bool state);
16
+ static bool get_state() {
17
+ // Hypothetical fastpath if torchdeploy/multipy isn't used. Per
18
+ // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
19
+ // this qualifies relaxed access because it is a single-location data
20
+ // structure (only the boolean here).
21
+ //
22
+ // Forgetting about data races for a moment, is there a logical race?
23
+ //
24
+ // - Boolean only ever transitions from false to true. So the
25
+ // critical situation is when one interpreter is already running
26
+ // when a second interpreter switches haveState from false to true.
27
+ //
28
+ // - The first interpreter is indifferent whether or not it sees
29
+ // hasState true/false; obviously false works (this is what the
30
+ // interpreter was previously using; more directly, the interpreter
31
+ // calls into itself as the handler, so being hermetic is not
32
+ // required), and true simply means serviced python operator calls will
33
+ // be hermetic; in these cases it is expected to be functionally
34
+ // equivalent.
35
+ //
36
+ // - The second interpreter MUST see hasState true (as its requests will
37
+ // be forwarded to the first interpreter), but it is assumed that there
38
+ // is a synchronization between the interpreter initialization, and
39
+ // when we actually perform operations, so it is guaranteed to see
40
+ // hasState true.
41
+ //
42
+ // QED.
43
+ //
44
+ // This fastpath is currently disabled so that we can more easily test that
45
+ // hermetic mode works correctly even on stock build of PyTorch.
46
+ if (false && !haveState_.load(std::memory_order_relaxed))
47
+ return false;
48
+ return get_tls_state();
49
+ }
50
+ // Call this from the multipy/torchdeploy top level
51
+ static void init_state();
52
+
53
+ private:
54
+ // This only flipped once from false to true during torchdeploy/multipy
55
+ // initialization, and never again.
56
+ static std::atomic<bool> haveState_;
57
+ static bool get_tls_state();
58
+ };
59
+
60
+ } // namespace impl
61
+ } // namespace c10
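
A hypothetical RAII helper (not part of this header) showing the intended set_state()/get_state() usage: enable hermetic PyObject mode for a scope and restore the previous thread-local value afterwards.

#include <c10/core/impl/HermeticPyObjectTLS.h>

struct HermeticScope {
  bool prev_;
  HermeticScope() : prev_(c10::impl::HermeticPyObjectTLS::get_state()) {
    c10::impl::HermeticPyObjectTLS::set_state(true);
  }
  ~HermeticScope() {
    // Restore whatever the caller had, so scopes nest correctly.
    c10::impl::HermeticPyObjectTLS::set_state(prev_);
  }
};
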
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h ADDED
@@ -0,0 +1,431 @@
+ #pragma once
+
+ // This file provides implementations of InlineDeviceGuard and
+ // InlineOptionalDeviceGuard.
+
+ #include <c10/core/Device.h>
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/core/impl/VirtualGuardImpl.h>
+ #include <c10/util/C++17.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+ namespace impl {
+
+ /**
+  * A DeviceGuard is an RAII class that sets a device to some value
+  * on construction, and resets the device to its original value on
+  * destruction.
+  *
+  * InlineDeviceGuard is a helper class for implementing DeviceGuards.
+  * It is templated over a DeviceGuardImpl (anything that implements
+  * DeviceGuardImplInterface). There are two primary ways to instantiate
+  * InlineDeviceGuard:
+  *
+  *  - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl.
+  *    This is the best way to use InlineDeviceGuard, as all calls are
+  *    devirtualized, giving you code as efficient as straight-line
+  *    calls to cudaGetDevice/cudaSetDevice.
+  *
+  *  - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl
+  *    retrieved from a DeviceType registry. We have explicitly instantiated
+  *    InlineDeviceGuard this way as c10::DeviceGuard.
+  *
+  * If you are in a hurry, you can use InlineDeviceGuard directly:
+  *
+  *    using CUDAGuard = impl::InlineDeviceGuard<CUDAGuardImpl>;
+  *
+  * However, you can provide a better user experience if you explicitly write a
+  * wrapper class that itself contains the template instantiation:
+  *
+  *    class CUDAGuard {
+  *    public:
+  *      // ... the API ...
+  *    private:
+  *      impl::InlineDeviceGuard<CUDAGuardImpl> guard_;
+  *    }
+  *
+  * The wrapper class provides a good place to write documentation, and helps
+  * avoid weird template instantiation errors when a user incorrectly uses the
+  * class.
+  *
+  * If you need to test this class, consider instantiating it with FakeGuardImpl.
+  */
+ template <typename T>
+ class InlineDeviceGuard {
+  public:
+   // Note [Omitted default constructor from RAII]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // In principle, we could add a default constructor to
+   // DeviceGuard which reads the current device and promises to
+   // restore to that device on exit. However, most cases where you
+   // would have written this, you probably meant to actually just
+   // use OptionalDeviceGuard (since you don't actually need the
+   // restore to happen if you don't ever actually set the device).
+   // We remove the constructor here to encourage you to think about
+   // what you actually want to happen.
+   explicit InlineDeviceGuard() = delete;
+
+   /// Set the current device to the passed Device.
+   explicit InlineDeviceGuard(Device device)
+       : impl_(device.type()),
+         original_device_(
+             device.index() == -1 ? impl_.getDevice()
+                                  : impl_.exchangeDevice(device)),
+         current_device_(device.index() == -1 ? original_device_ : device) {}
+
+   /// Set the current device index to the passed DeviceIndex. (The
+   /// device type is inferred from the template parameter T).
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           !std::is_same<U, VirtualGuardImpl>::value>::type>
+   explicit InlineDeviceGuard(DeviceIndex device_index)
+       : InlineDeviceGuard(Device(U::static_type, device_index)) {}
+
+   /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit
+   /// DeviceGuardImplInterface pointer.
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           std::is_same<U, VirtualGuardImpl>::value>::type>
+   explicit InlineDeviceGuard(
+       Device device,
+       const DeviceGuardImplInterface* impl)
+       : impl_(
+             VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))),
+         original_device_(
+             device.index() == -1 ? impl_.getDevice()
+                                  : impl_.exchangeDevice(device)),
+         current_device_(device.index() == -1 ? original_device_ : device) {}
+
+   /// Copy is disallowed
+   InlineDeviceGuard(const InlineDeviceGuard<T>&) = delete;
+   InlineDeviceGuard<T>& operator=(const InlineDeviceGuard<T>&) = delete;
+
+   /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
+   /// which is required for moves on types with nontrivial destructors.
+   InlineDeviceGuard(InlineDeviceGuard<T>&& other) = delete;
+   InlineDeviceGuard& operator=(InlineDeviceGuard<T>&& other) = delete;
+
+   ~InlineDeviceGuard() {
+     impl_.uncheckedSetDevice(original_device_);
+   }
+
+   /// Sets the device to the given one.
+   template <
+       typename U = T,
+       typename std::enable_if<!std::is_same<U, VirtualGuardImpl>::value, int>::
+           type = 0>
+   void set_device(at::Device device) {
+     AT_ASSERT(
+         (U::static_type == DeviceType::HIP && device.is_cuda()) ||
+         device.type() == U::static_type);
+     auto index = device.index();
+     if (index == -1)
+       return;
+     impl_.setDevice(device);
+     current_device_ = device;
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device. This is effectively equivalent to
+   /// set_device when a guard supports only a single device type.
+   template <typename U = T>
+   typename std::enable_if<!std::is_same<U, VirtualGuardImpl>::value>::type
+   reset_device(at::Device device) {
+     set_device(device);
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device (for a possibly different device
+   /// type).
+   ///
+   /// This method is named reset_device to highlight the fact that previous
+   /// device settings from this guard are NOT preserved, even if the device
+   /// has a different device type. For example:
+   ///
+   ///   // CUDA device is 0
+   ///   DeviceGuard g(Device(kCUDA, 1));
+   ///   g.reset_device(Device(kHIP, 2));
+   ///   // CUDA device is 0 (!!)
+   ///
+   /// NOTE: this implementation may skip some device setting if it can prove
+   /// that it is unnecessary.
+   ///
+   /// Optional argument is for testing only.
+   template <typename U = T>
+   typename std::enable_if<std::is_same<U, VirtualGuardImpl>::value>::type
+   reset_device(
+       at::Device device,
+       const impl::DeviceGuardImplInterface* impl = nullptr) {
+     auto index = device.index();
+     if (index == -1)
+       return;
+     if (device.type() == original_device_.type()) {
+       AT_ASSERT(impl == nullptr || impl->type() == device.type());
+       impl_.setDevice(device);
+       current_device_ = device;
+     } else {
+       // Destruct and reconstruct the DeviceGuard in place
+       impl_.setDevice(original_device_);
+       impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl);
+       original_device_ = impl_.exchangeDevice(device);
+       current_device_ = device;
+     }
+   }
+
+   /// Sets the device index to the given one. The device type is inferred
+   /// from the original device type.
+   void set_index(DeviceIndex index) {
+     reset_device(Device(original_device_.type(), index));
+   }
+
+   /// Returns the device that was set at the time of the most recent
+   /// reset_device(), or otherwise the device at construction time.
+   Device original_device() const {
+     return original_device_;
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device/reset_device/set_index.
+   Device current_device() const {
+     return current_device_;
+   }
+
+  protected:
+   T impl_;
+
+  private:
+   Device original_device_;
+   Device current_device_;
+ };
+
+ /**
+  * An OptionalDeviceGuard is an RAII class that sets a device to some value on
+  * initialization, and resets the device to its original value on destruction.
+  *
+  * InlineOptionalDeviceGuard is a helper class for implementing
+  * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to
+  * use this. See OptionalDeviceGuard for user-oriented usage notes.
+  */
+ template <typename T>
+ class InlineOptionalDeviceGuard {
+  public:
+   // Note [Explicit initialization of optional fields]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // Explicit initialization of optional fields
+   // required to workaround an nvcc bug; see
+   // https://github.com/pytorch/pytorch/issues/12117
+
+   /// Creates an uninitialized OptionalDeviceGuard.
+   explicit InlineOptionalDeviceGuard()
+       : guard_() // See Note [Explicit initialization of optional fields]
+   {}
+
+   /// Set the current device to the passed Device, if it is not nullopt.
+   explicit InlineOptionalDeviceGuard(optional<Device> device_opt)
+       : guard_() { // See Note [Explicit initialization of optional fields]
+     if (device_opt.has_value()) {
+       guard_.emplace(device_opt.value());
+     }
+   }
+
+   /// Set the current device to the passed DeviceIndex, if it is not nullopt.
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           !std::is_same<U, VirtualGuardImpl>::value>::type>
+   explicit InlineOptionalDeviceGuard(optional<DeviceIndex> device_index_opt)
+       : guard_() { // See Note [Explicit initialization of optional fields]
+     if (device_index_opt.has_value()) {
+       guard_.emplace(device_index_opt.value());
+     }
+   }
+
+   /// All constructors of DeviceGuard are valid for OptionalDeviceGuard
+   /// and result in initialized OptionalDeviceGuard.
+   template <typename... Args>
+   explicit InlineOptionalDeviceGuard(Args&&... args)
+       : guard_(in_place, std::forward<Args>(args)...) {}
+
+   // TODO: Consider reading Tensor and TensorList constructors here, when
+   // Tensor moves to c10. (These are only valid on OptionalDeviceGuard,
+   // because a Tensor may be undefined, in which case we need an uninitialized
+   // tensor guard.)
+
+   // Note [Move construction for RAII guards is tricky]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // In principle, move construction is useful for terminating
+   // the lifetime of an `OptionalDeviceGuard` early; for example:
+   //
+   //     // current device is d0
+   //     OptionalDeviceGuard g1(d1);
+   //     // current device is d1
+   //     {
+   //       OptionalDeviceGuard g2(std::move(g1));
+   //     }
+   //     // current device is d0!!
+   //
+   // However, it's difficult to implement the move constructor
+   // in a way that works in all situations. For example, consider
+   // the following example:
+   //
+   //     OptionalDeviceGuard g1(d1);
+   //     {
+   //       OptionalDeviceGuard g2(d2);
+   //       {
+   //         OptionalDeviceGuard g3(std::move(g1)); // !!!
+   //       }
+   //     }
+   //
+   // What should the current device be while g3 is in scope... and what
+   // should it be after it goes out of scope? What about g2?
+   // There don't seem to be satisfactory answers for these questions.
+   //
+   // It's in principle possible to raise an error when this occurs
+   // by doing some extra thread-local bookkeeping. But why bother?
+   // Just don't provide the constructor.
+   InlineOptionalDeviceGuard(InlineOptionalDeviceGuard<T>&& other) = delete;
+
+   // Note [Move assignment for RAII guards is tricky]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // Move assignment is deleted, because you need to know which guard was
+   // defined "first", as that guard's original_device_ wins--with the current
+   // representation, we have no way of telling which is the case. (Move
+   // construction does not have this problem, as one guard is always
+   // uninitialized.)
+   //
+   // We can make this clear by way of a pair of examples:
+   //
+   // Example 1:
+   //
+   //  // initial device is n0
+   //  {
+   //    CUDAGuard g1(n1);
+   //    {
+   //      CUDAGuard g2(n2);
+   //      // current device should be n2
+   //      g1 = std::move(g2);
+   //      // current device should still be n2
+   //    }
+   //    // current device should still be n2
+   //  }
+   //  // current device should be n0
+   //
+   // Example 2 (flip the order of the two guards):
+   //
+   //  // initial device is n0
+   //  {
+   //    CUDAGuard g2(n2);
+   //    {
+   //      CUDAGuard g1(n1);
+   //      // current device should be n1
+   //      g1 = std::move(g2);
+   //      // current device should be n2
+   //    }
+   //    // current device should be n0 (since g2 has been vacated)
+   //  }
+   //
+   // In both examples, we need g1 to restore to n0 after move assignment.
+   // However, in example 1, this is determined by the restore value of g1
+   // (prior to the move). In example 2, however, it is determined by the
+   // restore value of g2(!!). We don't know which one should win, without
+   // having a way of telling which guard was allocated first.
+   //
+   // We could solve this with an extra thread-local variable. But no one is
+   // actually using move-assignment. So just get rid of it.
+   InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) =
+       delete;
+
+   /// Sets the device to the given one. Initializes OptionalDeviceGuard if it
+   /// is not already initialized.
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           !std::is_same<U, VirtualGuardImpl>::value>::type>
+   void set_device(at::Device device) {
+     if (!guard_.has_value()) {
+       guard_.emplace(device);
+     } else {
+       guard_->set_device(device);
+     }
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device (for a possibly different device
+   /// type). Initializes OptionalDeviceGuard if it is not already initialized.
+   ///
+   /// See notes on why this is called reset_device on InlineDeviceGuard.
+   ///
+   /// Optional argument is for testing only.
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           std::is_same<U, VirtualGuardImpl>::value>::type>
+   void reset_device(
+       at::Device device,
+       const DeviceGuardImplInterface* impl = nullptr) {
+     if (!guard_.has_value()) {
+       guard_.emplace(device, impl);
+     } else {
+       guard_->reset_device(device, impl);
+     }
+   }
+
+   /// Resets the currently set device to its original device, and then sets the
+   /// current device to the passed device. Initializes the guard if it is
+   /// not already initialized. This is effectively equivalent to set_device
+   /// when a guard supports only a single device type.
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           !std::is_same<U, VirtualGuardImpl>::value>::type>
+   void reset_device(at::Device device) {
+     if (!guard_.has_value()) {
+       guard_.emplace(device);
+     } else {
+       guard_->reset_device(device);
+     }
+   }
+
+   /// Sets the device index to the given one. The device type is statically
+   /// known.
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           !std::is_same<U, VirtualGuardImpl>::value>::type>
+   void set_index(DeviceIndex index) {
+     if (!guard_.has_value()) {
+       guard_.emplace(index);
+     } else {
+       guard_->set_index(index);
+     }
+   }
+
+   /// Returns the device that was set immediately prior to initialization of
+   /// the guard, or nullopt if the guard is uninitialized.
+   optional<Device> original_device() const {
+     return guard_.has_value() ? make_optional(guard_->original_device())
+                               : nullopt;
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device, if the guard is initialized,
+   /// or nullopt if the guard is uninitialized.
+   optional<Device> current_device() const {
+     return guard_.has_value() ? make_optional(guard_->current_device())
+                               : nullopt;
+   }
+
+   /// Restore the original device, resetting this guard to uninitialized state.
+   void reset() {
+     guard_.reset();
+   }
+
+  private:
+   optional<InlineDeviceGuard<T>> guard_;
+ };
+
+ } // namespace impl
+ } // namespace c10
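
Illustrative only: the RAII restore behaviour described above as it surfaces through the public VirtualGuardImpl instantiation, c10::DeviceGuard (assumes a build with a CUDA backend registered).

#include <c10/core/DeviceGuard.h>

void run_on_device_1() {
  // Suppose the current CUDA device is 0 here.
  {
    c10::DeviceGuard guard(c10::Device(c10::kCUDA, 1));
    // Current CUDA device is now 1; enqueue work for device 1 here.
  }
  // Guard destroyed: the current CUDA device is 0 again.
}
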
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h ADDED
@@ -0,0 +1,110 @@
+ #pragma once
+
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/Stream.h>
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/util/Exception.h>
+
+ namespace c10 {
+ namespace impl {
+
+ template <typename T>
+ struct InlineEvent final {
+   InlineEvent() = delete;
+   InlineEvent(
+       const DeviceType _device_type,
+       const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
+       : backend_{_device_type}, device_type_{_device_type}, flag_{_flag} {}
+
+   // Copy constructor and copy assignment operator (deleted)
+   InlineEvent(const InlineEvent&) = delete;
+   InlineEvent& operator=(const InlineEvent&) = delete;
+
+   // Move constructor and move assignment operator
+   InlineEvent(InlineEvent&& other) noexcept
+       : InlineEvent(other.device_type_, other.flag_) {
+     swap(std::move(other));
+   }
+   InlineEvent& operator=(InlineEvent&& other) noexcept {
+     swap(std::move(other));
+     return *this;
+   }
+
+   void swap(InlineEvent&& other) {
+     std::swap(event_, other.event_);
+     std::swap(backend_, other.backend_);
+     std::swap(device_type_, other.device_type_);
+     std::swap(device_index_, other.device_index_);
+     std::swap(flag_, other.flag_);
+     std::swap(was_marked_for_recording_, other.was_marked_for_recording_);
+   }
+
+   ~InlineEvent() noexcept {
+     if (event_)
+       backend_.destroyEvent(event_, device_index_);
+   }
+
+   DeviceType device_type() const noexcept {
+     return device_type_;
+   }
+   DeviceIndex device_index() const noexcept {
+     return device_index_;
+   }
+   EventFlag flag() const noexcept {
+     return flag_;
+   }
+   bool was_marked_for_recording() const noexcept {
+     return was_marked_for_recording_;
+   }
+
+   void recordOnce(const Stream& stream) {
+     if (!was_marked_for_recording_)
+       record(stream);
+   }
+
+   void record(const Stream& stream) {
+     TORCH_CHECK(
+         stream.device_type() == device_type_,
+         "Event device type ",
+         DeviceTypeName(device_type_),
+         " does not match recording stream's device type ",
+         DeviceTypeName(stream.device_type()),
+         ".");
+
+     backend_.record(&event_, stream, device_index_, flag_);
+     was_marked_for_recording_ = true;
+     device_index_ = stream.device_index();
+   }
+
+   void block(const Stream& stream) const {
+     if (!was_marked_for_recording_)
+       return;
+
+     TORCH_CHECK(
+         stream.device_type() == device_type_,
+         "Event device type ",
+         DeviceTypeName(device_type_),
+         " does not match blocking stream's device type ",
+         DeviceTypeName(stream.device_type()),
+         ".");
+
+     backend_.block(event_, stream);
+   }
+
+   bool query() const {
+     if (!was_marked_for_recording_)
+       return true;
+     return backend_.queryEvent(event_);
+   }
+
+  private:
+   void* event_ = nullptr;
+   T backend_;
+   DeviceType device_type_;
+   DeviceIndex device_index_ = -1;
+   EventFlag flag_ = EventFlag::PYTORCH_DEFAULT;
+   bool was_marked_for_recording_ = false;
+ };
+
+ } // namespace impl
+ } // namespace c10
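
Illustrative only: the typical record()/block() ordering for cross-stream synchronization, sketched with the VirtualGuardImpl backend (assumes a runtime backend is registered for the streams' device type).

#include <c10/core/impl/InlineEvent.h>
#include <c10/core/impl/VirtualGuardImpl.h>

void sync_consumer_on_producer(c10::Stream producer, c10::Stream consumer) {
  c10::impl::InlineEvent<c10::impl::VirtualGuardImpl> ev(producer.device_type());
  ev.record(producer);  // mark a point in the producer stream
  ev.block(consumer);   // consumer waits until that point has executed
  // ev.query() reports whether the recorded work has completed.
}
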
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h ADDED
@@ -0,0 +1,256 @@
+ #pragma once
+
+ #include <c10/core/impl/InlineDeviceGuard.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/irange.h>
+
+ namespace c10 {
+ namespace impl {
+
+ /**
+  * A StreamGuard is an RAII class that changes the current device
+  * to the device corresponding to some stream, and changes the
+  * default stream on that device to be this stream.
+  *
+  * InlineStreamGuard is a helper class for implementing StreamGuards.
+  * See InlineDeviceGuard for guidance on how to use this class.
+  */
+ template <typename T>
+ class InlineStreamGuard : private InlineDeviceGuard<T> {
+  public:
+   /// No default constructor, see Note [Omitted default constructor from RAII]
+   explicit InlineStreamGuard() = delete;
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   explicit InlineStreamGuard(Stream stream)
+       : InlineDeviceGuard<T>(stream.device()),
+         original_stream_of_original_device_(
+             this->impl_.getStream(original_device())),
+         original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
+         current_stream_(stream) {}
+
+   /// This constructor exists purely for testing
+   template <
+       typename U = T,
+       typename = typename std::enable_if<
+           std::is_same<U, VirtualGuardImpl>::value>::type>
+   explicit InlineStreamGuard(
+       Stream stream,
+       const DeviceGuardImplInterface* impl)
+       : InlineDeviceGuard<T>(
+             stream.device(),
+             impl ? impl : getDeviceGuardImpl(stream.device_type())),
+         original_stream_of_original_device_(
+             this->impl_.getStream(original_device())),
+         original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
+         current_stream_(stream) {}
+
+   /// Copy is disallowed
+   InlineStreamGuard(const InlineStreamGuard<T>&) = delete;
+   InlineStreamGuard<T>& operator=(const InlineStreamGuard<T>&) = delete;
+
+   /// Move is disallowed, as StreamGuard does not have an uninitialized state,
+   /// which is required for moves on types with nontrivial destructors.
+   InlineStreamGuard(InlineStreamGuard<T>&& other) = delete;
+   InlineStreamGuard& operator=(InlineStreamGuard<T>&& other) = delete;
+
+   ~InlineStreamGuard() {
+     this->impl_.exchangeStream(original_stream_of_current_device_);
+   }
+
+   /// Resets the currently set stream to the original stream and
+   /// the currently set device to the original device. Then,
+   /// set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   ///
+   /// NOTE: this implementation may skip some stream/device setting if
+   /// it can prove that it is unnecessary.
+   ///
+   /// WARNING: reset_stream does NOT preserve previously set streams on
+   /// different devices. If you need to set streams on multiple devices
+   /// use MultiStreamGuard instead.
+   void reset_stream(Stream stream) {
+     // TODO: make a version that takes an impl argument. Unfortunately,
+     // that will require SFINAE because impl is only valid for the
+     // VirtualGuardImpl specialization.
+     if (stream.device() == this->current_device()) {
+       this->impl_.exchangeStream(stream);
+       current_stream_ = stream;
+     } else {
+       // Destruct and reconstruct the StreamGuard in-place
+       this->impl_.exchangeStream(original_stream_of_current_device_);
+       this->reset_device(stream.device());
+       original_stream_of_current_device_ = this->impl_.exchangeStream(stream);
+       current_stream_ = stream;
+     }
+   }
+
+   // It's not clear if set_device should also reset the current stream
+   // if the device is unchanged; therefore, we don't provide it.
+   // The situation is somewhat clearer with reset_device, but it's still
+   // a pretty weird thing to do, so haven't added this either.
+
+   /// Returns the stream of the original device prior to this guard. Subtly,
+   /// the stream returned here is the original stream of the *original*
+   /// device; i.e., it's the stream that your computation *would* have
+   /// been put on, if it hadn't been for this meddling stream guard.
+   /// This is usually what you want.
+   Stream original_stream() const {
+     return original_stream_of_original_device_;
+   }
+
+   /// Returns the most recent stream that was set using this device guard,
+   /// either from construction, or via set_stream.
+   Stream current_stream() const {
+     return current_stream_;
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device/reset_device/set_index.
+   Device current_device() const {
+     return InlineDeviceGuard<T>::current_device();
+   }
+
+   /// Returns the device that was set at the most recent reset_stream(),
+   /// or otherwise the device at construction time.
+   Device original_device() const {
+     return InlineDeviceGuard<T>::original_device();
+   }
+
+  private:
+   Stream
+       original_stream_of_original_device_; // what the user probably cares about
+   Stream original_stream_of_current_device_; // what we need to restore
+   Stream current_stream_;
+ };
+
+ /**
+  * An OptionalStreamGuard is an RAII class that sets a device to some value on
+  * initialization, and resets the device to its original value on destruction.
+  * See InlineOptionalDeviceGuard for more guidance on how to use this class.
+  */
+ template <typename T>
+ class InlineOptionalStreamGuard {
+  public:
+   /// Creates an uninitialized stream guard.
+   explicit InlineOptionalStreamGuard()
+       : guard_() // See Note [Explicit initialization of optional fields]
+   {}
+
+   /// Set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream,
+   /// if the passed stream is not nullopt.
+   explicit InlineOptionalStreamGuard(optional<Stream> stream_opt) : guard_() {
+     if (stream_opt.has_value()) {
+       guard_.emplace(stream_opt.value());
+     }
+   }
+
+   /// All constructors of StreamGuard are valid for OptionalStreamGuard
+   template <typename... Args>
+   explicit InlineOptionalStreamGuard(Args&&... args)
+       : guard_(in_place, std::forward<Args>(args)...) {}
+
+   // See Note [Move construction for RAII guards is tricky]
+   InlineOptionalStreamGuard(InlineOptionalStreamGuard<T>&& other) = delete;
+
+   // See Note [Move assignment for RAII guards is tricky]
+   InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) =
+       delete;
+
+   /// Resets the currently set stream to the original stream and
+   /// the currently set device to the original device. Then,
+   /// set the current device to the device associated with the passed stream,
+   /// and set the current stream on that device to the passed stream.
+   /// Initializes the OptionalStreamGuard if it was not previously initialized.
+   void reset_stream(Stream stream) {
+     if (guard_.has_value()) {
+       guard_->reset_stream(stream);
+     } else {
+       guard_.emplace(stream);
+     }
+   }
+
+   /// Returns the stream that was set at the time the guard was most recently
+   /// initialized, or nullopt if the guard is uninitialized.
+   optional<Stream> original_stream() const {
+     return guard_.has_value() ? make_optional(guard_->original_stream())
+                               : nullopt;
+   }
+
+   /// Returns the most recent stream that was set using this stream guard,
+   /// either from construction, or via reset_stream, if the guard is
+   /// initialized, or nullopt if the guard is uninitialized.
+   optional<Stream> current_stream() const {
+     return guard_.has_value() ? make_optional(guard_->current_stream())
+                               : nullopt;
+   }
+
+   /// Restore the original device and stream, resetting this guard to
+   /// uninitialized state.
+   void reset() {
+     guard_.reset();
+   }
+
+  private:
+   optional<InlineStreamGuard<T>> guard_;
+ };
+
+ template <typename T>
+ class InlineMultiStreamGuard {
+  public:
+   /// Calls `set_stream` on each of the streams in the list.
+   /// This may be useful if you need to set different streams
+   /// for different devices.
+   explicit InlineMultiStreamGuard(ArrayRef<Stream> streams) {
+     if (!streams.empty()) {
+       impl_.emplace(getDeviceTypeOfStreams(streams));
+       original_streams_.reserve(streams.size());
+       for (const Stream& s : streams) {
+         original_streams_.emplace_back(this->impl_->exchangeStream(s));
+       }
+     }
+   }
+
+   /// Copy is disallowed
+   InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete;
+   InlineMultiStreamGuard<T>& operator=(const InlineMultiStreamGuard&) = delete;
+
+   /// Move is disallowed, as StreamGuard does not have an uninitialized state,
+   /// which is required for moves on types with nontrivial destructors.
+   InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete;
+   InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete;
+
+   ~InlineMultiStreamGuard() {
+     for (const Stream& s : original_streams_) {
+       this->impl_->exchangeStream(s);
+     }
+   }
+
+  protected:
+   optional<T> impl_;
+
+  private:
+   /// The original streams that were active on all devices.
+   std::vector<Stream> original_streams_;
+
+   static DeviceType getDeviceTypeOfStreams(ArrayRef<Stream> streams) {
+     TORCH_INTERNAL_ASSERT(!streams.empty());
+     DeviceType type = streams[0].device_type();
+     for (const auto idx : c10::irange(1, streams.size())) {
+       TORCH_CHECK_VALUE(
+           streams[idx].device_type() == type,
+           "Streams have a mix of device types: stream 0 is on ",
+           streams[0].device(),
+           " while stream ",
+           idx,
+           " is on device ",
+           streams[idx].device());
+     }
+     return type;
+   }
+ };
+
+ } // namespace impl
+ } // namespace c10
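
Illustrative only: how the InlineStreamGuard semantics surface through the public c10::StreamGuard wrapper (the VirtualGuardImpl instantiation).

#include <c10/core/StreamGuard.h>

void enqueue_on(c10::Stream side_stream) {
  c10::StreamGuard guard(side_stream);
  // Both the current device and the current stream now follow side_stream;
  // guard.original_stream() is where work would otherwise have been queued.
  // On destruction, the previous device and stream are restored.
}
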
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h ADDED
@@ -0,0 +1,164 @@
+ #pragma once
+
+ #include <c10/core/DispatchKeySet.h>
+ #include <c10/macros/Export.h>
+
+ // TLS management for DispatchKeySet (the "local" DispatchKeySet(s))
+ //
+ // This manages two thread-local DispatchKeySets:
+ //
+ //  - The included type set, which adds a tensor type for consideration
+ //    in dispatch. (For example, you might add Profiling to
+ //    the included type set to turn on profiling on all tensor operations.)
+ //
+ //  - The excluded type set, which disqualifies a tensor type from dispatch.
+ //    (For example, after redispatching on variable, we disqualify
+ //    Autograd so we don't attempt to handle variable again.)
+ //    (Exclusion wins over inclusion.)
+ //
+ // NB: Originally, I implemented the excluded type set as storing the inverted
+ // set, but TLS is defined to be zero-initialized, so this doesn't actually work
+ // (if it's inverted, you want the set to be -1 initialized).
+
+ namespace c10 {
+ namespace impl {
+
+ // POD version of LocalDispatchKeySet. Declared here just so that
+ // we can put it in the guards.
+ // This struct encapsulates special handling for TLS initialization
+ // in the set_included()/included() API so that they reflect the truth.
+ // If you want to create a PODLocalDispatchKeySet with non-zero state,
+ // use set_included() instead of the default constructor.
+ struct C10_API PODLocalDispatchKeySet {
+   uint64_t included_;
+   uint64_t excluded_;
+
+   // See Note [TLS Initialization]
+   DispatchKeySet included() const {
+     return DispatchKeySet(DispatchKeySet::RAW, included_) ^
+         c10::default_included_set;
+   }
+   DispatchKeySet excluded() const {
+     return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^
+         c10::default_excluded_set;
+   }
+
+   void set_included(DispatchKeySet x) {
+     included_ = (x ^ c10::default_included_set).raw_repr();
+   }
+   void set_excluded(DispatchKeySet x) {
+     excluded_ = (x ^ c10::default_excluded_set).raw_repr();
+   }
+ };
+ static_assert(
+     std::is_trivial<PODLocalDispatchKeySet>::value,
+     "PODLocalDispatchKeySet must be a POD type.");
+
+ struct C10_API LocalDispatchKeySet {
+   /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x)
+       : included_(x.included()), excluded_(x.excluded()) {}
+   DispatchKeySet included_;
+   DispatchKeySet excluded_;
+ };
+
+ // thread_local variables cannot be C10_API on Windows.
+ // Inlining this seems to break AutoDispatchBelowAutograd on Android.
+ #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
+ C10_API LocalDispatchKeySet tls_local_dispatch_key_set();
+ #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
+ extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set;
+
+ inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() {
+   // Don't let people fiddle with the thread_local directly just
+   // because they include this header.
+   return raw_local_dispatch_key_set;
+ }
+ #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
+
+ // Internal, use ThreadLocalStateGuard
+ C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);
+
+ // RAII API for manipulating the thread-local dispatch state.
+
+ class C10_API IncludeDispatchKeyGuard {
+  public:
+   IncludeDispatchKeyGuard(DispatchKeySet);
+   IncludeDispatchKeyGuard(DispatchKey k)
+       : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
+   IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
+   IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete;
+   IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete;
+   IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete;
+   ~IncludeDispatchKeyGuard();
+
+  private:
+   // A little micro-optimization to save us from a tls_get_addr call
+   // on destruction
+   PODLocalDispatchKeySet* tls_;
+   DispatchKeySet include_;
+ };
+
+ class C10_API ExcludeDispatchKeyGuard {
+  public:
+   ExcludeDispatchKeyGuard(DispatchKeySet);
+   ExcludeDispatchKeyGuard(DispatchKey k)
+       : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
+   ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
+   ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete;
+   ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete;
+   ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete;
+   ~ExcludeDispatchKeyGuard();
+
+  private:
+   // A little micro-optimization to save us from a tls_get_addr call
+   // on destruction
+   PODLocalDispatchKeySet* tls_;
+   DispatchKeySet exclude_;
+ };
+
+ struct C10_API ForceDispatchKeyGuard {
+  public:
+   ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set)
+       : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {
+     c10::impl::_force_tls_local_dispatch_key_set(key_set);
+   }
+   ForceDispatchKeyGuard(
+       c10::DispatchKeySet include,
+       c10::DispatchKeySet exclude)
+       : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {
+     auto updated_set = saved_keyset_;
+     updated_set.included_ = include;
+     updated_set.excluded_ = exclude;
+     c10::impl::_force_tls_local_dispatch_key_set(updated_set);
+   }
+   ~ForceDispatchKeyGuard() {
+     c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_);
+   }
+
+  private:
+   c10::impl::LocalDispatchKeySet saved_keyset_;
+ };
+
+ // Non-RAII API for manipulating the thread-local dispatch state.
+ // Please prefer the RAII API. The non-RAII API may be useful when
+ // the included/excluded state of a given DispatchKey must span
+ // many calls from Python into C++, so you cannot conveniently
+ // use an RAII guard.
+ //
+ // Example use case: a Python context manager that includes a certain
+ // DispatchKey, to ensure ops running under the context manager dispatch
+ // through that DispatchKey's registered overrides.
+ //
+ // The non-RAII API is less efficient than the RAII guards because both the
+ // getter and setter will do a tls_get_addr lookup (the RAII struct only needs
+ // one!)
+
+ C10_API bool tls_is_dispatch_key_excluded(DispatchKey x);
+ C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state);
+ C10_API bool tls_is_dispatch_key_included(DispatchKey x);
+ C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state);
+ C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks);
+ C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks);
+
+ } // namespace impl
+ } // namespace c10
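
Illustrative only: using the RAII guards declared above to temporarily exclude Autograd from dispatch on the current thread. The c10::autograd_dispatch_keyset constant comes from DispatchKeySet.h (added elsewhere in this commit).

#include <c10/core/impl/LocalDispatchKeySet.h>

void run_without_autograd() {
  c10::impl::ExcludeDispatchKeyGuard no_autograd(c10::autograd_dispatch_keyset);
  // Operator calls made on this thread while the guard is alive skip the
  // Autograd keys; the previous TLS state is restored when it is destroyed.
}
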
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h ADDED
@@ -0,0 +1,242 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/macros/Export.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/intrusive_ptr.h>
+ #include <c10/util/python_stub.h>
+ #include <string>
+ #include <vector>
+
+ // Forward declarations
+
+ namespace c10 {
+ struct IValue;
+ class OperatorHandle;
+ struct TensorImpl;
+ } // namespace c10
+
+ namespace torch {
+ namespace jit {
+ using Stack = std::vector<c10::IValue>;
+ }
+ } // namespace torch
+
+ // Actual implementation
+
+ namespace c10 {
+ namespace impl {
+
+ struct C10_API PyInterpreter;
+
+ // Note [Python interpreter tag]
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // Traditionally, PyTorch is layered such that our Python library
+ // (libtorch_python) references our pure C++ library (libtorch) as the
+ // natural order of things. However, sometimes this natural order is
+ // subverted: C++ objects refer to Python objects (for example, we
+ // store a PyObject* pointer on TensorImpl so that converting from a
+ // C++ Tensor to a Python Tensor is just a memory dereference).
+ //
+ // These unusual orderings must be treated with care. To start, you need to
+ // virtualize the destructor so that the PyObject can be decref'ed on
+ // destruction (because the C++ object itself doesn't know anything about
+ // Python--remember, layering!). This process itself is fraught, since
+ // acquiring the GIL could lead to deadlocks if someone is blocking on you
+ // while holding the GIL. Furthermore, if the C++ objects outlive the
+ // interpreter (which can happen if you stash them in a static global
+ // variable defined in libtorch), you may attempt to decref the object when
+ // the Python interpreter has already been shut down.
+ //
+ // BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python
+ // interpreters in a single process. If a C++ object is accessible from
+ // multiple interpreters, we must take care not to accidentally pass a
+ // PyObject from one interpreter to another interpreter.
+ //
+ // To prevent these mixups, we introduce a PyInterpreter "tag" (object with
+ // a vtable), which specifies a specific Python interpreter.
+ //
+ //  - Any given object can be associated with AT MOST one Python interpreter.
+ //    We represent the interpreter tag as a memory address to an instance of
+ //    a virtual class that is allocated once per interpreter (this is so that
+ //    we can request the interpreter to perform operations for us, if
+ //    necessary).
+ //
+ //  - It can be recorded with a PyObject (PyInterpreterObject) so that
+ //    we know what interpreter the object is associated with, and we can
+ //    raise an error if you try to use the PyObject from the wrong
+ //    interpreter context.
+ //
+ //  - It contains a vtable that can be used to perform various Python
+ //    operations from ordinary C++ code that ordinarily wouldn't be accessible
+ //    from libtorch.
+ //
+ // A simple use case is when a C++ object must be associated with a PyObject.
+ // However, for TensorImpl, we lazily allocate a PyObject the first time the
+ // object passes into Python. The invariants for this situation are more
+ // subtle:
+ //
+ //  - A given TensorImpl's interpreter tag can only go from uninitialized to
+ //    tagged; once tagged, this is a quiescent state (once tagged to an
+ //    interpreter, ALWAYS tagged to that interpreter)
+ //
+ //  - A thread may mutate the PyObject field of a TensorImpl if and only if it
+ //    holds the GIL for the interpreter tagged on the TensorImpl. (If the
+ //    TensorImpl is not tagged, it must first atomically claim its tag before it
+ //    can validly write)
+ //
+ // WARNING: This class has to be written very carefully, because it may be
+ // possible for a Tensor to have a reference to an interpreter corresponding to
+ // a shared library that has ALREADY BEEN UNLOADED. This makes blindly calling
+ // virtual methods very dangerous, because the vtable may be garbage at that
+ // point (on a good day, you might get "pure virtual method called").
+ //
+ // The idea to solve this problem is we always leak PyInterpreters (so they
+ // always stay live even after dlclose), and make sure we can disarm their
+ // virtual methods by indirecting through a separate PyInterpreterVTable
+ // object. This can be replaced with a no-op vtable from libc10.so, which
+ // is guaranteed to stick around until the bitter end.
+ //
+ // NB: The downside with representing PyInterpreter tags as full objects is that
+ // it takes an extra word on TensorImpl. If tags were instead just integer
+ // indices, on 64-bit architectures we could pack the tag and PyObject together
+ // into a single atomic word. On 32-bit architectures we could simply say that
+ // only one Python interpreter is supported (erroring if a nontrivial
+ // interpreter tag is attempted to be set).
+ //
+ // The difficulty with this scheme is we need to maintain an out-of-line table
+ // to get at the PyInterpreters so that we can do virtual method calls on them,
+ // and registration/deregistration to this table must be done in a thread safe
+ // manner. This can be easily done if the number of possible PyInterpreters is
+ // small enough (e.g., 8-bit integer) by simply preallocating an array of
+ // sufficient size to hold all possible interpreters. Surely 128 threads is
+ // more than enough for anyone!
+ //
+ // I didn't decide to do this technique at the moment, because the extra word
+ // added by the PyInterpreter tag takes us to 24 words, which means that we
+ // still fit inside three eight word cache lines. If you need to penny pinch
+ // another word consider doing this!
+
+ struct C10_API PyInterpreterVTable {
+   virtual ~PyInterpreterVTable() = default;
+
+   // Report the name of this interpreter
+   virtual std::string name() const = 0;
+
+   // Run Py_DECREF on a PyObject. We DO NOT assume the GIL is held on call
+   // See NOTE [PyInterpreter::decref takes a `has_pyobj_slot` arg]
+   virtual void decref(PyObject* pyobj, bool has_pyobj_slot) const = 0;
+
+   // Perform a detach by deferring to the __torch_dispatch__ implementation of
+   // detach, which will also arrange for the PyObject to get copied in this
+   // situation
+   virtual c10::intrusive_ptr<TensorImpl> detach(
+       const TensorImpl* self) const = 0;
+
+   // Invoke the Python boxed fallback dispatch to go back into Python
+   virtual void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack)
+       const = 0;
+
+   virtual void reportErrorCallback(PyObject* callback, DispatchKey key)
+       const = 0;
+
+   // This is only invoked in the multipy/torchdeploy situation from
+   // pythonOpRegistrationTrampoline; this lets us get to the Python
+   // interpreter to actually find the appropriate Python op registration
+   // entry to call.
+   virtual void python_op_registration_trampoline(
+       const c10::OperatorHandle& op,
+       c10::DispatchKey,
+       torch::jit::Stack* stack) const = 0;
+
+   virtual void throw_abstract_impl_not_imported_error(
+       std::string opname,
+       const char* pymodule,
+       const char* context) const = 0;
+
+   // Invoke the Python dispatcher to handle this call
+   virtual void python_dispatcher(
+       const c10::OperatorHandle& op,
+       c10::DispatchKeySet,
+       torch::jit::Stack* stack) const = 0;
+
+   virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat)
+       const = 0;
+   virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
+       const = 0;
+   virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0;
+   virtual c10::Device device(const TensorImpl* self) const = 0;
+   virtual int64_t dim(const TensorImpl* self) const = 0;
+   virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0;
+   virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0;
+   virtual c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const = 0;
+   virtual c10::Layout layout(const TensorImpl* self) const = 0;
+   virtual int64_t numel(const TensorImpl* self) const = 0;
+   virtual c10::SymInt sym_numel(const TensorImpl* self) const = 0;
+   virtual c10::SymIntArrayRef sym_strides(const TensorImpl* self) const = 0;
+   virtual c10::SymInt sym_storage_offset(const TensorImpl* self) const = 0;
+
+   virtual void trace_gpu_event_creation(uintptr_t event) const = 0;
+   virtual void trace_gpu_event_deletion(uintptr_t event) const = 0;
+   virtual void trace_gpu_event_record(uintptr_t event, uintptr_t stream)
+       const = 0;
+   virtual void trace_gpu_event_wait(uintptr_t event, uintptr_t stream)
+       const = 0;
+   virtual void trace_gpu_memory_allocation(uintptr_t ptr) const = 0;
+   virtual void trace_gpu_memory_deallocation(uintptr_t ptr) const = 0;
+   virtual void trace_gpu_stream_creation(uintptr_t stream) const = 0;
+   virtual void trace_gpu_device_synchronization() const = 0;
+   virtual void trace_gpu_stream_synchronization(uintptr_t stream) const = 0;
+   virtual void trace_gpu_event_synchronization(uintptr_t event) const = 0;
+
+   virtual void reset_backward_hooks(const TensorImpl* self) const = 0;
+ };
+
+ struct C10_API PyInterpreter {
+   const PyInterpreterVTable* vtable_;
+
+   PyInterpreter(const PyInterpreterVTable* vtable) : vtable_(vtable){};
+
+   const PyInterpreterVTable& operator*() const noexcept {
+     return *vtable_;
+   }
+   const PyInterpreterVTable* operator->() const noexcept {
+     return vtable_;
+   }
+
+   // Disarm this PyInterpreter, making all of its methods noops.
+   // The vtable pointer is not an atomic at the moment, which means
+   // a disarm() invocation that is concurrent with active destructors
+   // is not thread safe and will trigger TSAN. My hope is that this
+   // situation doesn't ever actually happen; tensor destruction should
+   // quiesce when a dlclose happens, and any long lived tensors whose
+   // destructors would be disarmed here only begin the destruction process
+   // on process shutdown (long after the dlclose has occurred).
+   void disarm() noexcept;
+ };
+
+ // PyInterpreterStatus describes what the state of its interpreter tag
+ // is, relative to the thread currently holding the GIL.
+ enum class PyInterpreterStatus {
+   // We just allocated the Tensor, it hasn't escaped to other threads,
+   // we know that it definitely hasn't been tagged to be associated
+   // with an interpreter.
+   DEFINITELY_UNINITIALIZED,
+   // We queried the interpreter field and it looked uninitialized. But
+   // another thread may have raced with us to tag it with some other
+   // interpreter id. So we will have to do a CEX to make sure we can
+   // actually nab it.
+   MAYBE_UNINITIALIZED,
+   // We queried the interpreter field and it was tagged to belong to us.
+   // This means we have sole write access (as we hold the GIL for this
+   // interpreter)
+   TAGGED_BY_US,
+   // Someone else tagged this. We can't use this TensorImpl from Python.
+   TAGGED_BY_OTHER,
+ };
+
+ } // namespace impl
+ } // namespace c10
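
Illustrative only: how code holding a PyInterpreter tag calls into Python through the vtable indirection described above (the indirection is what makes disarm() possible).

#include <c10/core/impl/PyInterpreter.h>

void release_pyobj(c10::impl::PyInterpreter* interp, PyObject* obj) {
  if (interp != nullptr) {
    // operator->() forwards to the (possibly disarmed) vtable.
    (*interp)->decref(obj, /*has_pyobj_slot=*/true);
  }
}
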
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h ADDED
@@ -0,0 +1,192 @@
+ #pragma once
+
+ #include <c10/core/impl/HermeticPyObjectTLS.h>
+ #include <c10/core/impl/PyInterpreter.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/python_stub.h>
+
+ #include <atomic>
+
+ namespace c10 {
+ namespace impl {
+
+ struct C10_API PyObjectSlot {
+  public:
+   PyObjectSlot();
+
+   ~PyObjectSlot();
+
+   void maybe_destroy_pyobj();
+
+   // Associate the TensorImpl with the specified PyObject, and, if necessary,
+   // also tag the interpreter.
+   //
+   // NB: This lives in a header so that we can inline away the switch on status
+   //
+   // NB: THIS FUNCTION CAN RAISE AN EXCEPTION. Make sure to clean up after
+   // PyObject if necessary!
+   void init_pyobj(
+       PyInterpreter* self_interpreter,
+       PyObject* pyobj,
+       PyInterpreterStatus status) {
+     impl::PyInterpreter* expected = nullptr;
+     switch (status) {
+       case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED:
+         // caller guarantees there is no multithreaded access; if there is
+         // no data race OK to do a relaxed store
+         pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed);
+         break;
+       case impl::PyInterpreterStatus::TAGGED_BY_US:
+         // no tagging is necessary, the tag is already correct
+         break;
+       case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED:
+         // attempt to claim this TensorImpl with the specified interpreter
+         // tag
+         if (pyobj_interpreter_.compare_exchange_strong(
+                 expected, self_interpreter, std::memory_order_acq_rel)) {
+           break;
+         }
+         // test if, actually, it was already tagged by us! this situation can't
+         // be caused by a race, but it could be caused by a situation
+         // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED
+         // (because they didn't pre-check the tag) when actually it was
+         // owned by the interpreter
+         if (expected == self_interpreter) {
+           break;
+         }
+         // fallthrough, we lost the race. We are guaranteed not to lose the
+         // race with ourself, as calls to init_pyobj with the same interpreter
+         // ID must be sequentialized by the GIL
+         C10_FALLTHROUGH;
+       case impl::PyInterpreterStatus::TAGGED_BY_OTHER:
+         TORCH_CHECK(
+             false,
+             "cannot allocate PyObject for Tensor on interpreter ",
+             self_interpreter,
+             " that has already been used by another torch deploy interpreter ",
+             pyobj_interpreter_.load());
+     }
+
+     // we are the ONLY thread that can have gotten to this point. It is not
+     // possible to conflict with another zero interpreter as access is protected
+     // by GIL
+     // NB: owns_pyobj tag is initially false
+     pyobj_ = pyobj;
+   }
+
+   // Query the PyObject interpreter. This may return null if there is no
+   // interpreter. This is racy!
+   PyInterpreter* pyobj_interpreter();
+
+   PyObject* _unchecked_untagged_pyobj() const;
+
+   // Test the interpreter tag. If tagged for the current interpreter, return
+   // a non-nullopt (but possibly null) PyObject. If (possibly) untagged,
+   // returns a nullopt. If it is definitely invalid, raises an error.
+   //
+   // If `ignore_hermetic_tls` is false and this function is called from a
+   // hermetic context (ie, `HermeticPyObjectTLS::get_state()` is true), then
+   // nullopt is returned. If `ignore_hermetic_tls` is true, then the hermetic
+   // context is ignored, allowing you to check the interpreter tag of a
+   // nonhermetic PyObject from within a hermetic context. This is necessary
+   // because there are some cases where the deallocator function of a
+   // nonhermetic PyObject is called from within a hermetic context, so it must
+   // be properly treated as a nonhermetic PyObject.
+   //
+   // NB: this lives in header so that we can avoid actually creating the
+   // c10::optional
+   c10::optional<PyObject*> check_pyobj(
+       PyInterpreter* self_interpreter,
+       bool ignore_hermetic_tls = false) const {
+     // Note [Memory ordering on Python interpreter tag]
+     impl::PyInterpreter* interpreter =
+         pyobj_interpreter_.load(std::memory_order_acquire);
+     if (interpreter == nullptr) {
+       // NB: This never returns DEFINITELY_UNINITIALIZED because there is
+       // always the possibility that another thread races to initialize
+       // after we query here. The only time when we can conclude a tensor
+       // is definitely uninitialized is when we have just allocated it and
+       // it cannot have escaped to other threads yet
+       return c10::nullopt;
+     } else if (interpreter == self_interpreter) {
+       // NB: pyobj_ could still be null!
+       if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) {
+         return c10::nullopt;
+       } else {
+         return c10::make_optional(_unchecked_untagged_pyobj());
+       }
+     } else {
+       TORCH_CHECK(
+           false,
+           "cannot access PyObject for Tensor on interpreter ",
+           (*self_interpreter)->name(),
+           " that has already been used by another torch deploy interpreter ",
+           (*pyobj_interpreter_.load())->name());
+     }
+   }
+
+   // Clear the PyObject field for an interpreter, in situations where we
+   // statically know the tensor is tagged with our interpreter.
+   void unchecked_clear_pyobj(PyInterpreter* interpreter);
+
+   PyInterpreter& load_pyobj_interpreter() const;
+
+   // Check if the PyObjectSlot's interpreter is the same as the specified
+   // interpreter
+   bool check_interpreter(PyInterpreter* interpreter);
+
+   // Check if the PyObjectSlot is holding a PyObject, owned or non-owned
+   bool has_pyobj_nonhermetic();
+
+   bool owns_pyobj();
+
+   void set_owns_pyobj(bool b);
+
+  private:
+   // This field contains the interpreter tag for this object. See
+   // Note [Python interpreter tag] for general context
+   //
+   // Note [Memory ordering on Python interpreter tag]
+   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   // What memory_order do we need when accessing this atomic? We don't
+   // need a single total modification order (as provided by
+   // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only
+   // transition from -1 to some positive integer and never changes afterwards.
+   // Because there is only one modification, it trivially already has a total
+   // modification order (e.g., we don't need fences or locked instructions on
+   // x86)
+   //
+   // In fact, one could make a reasonable argument that relaxed reads are OK,
+   // due to the presence of external locking (GIL) to ensure that interactions
+   // with other data structures are still correctly synchronized, so that
+   // we fall in the "Single-Location Data Structures" case as described in
+   // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
+   // However, on x86, it doesn't matter if I use acquire or relaxed on the load
+   // as I get the same assembly in both cases. So I just use the more
+   // conservative acquire (which will impede compiler optimizations but I don't
+   // care)
+   std::atomic<PyInterpreter*> pyobj_interpreter_;
169
+
170
+ // This field contains a reference to a PyObject representing this Tensor.
171
+ // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new
172
+ // PyObject for it and set this field. This field does not have to be
173
+ // protected by an atomic as it is only allowed to be accessed when you hold
174
+ // the GIL, or during destruction of the tensor.
175
+ //
176
+ // When a PyObject dies, you are obligated to clear this field
177
+ // (otherwise, you will try to use-after-free the pyobj); this currently
178
+ // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp
179
+ //
180
+ // NB: Ordinarily, this should not be a strong reference, as if the
181
+ // PyObject owns the Tensor, this would create a reference cycle.
182
+ // However, sometimes this ownership flips. To track who owns
183
+ // who, this has a single pointer tag indicating whether or not the
184
+ // C++ object owns the PyObject (the common case, zero, means PyObject
185
+ // owns the C++ object); see _unchecked_untagged_pyobj for raw access
186
+ // or check_pyobj for checked access. See references to PyObject
187
+ // resurrection in torch/csrc/autograd/python_variable.cpp
188
+ PyObject* pyobj_;
189
+ };
190
+
191
+ } // namespace impl
192
+ } // namespace c10
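To make the intended call pattern concrete, here is a minimal sketch (illustrative only, not part of the diff) of how a caller that already holds the GIL might resolve the cached PyObject through the checked accessor above; the helper name, the include path, and the surrounding TensorImpl plumbing are assumptions:

    #include <c10/core/impl/PyObjectSlot.h> // assumed path of the header above

    // Sketch: resolve the PyObject cached for `interp`, or signal that the
    // caller needs to allocate a wrapper and register it via init_pyobj().
    PyObject* resolve_cached_pyobj(
        c10::impl::PyObjectSlot& slot,
        c10::impl::PyInterpreter* interp) {
      // check_pyobj() returns nullopt when the slot is untagged or we are in a
      // hermetic context; it throws if another torch::deploy interpreter owns it.
      auto maybe = slot.check_pyobj(interp);
      if (!maybe.has_value() || *maybe == nullptr) {
        return nullptr; // caller would create a wrapper and call init_pyobj()
      }
      return *maybe;
    }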
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ #include <c10/core/impl/PyInterpreter.h>
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+ namespace impl {
+
+ struct C10_API PythonDispatcherTLS {
+ static void set_state(PyInterpreter* state);
+ static PyInterpreter* get_state();
+ static void reset_state();
+ };
+
+ struct C10_API DisablePythonDispatcher {
+ DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) {
+ PythonDispatcherTLS::set_state({});
+ }
+ ~DisablePythonDispatcher() {
+ PythonDispatcherTLS::set_state(old_);
+ }
+ PyInterpreter* old_;
+ };
+
+ } // namespace impl
+ } // namespace c10
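DisablePythonDispatcher is an RAII guard around the TLS declared above; a hedged sketch of the expected pattern (the work inside the scope is a placeholder):

    #include <c10/core/impl/PythonDispatcherTLS.h>

    // Sketch: run a region with the Python dispatcher disabled. The previous
    // PyInterpreter* is captured in the constructor and restored on scope exit.
    void run_without_python_dispatcher() {
      c10::impl::DisablePythonDispatcher guard; // set_state({}) -> disabled
      // ... dispatch work that must not re-enter the Python dispatcher ...
    } // ~DisablePythonDispatcher restores the saved state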
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h ADDED
@@ -0,0 +1,308 @@
+ #pragma once
+
+ #include <algorithm>
+ #include <cstdint>
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/SmallVector.h>
+
+ #define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5
+
+ namespace c10 {
+ namespace impl {
+
+ // Packed container for TensorImpl sizes and strides.
+ // This design improves on the previous approach of using a pair of
+ // c10::SmallVector<int64_t, 5> by specializing for the operations we
+ // actually use and enforcing that the number of sizes is the same as
+ // the number of strides. The memory layout is as follows:
+ //
+ // 1 size_t for the size
+ // 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR pointer
+ // to out-of-line array
+ class C10_API SizesAndStrides {
+ public:
+ // TODO: different iterator types for sizes & strides to prevent
+ // mixing the two accidentally.
+ using sizes_iterator = int64_t*;
+ using sizes_const_iterator = const int64_t*;
+ using strides_iterator = int64_t*;
+ using strides_const_iterator = const int64_t*;
+
+ SizesAndStrides() : size_(1) {
+ size_at_unchecked(0) = 0;
+ stride_at_unchecked(0) = 1;
+ }
+
+ ~SizesAndStrides() {
+ if (C10_UNLIKELY(!isInline())) {
+ free(outOfLineStorage_);
+ }
+ }
+
+ SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) {
+ if (C10_LIKELY(rhs.isInline())) {
+ copyDataInline(rhs);
+ } else {
+ allocateOutOfLineStorage(size_);
+ copyDataOutline(rhs);
+ }
+ }
+
+ SizesAndStrides& operator=(const SizesAndStrides& rhs) {
+ if (this == &rhs) {
+ return *this;
+ }
+ if (C10_LIKELY(rhs.isInline())) {
+ if (C10_UNLIKELY(!isInline())) {
+ free(outOfLineStorage_);
+ }
+ copyDataInline(rhs);
+ } else {
+ if (isInline()) {
+ allocateOutOfLineStorage(rhs.size_);
+ } else {
+ resizeOutOfLineStorage(rhs.size_);
+ }
+ copyDataOutline(rhs);
+ }
+ size_ = rhs.size_;
+ return *this;
+ }
+
+ // Move from rhs. rhs.size() == 0 afterwards.
+ SizesAndStrides(SizesAndStrides&& rhs) noexcept : size_(rhs.size_) {
+ if (C10_LIKELY(isInline())) {
+ memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
+ } else {
+ outOfLineStorage_ = rhs.outOfLineStorage_;
+ rhs.outOfLineStorage_ = nullptr;
+ }
+
+ rhs.size_ = 0;
+ }
+
+ // Move from rhs. rhs.size() == 0 afterwards.
+ SizesAndStrides& operator=(SizesAndStrides&& rhs) noexcept {
+ if (this == &rhs) {
+ return *this;
+ }
+ if (C10_LIKELY(rhs.isInline())) {
+ if (C10_UNLIKELY(!isInline())) {
+ free(outOfLineStorage_);
+ }
+ copyDataInline(rhs);
+ } else {
+ // They're outline. We're going to steal their vector.
+ if (!isInline()) {
+ free(outOfLineStorage_);
+ }
+ outOfLineStorage_ = rhs.outOfLineStorage_;
+ rhs.outOfLineStorage_ = nullptr;
+ }
+ size_ = rhs.size_;
+ rhs.size_ = 0;
+
+ return *this;
+ }
+
+ size_t size() const noexcept {
+ return size_;
+ }
+
+ const int64_t* sizes_data() const noexcept {
+ if (C10_LIKELY(isInline())) {
+ return &inlineStorage_[0];
+ } else {
+ return &outOfLineStorage_[0];
+ }
+ }
+
+ int64_t* sizes_data() noexcept {
+ if (C10_LIKELY(isInline())) {
+ return &inlineStorage_[0];
+ } else {
+ return &outOfLineStorage_[0];
+ }
+ }
+
+ sizes_const_iterator sizes_begin() const noexcept {
+ return sizes_data();
+ }
+
+ sizes_iterator sizes_begin() noexcept {
+ return sizes_data();
+ }
+
+ sizes_const_iterator sizes_end() const noexcept {
+ return sizes_begin() + size();
+ }
+
+ sizes_iterator sizes_end() noexcept {
+ return sizes_begin() + size();
+ }
+
+ IntArrayRef sizes_arrayref() const noexcept {
+ return IntArrayRef{sizes_data(), size()};
+ }
+
+ void set_sizes(IntArrayRef newSizes) {
+ resize(newSizes.size());
+ std::copy(newSizes.begin(), newSizes.end(), sizes_begin());
+ }
+
+ void set_strides(IntArrayRef strides) {
+ TORCH_INTERNAL_ASSERT(strides.size() == size());
+ std::copy(strides.begin(), strides.end(), strides_begin());
+ }
+
+ const int64_t* strides_data() const noexcept {
+ if (C10_LIKELY(isInline())) {
+ return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
+ } else {
+ return &outOfLineStorage_[size()];
+ }
+ }
+
+ int64_t* strides_data() noexcept {
+ if (C10_LIKELY(isInline())) {
+ return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
+ } else {
+ return &outOfLineStorage_[size()];
+ }
+ }
+
+ strides_const_iterator strides_begin() const noexcept {
+ if (C10_LIKELY(isInline())) {
+ return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
+ } else {
+ return &outOfLineStorage_[size()];
+ }
+ }
+
+ strides_iterator strides_begin() noexcept {
+ if (C10_LIKELY(isInline())) {
+ return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
+ } else {
+ return &outOfLineStorage_[size()];
+ }
+ }
+
+ strides_const_iterator strides_end() const noexcept {
+ return strides_begin() + size();
+ }
+
+ strides_iterator strides_end() noexcept {
+ return strides_begin() + size();
+ }
+
+ IntArrayRef strides_arrayref() const noexcept {
+ return IntArrayRef{strides_data(), size()};
+ }
+
+ // Size accessors.
+ int64_t size_at(size_t idx) const noexcept {
+ assert(idx < size());
+ return sizes_data()[idx];
+ }
+
+ int64_t& size_at(size_t idx) noexcept {
+ assert(idx < size());
+ return sizes_data()[idx];
+ }
+
+ int64_t size_at_unchecked(size_t idx) const noexcept {
+ return sizes_data()[idx];
+ }
+
+ int64_t& size_at_unchecked(size_t idx) noexcept {
+ return sizes_data()[idx];
+ }
+
+ // Stride accessors.
+ int64_t stride_at(size_t idx) const noexcept {
+ assert(idx < size());
+ return strides_data()[idx];
+ }
+
+ int64_t& stride_at(size_t idx) noexcept {
+ assert(idx < size());
+ return strides_data()[idx];
+ }
+
+ int64_t stride_at_unchecked(size_t idx) const noexcept {
+ return strides_data()[idx];
+ }
+
+ int64_t& stride_at_unchecked(size_t idx) noexcept {
+ return strides_data()[idx];
+ }
+
+ void resize(size_t newSize) {
+ const auto oldSize = size();
+ if (newSize == oldSize) {
+ return;
+ }
+ if (C10_LIKELY(
+ newSize <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE && isInline())) {
+ if (oldSize < newSize) {
+ const auto bytesToZero =
+ (newSize - oldSize) * sizeof(inlineStorage_[0]);
+ memset(&inlineStorage_[oldSize], 0, bytesToZero);
+ memset(
+ &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE + oldSize],
+ 0,
+ bytesToZero);
+ }
+ size_ = newSize;
+ } else {
+ resizeSlowPath(newSize, oldSize);
+ }
+ }
+
+ void resizeSlowPath(size_t newSize, size_t oldSize);
+
+ private:
+ bool isInline() const noexcept {
+ return size_ <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;
+ }
+
+ void copyDataInline(const SizesAndStrides& rhs) {
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline());
+ memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
+ }
+
+ static size_t storageBytes(size_t size) noexcept {
+ return size * 2 * sizeof(int64_t);
+ }
+
+ void allocateOutOfLineStorage(size_t size) {
+ outOfLineStorage_ = static_cast<int64_t*>(malloc(storageBytes(size)));
+ TORCH_CHECK(
+ outOfLineStorage_,
+ "Could not allocate memory for Tensor SizesAndStrides!");
+ }
+
+ void resizeOutOfLineStorage(size_t newSize) {
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline());
+ outOfLineStorage_ = static_cast<int64_t*>(
+ realloc(outOfLineStorage_, storageBytes(newSize)));
+ TORCH_CHECK(
+ outOfLineStorage_,
+ "Could not allocate memory for Tensor SizesAndStrides!");
+ }
+
+ void copyDataOutline(const SizesAndStrides& rhs) noexcept {
+ memcpy(outOfLineStorage_, rhs.outOfLineStorage_, storageBytes(rhs.size_));
+ }
+
+ size_t size_;
+ union {
+ int64_t* outOfLineStorage_;
+ int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{};
+ };
+ };
+
+ } // namespace impl
+ } // namespace c10
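A short usage sketch (illustrative, assuming the header above is on the include path): ranks up to C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE (5) live in the inline union storage, and larger ranks take the out-of-line heap path.

    #include <c10/core/impl/SizesAndStrides.h>

    // Sketch: a rank-3 shape stays inline; resizing past rank 5 moves the
    // data to heap-allocated storage via resizeSlowPath().
    void sizes_and_strides_demo() {
      c10::impl::SizesAndStrides meta;
      meta.set_sizes({2, 3, 4});        // resize(3), then copy the sizes
      meta.set_strides({12, 4, 1});     // length must equal size(), asserted
      int64_t dim1 = meta.size_at(1);   // 3
      int64_t str0 = meta.stride_at(0); // 12
      (void)dim1;
      (void)str0;
      meta.resize(7);                   // rank > 5: leaves the inline fast path
    }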
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #include <c10/core/SafePyObject.h>
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+ namespace impl {
+
+ enum class TorchDispatchModeKey : int8_t {
+ FAKE,
+ PROXY,
+ FUNCTIONAL,
+ NUM_MODE_KEYS
+ };
+
+ struct C10_API TorchDispatchModeTLS {
+ // This API is NOT invariant safe.
+ // It must not take in an infra mode that uses TorchDispatchModeKey
+ // If you're pushing an infra mode onto the stack, we expect
+ // you to use set_mode
+ static void push_non_infra_mode_onto_stack(
+ std::shared_ptr<SafePyObject> mode);
+ // Pops the top mode of the stack,
+ // giving precedence to user modes before attempting to pop
+ // any infra modes
+ static const std::shared_ptr<SafePyObject> pop_stack();
+ // Returns the highest-priority infra mode on the stack,
+ // along with its mode key.
+ static const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
+ pop_highest_infra_mode();
+
+ static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
+ static int64_t stack_len();
+
+ static const c10::optional<std::shared_ptr<SafePyObject>> get_mode(
+ TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<SafePyObject>> unset_mode(
+ TorchDispatchModeKey mode_key);
+ static void set_mode(
+ const std::shared_ptr<SafePyObject>& mode,
+ TorchDispatchModeKey mode_key);
+
+ static const TorchDispatchModeTLS& get_state();
+ static void set_state(TorchDispatchModeTLS state);
+
+ static bool any_modes_set(bool skip_infra_modes = false);
+
+ private:
+ std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
+ // Users are allowed to push multiple ProxyTorchDispatchMode objects onto the
+ // stack
+ // However, we only allow a single FakeTensorMode onto the stack at a time
+ // (Pushing additional FakeTensorModes onto the stack is a no-op)
+ std::array<
+ c10::optional<std::shared_ptr<c10::SafePyObject>>,
+ static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS)>
+ infra_modes_;
+ };
+
+ C10_API bool dispatch_mode_enabled();
+
+ C10_API std::string to_string(TorchDispatchModeKey mode_key);
+
+ } // namespace impl
+ } // namespace c10
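A hedged sketch of how a caller might consult this TLS (illustrative only; the routing comment stands in for whatever the caller would actually do):

    #include <c10/core/impl/TorchDispatchModeTLS.h>

    // Sketch: check whether any torch_dispatch mode is active on this thread,
    // then look up the FakeTensorMode slot among the reserved infra modes.
    void maybe_route_through_fake_mode() {
      using namespace c10::impl;
      if (!dispatch_mode_enabled()) {
        return;
      }
      auto fake = TorchDispatchModeTLS::get_mode(TorchDispatchModeKey::FAKE);
      if (fake.has_value()) {
        // ... hand the call off to the fake mode's __torch_dispatch__ ...
      }
    }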
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h ADDED
@@ -0,0 +1,93 @@
+ #pragma once
+
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+
+ namespace c10 {
+ namespace impl {
+
+ /**
+ * An implementation of DeviceGuardImplInterface which delegates
+ * to virtual dispatch on the DeviceGuardImpl registry.
+ */
+ class VirtualGuardImpl final : public DeviceGuardImplInterface {
+ public:
+ VirtualGuardImpl(DeviceType device_type)
+ : impl_(getDeviceGuardImpl(device_type)) {}
+ // This constructor exists purely for testing
+ VirtualGuardImpl(const DeviceGuardImplInterface* impl) : impl_(impl) {}
+
+ // Copying and moving is OK!
+ VirtualGuardImpl(const VirtualGuardImpl&) = default;
+ VirtualGuardImpl& operator=(const VirtualGuardImpl&) = default;
+ VirtualGuardImpl(VirtualGuardImpl&&) noexcept = default;
+ VirtualGuardImpl& operator=(VirtualGuardImpl&&) noexcept = default;
+
+ DeviceType type() const override {
+ return impl_->type();
+ }
+ Device exchangeDevice(Device d) const override {
+ return impl_->exchangeDevice(d);
+ }
+ Device getDevice() const override {
+ return impl_->getDevice();
+ }
+ void setDevice(Device d) const override {
+ impl_->setDevice(d);
+ }
+ void uncheckedSetDevice(Device d) const noexcept override {
+ impl_->uncheckedSetDevice(d);
+ }
+ Stream getStream(Device d) const noexcept override {
+ return impl_->getStream(d);
+ }
+ Stream getDefaultStream(Device d) const override {
+ return impl_->getDefaultStream(d);
+ }
+ Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
+ const override {
+ return impl_->getStreamFromGlobalPool(d, isHighPriority);
+ }
+ Stream exchangeStream(Stream s) const noexcept override {
+ return impl_->exchangeStream(s);
+ }
+ DeviceIndex deviceCount() const noexcept override {
+ return impl_->deviceCount();
+ }
+
+ // Event functions
+ void record(
+ void** event,
+ const Stream& stream,
+ const DeviceIndex device_index,
+ const EventFlag flag) const override {
+ impl_->record(event, stream, device_index, flag);
+ }
+ void block(void* event, const Stream& stream) const override {
+ impl_->block(event, stream);
+ }
+ bool queryEvent(void* event) const override {
+ return impl_->queryEvent(event);
+ }
+ void destroyEvent(void* event, const DeviceIndex device_index)
+ const noexcept override {
+ impl_->destroyEvent(event, device_index);
+ }
+
+ bool queryStream(const Stream& stream) const override {
+ return impl_->queryStream(stream);
+ }
+ void synchronizeStream(const Stream& stream) const override {
+ impl_->synchronizeStream(stream);
+ }
+
+ void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
+ const override {
+ impl_->recordDataPtrOnStream(data_ptr, stream);
+ }
+
+ private:
+ const DeviceGuardImplInterface* impl_ = nullptr;
+ };
+
+ } // namespace impl
+ } // namespace c10
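A brief sketch of the delegation pattern (illustrative; higher-level guards such as c10::DeviceGuard normally sit on top of this, so the snippet only shows the raw interface):

    #include <c10/core/impl/VirtualGuardImpl.h>

    // Sketch: temporarily switch to `target` through the registry, without
    // naming the concrete backend at compile time.
    void run_on_device(c10::Device target) {
      c10::impl::VirtualGuardImpl impl(target.type());
      c10::Device prev = impl.exchangeDevice(target); // set target, keep old
      // ... enqueue work on `target` ...
      impl.setDevice(prev); // restore the previous device
    }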
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ #include <cstddef>
+
+ namespace c10 {
+
+ C10_API void* alloc_cpu(size_t nbytes);
+ C10_API void free_cpu(void* data);
+
+ } // namespace c10
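Usage is a simple malloc/free-style pairing; a minimal sketch (illustrative):

    #include <c10/core/impl/alloc_cpu.h>

    int main() {
      // Sketch: allocate a raw CPU buffer through c10 and release it with the
      // matching deallocator.
      void* buf = c10::alloc_cpu(64 * sizeof(float));
      // ... fill and use buf ...
      c10::free_cpu(buf); // pair with alloc_cpu, not ::free
      return 0;
    }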
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/impl/cow/COW.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace c10 {
+ struct StorageImpl;
+ class DataPtr;
+ }; // namespace c10
+
+ namespace c10::impl::cow {
+
+ // Creates a Copy-on-write (COW) clone of the given storage. This will also
+ // convert the given storage into a COW storage if it is not COW already.
+ //
+ // Converting the storage into a COW storage will not be successful if the
+ // storage's DataPtr has some context (`DataPtr::get_context()`) which is not
+ // equal to the data pointer (`DataPtr::get()`). In this case, a nullptr is
+ // returned.
+ C10_API c10::intrusive_ptr<StorageImpl> lazy_clone_storage(
+ StorageImpl& storage);
+
+ // Check if a storage has a simple DataPtr with no abnormal context
+ C10_API bool has_simple_data_ptr(const c10::StorageImpl& storage);
+
+ // Check if a DataPtr is COW
+ C10_API bool is_cow_data_ptr(const c10::DataPtr& data_ptr);
+
+ } // namespace c10::impl::cow
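A small sketch of how the COW entry point above might be driven (illustrative; `src` comes from the caller and the eager-copy fallback is a placeholder):

    #include <c10/core/StorageImpl.h>
    #include <c10/core/impl/cow/COW.h>

    // Sketch: try a lazy (copy-on-write) clone; lazy_clone_storage() returns a
    // null intrusive_ptr when the source DataPtr has a non-trivial context.
    c10::intrusive_ptr<c10::StorageImpl> try_lazy_clone(c10::StorageImpl& src) {
      auto cloned = c10::impl::cow::lazy_clone_storage(src);
      if (!cloned) {
        // Could not convert `src` to COW; a caller would fall back to an
        // eager copy of the underlying bytes here.
      }
      return cloned;
    }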