applied-ai-018 committed on
Commit 487534d · verified · 1 parent: adb2b67

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/1.word_embeddings.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step120/zero/18.attention.dense.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/18.attention.dense.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step120/zero/24.post_attention_layernorm.weight/exp_avg.pt +3 -0
  7. ckpts/universal/global_step120/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step120/zero/24.post_attention_layernorm.weight/fp32.pt +3 -0
  9. venv/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h +319 -0
  10. venv/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h +72 -0
  11. venv/lib/python3.10/site-packages/torch/include/c10/core/ConstantSymNodeImpl.h +104 -0
  12. venv/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h +48 -0
  13. venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h +15 -0
  14. venv/lib/python3.10/site-packages/torch/include/c10/core/Device.h +216 -0
  15. venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h +28 -0
  16. venv/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h +748 -0
  17. venv/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h +941 -0
  18. venv/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h +125 -0
  19. venv/lib/python3.10/site-packages/torch/include/c10/core/Event.h +125 -0
  20. venv/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h +44 -0
  21. venv/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h +86 -0
  22. venv/lib/python3.10/site-packages/torch/include/c10/core/Layout.h +78 -0
  23. venv/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h +290 -0
  24. venv/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h +31 -0
  25. venv/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h +76 -0
  26. venv/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h +46 -0
  27. venv/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h +50 -0
  28. venv/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h +52 -0
  29. venv/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h +83 -0
  30. venv/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h +461 -0
  31. venv/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h +620 -0
  32. venv/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h +57 -0
  33. venv/lib/python3.10/site-packages/torch/include/c10/core/Storage.h +272 -0
  34. venv/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h +276 -0
  35. venv/lib/python3.10/site-packages/torch/include/c10/core/Stream.h +176 -0
  36. venv/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h +170 -0
  37. venv/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h +107 -0
  38. venv/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h +113 -0
  39. venv/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h +423 -0
  40. venv/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h +72 -0
  41. venv/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h +214 -0
  42. venv/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h +0 -0
  43. venv/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h +787 -0
  44. venv/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h +48 -0
  45. venv/lib/python3.10/site-packages/torch/include/c10/core/alignment.h +21 -0
  46. venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h +255 -0
  47. venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h +190 -0
  48. venv/lib/python3.10/site-packages/torch/include/c10/core/thread_pool.h +120 -0
  49. venv/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h +81 -0
  50. venv/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h +115 -0
ckpts/universal/global_step120/zero/1.word_embeddings.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac770b3b8c778f73f05d14f99a0ee3a2ec4976c132d61080b0171ab4e202b206
3
+ size 415237325
ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9535b11edb7cea28fe5b18f86dac4fcb774c205a6fc5e2138bc731bdc73d762
3
+ size 50332843
ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4191ab7fb721962eca7ff2920f165f2838b2cd65f8ebfda2622d5ed836d45136
3
+ size 50332749
ckpts/universal/global_step120/zero/18.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56bcc383fc92f7c8a197eaa1f8a511614bc7006f9be344f6abc99ebaf5da959c
3
+ size 16778411
ckpts/universal/global_step120/zero/18.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d524d7c693d5283141dcf9a75432f5e340d169b0546bbca85e4222a2429e6256
3
+ size 16778317
ckpts/universal/global_step120/zero/24.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65994b24a99e9fb6fb18d79d7997f03fbee9bf772866c75f1a2f34bee8a83ae0
3
+ size 9372
ckpts/universal/global_step120/zero/24.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:156c8c4e391bb989e14a4b4fd88462467d0caa1d3d276a8a4cc74cd031741ea2
3
+ size 9387
ckpts/universal/global_step120/zero/24.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f42c6f5607e53d7a68f772c3d89c11573a0fd44b96577934705c13a60c158f26
3
+ size 9293
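Each checkpoint entry above is stored as a Git LFS pointer file rather than the tensor data itself: a three-line text stub recording the spec version, the SHA-256 of the real payload, and its size in bytes. As a minimal illustration (not part of this commit; the program and its name are hypothetical), such a pointer can be parsed with a few lines of C++:

// Sketch only: parse a Git LFS pointer file (version / oid / size) like the
// ones shown above. Assumes a well-formed pointer; no LFS library involved.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char** argv) {
  if (argc < 2) return 1;                  // usage: ./lfs_peek <pointer-file>
  std::ifstream in(argv[1]);
  std::string line, key, value, oid;
  long long size_bytes = 0;
  while (std::getline(in, line)) {
    std::istringstream fields(line);
    fields >> key >> value;                // each line is "<key> <value>"
    if (key == "oid") oid = value;         // e.g. "sha256:ac77..."
    if (key == "size") size_bytes = std::stoll(value);
  }
  std::cout << oid << " (" << size_bytes << " bytes)\n";
  return 0;
}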
venv/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h ADDED
@@ -0,0 +1,319 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <cstdint>
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <utility>
8
+
9
+ #include <c10/core/Device.h>
10
+ #include <c10/core/DeviceType.h>
11
+ #include <c10/macros/Export.h>
12
+ #include <c10/macros/Macros.h>
13
+ #include <c10/util/Exception.h>
14
+ #include <c10/util/ThreadLocalDebugInfo.h>
15
+ #include <c10/util/UniqueVoidPtr.h>
16
+
17
+ namespace c10 {
18
+
19
+ // A DataPtr is a unique pointer (with an attached deleter and some
20
+ // context for the deleter) to some memory, which also records what
21
+ // device is for its data.
22
+ //
23
+ // nullptr DataPtrs can still have a nontrivial device; this allows
24
+ // us to treat zero-size allocations uniformly with non-zero allocations.
25
+ //
26
+ class C10_API DataPtr {
27
+ private:
28
+ c10::detail::UniqueVoidPtr ptr_;
29
+ Device device_;
30
+
31
+ public:
32
+ // Choice of CPU here is arbitrary; if there's an "undefined" device
33
+ // we could use that too
34
+ DataPtr() : ptr_(), device_(DeviceType::CPU) {}
35
+ DataPtr(void* data, Device device) : ptr_(data), device_(device) {}
36
+ DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device)
37
+ : ptr_(data, ctx, ctx_deleter), device_(device) {}
38
+ void* operator->() const {
39
+ return ptr_.get();
40
+ }
41
+ void clear() {
42
+ ptr_.clear();
43
+ }
44
+ void* get() const {
45
+ return ptr_.get();
46
+ }
47
+ void* mutable_get() {
48
+ return ptr_.get();
49
+ }
50
+ void* get_context() const {
51
+ return ptr_.get_context();
52
+ }
53
+ void* release_context() {
54
+ return ptr_.release_context();
55
+ }
56
+ std::unique_ptr<void, DeleterFnPtr>&& move_context() {
57
+ return ptr_.move_context();
58
+ }
59
+ operator bool() const {
60
+ return static_cast<bool>(ptr_);
61
+ }
62
+ template <typename T>
63
+ T* cast_context(DeleterFnPtr expected_deleter) const {
64
+ return ptr_.cast_context<T>(expected_deleter);
65
+ }
66
+ DeleterFnPtr get_deleter() const {
67
+ return ptr_.get_deleter();
68
+ }
69
+ /**
70
+ * Compare the deleter in a DataPtr to expected_deleter.
71
+ * If it matches, replace the deleter with new_deleter
72
+ * and return true; otherwise, does nothing and returns
73
+ * false.
74
+ *
75
+ * In general, it is not safe to unconditionally set the
76
+ * deleter on a DataPtr, because you don't know what
77
+ * the deleter is, and thus will have a hard time properly
78
+ * disposing of the deleter without storing the original
79
+ * deleter (this is difficult to do, because DeleterFnPtr
80
+ * is not a closure, and because the context on DataPtr is
81
+ * only a single word, you generally don't have enough
82
+ * space to store both the original deleter and its context).
83
+ * However, in some cases, you know /exactly/ what the deleter
84
+ * is, and you have a new deleter that manually wraps
85
+ * the old one. In this case, you can safely swap the deleter
86
+ * after asserting that the deleters line up.
87
+ *
88
+ * What are the requirements on new_deleter? It must still
89
+ * properly dispose of the void* pointer passed in as its argument,
90
+ * where void* is whatever the context of the original deleter
91
+ * is. So in general, you expect the new deleter to look something
92
+ * like this:
93
+ *
94
+ * [](void* ptr) {
95
+ * some_new_stuff(ptr);
96
+ * get_orig_allocator()->raw_deleter(ptr);
97
+ * }
98
+ *
99
+ * Note that it won't work to close over the original
100
+ * allocator; you don't have enough space to do that! Also,
101
+ * it's unsafe to assume that the passed in pointer in
102
+ * question is the memory pointer in question; it might not
103
+ * be; be sure to read the source code of the Allocator
104
+ * in question to confirm this.
105
+ */
106
+ C10_NODISCARD bool compare_exchange_deleter(
107
+ DeleterFnPtr expected_deleter,
108
+ DeleterFnPtr new_deleter) {
109
+ return ptr_.compare_exchange_deleter(expected_deleter, new_deleter);
110
+ }
111
+ Device device() const {
112
+ return device_;
113
+ }
114
+ // Unsafely mutates the device on a DataPtr. Under normal use,
115
+ // you should never actually need to call this function.
116
+ // We need this for the implementation of the hack detailed
117
+ // in Note [Masquerading as CUDA]
118
+ void unsafe_set_device(Device device) {
119
+ device_ = device;
120
+ }
121
+ };
122
+
123
+ // NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a
124
+ // CPU nullptr
125
+
126
+ inline bool operator==(const DataPtr& dp, std::nullptr_t) noexcept {
127
+ return !dp;
128
+ }
129
+ inline bool operator==(std::nullptr_t, const DataPtr& dp) noexcept {
130
+ return !dp;
131
+ }
132
+ inline bool operator!=(const DataPtr& dp, std::nullptr_t) noexcept {
133
+ return dp;
134
+ }
135
+ inline bool operator!=(std::nullptr_t, const DataPtr& dp) noexcept {
136
+ return dp;
137
+ }
138
+
139
+ // Note [raw_allocate/raw_deallocate and Thrust]
140
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
141
+ // Thrust's support for custom allocators requires us to write something
142
+ // like this:
143
+ //
144
+ // class ThrustAllocator {
145
+ // char* allocate(size_t);
146
+ // void deallocate(char*, size_t);
147
+ // };
148
+ //
149
+ // This is not good for our unique_ptr based allocator interface, as
150
+ // there is no way to get to the context when we free.
151
+ //
152
+ // However, in some cases the context is exactly the same as
153
+ // the data pointer. In this case, we can support the "raw"
154
+ // allocate and deallocate interface. This is what
155
+ // raw_deleter signifies. By default, it returns a nullptr, which means that
156
+ // the raw interface is not implemented. Be sure to implement it whenever
157
+ // possible, or the raw interface will incorrectly reported as unsupported,
158
+ // when it is actually possible.
159
+
160
+ struct C10_API Allocator {
161
+ virtual ~Allocator() = default;
162
+
163
+ virtual DataPtr allocate(size_t n) = 0;
164
+
165
+ // Clones an allocation that came from this allocator.
166
+ //
167
+ // To perform the copy, this function calls `copy_data`, which
168
+ // must be implemented by derived classes.
169
+ //
170
+ // Note that this explicitly ignores any context that may have been
171
+ // attached to the input data.
172
+ //
173
+ // Requires: input data was allocated by the same allocator.
174
+ DataPtr clone(const void* data, std::size_t n);
175
+
176
+ // Checks if DataPtr has a simple context, not wrapped with any out of the
177
+ // ordinary contexts.
178
+ virtual bool is_simple_data_ptr(const DataPtr& data_ptr) const;
179
+
180
+ // If this returns a non nullptr, it means that allocate()
181
+ // is guaranteed to return a unique_ptr with this deleter attached;
182
+ // it means the rawAllocate and rawDeallocate APIs are safe to use.
183
+ // This function MUST always return the same BoundDeleter.
184
+ virtual DeleterFnPtr raw_deleter() const {
185
+ return nullptr;
186
+ }
187
+ void* raw_allocate(size_t n) {
188
+ auto dptr = allocate(n);
189
+ AT_ASSERT(dptr.get() == dptr.get_context());
190
+ return dptr.release_context();
191
+ }
192
+ void raw_deallocate(void* ptr) {
193
+ auto d = raw_deleter();
194
+ AT_ASSERT(d);
195
+ d(ptr);
196
+ }
197
+
198
+ // Copies data from one allocation to another.
199
+ // Pure virtual, so derived classes must define behavior.
200
+ // Derived class implementation can simply call `default_copy_data`
201
+ // to use `std::memcpy`.
202
+ //
203
+ // Requires: src and dest were allocated by this allocator
204
+ // Requires: src and dest both have length >= count
205
+ virtual void copy_data(void* dest, const void* src, std::size_t count)
206
+ const = 0;
207
+
208
+ protected:
209
+ // Uses `std::memcpy` to copy data.
210
+ // Child classes can use this as `copy_data` when an alternative copy
211
+ // API is not needed.
212
+ void default_copy_data(void* dest, const void* src, std::size_t count) const;
213
+ };
214
+
215
+ // This context is used to generate DataPtr which have arbitrary
216
+ // std::function deleters associated with them. In some user facing
217
+ // functions, we give a (user-friendly) interface for constructing
218
+ // tensors from external data which take an arbitrary std::function
219
+ // deleter. Grep for InefficientStdFunctionContext to find these
220
+ // occurrences.
221
+ //
222
+ // This context is inefficient because we have to do a dynamic
223
+ // allocation InefficientStdFunctionContext, on top of the dynamic
224
+ // allocation which is implied by std::function itself.
225
+ struct C10_API InefficientStdFunctionContext {
226
+ void* ptr_;
227
+ std::function<void(void*)> deleter_;
228
+ InefficientStdFunctionContext(void* ptr, std::function<void(void*)> deleter)
229
+ : ptr_(ptr), deleter_(std::move(deleter)) {}
230
+ ~InefficientStdFunctionContext() {
231
+ if (deleter_) {
232
+ deleter_(ptr_);
233
+ }
234
+ }
235
+ static DataPtr makeDataPtr(
236
+ void* ptr,
237
+ std::function<void(void*)> deleter,
238
+ Device device);
239
+ };
240
+
241
+ /** Set the allocator for DeviceType `t`. The passed in allocator pointer is
242
+ * expected to have static lifetime; this function does NOT take ownership
243
+ * of the raw pointer. (The reason for this is to prevent existing pointers
244
+ * to an allocator of a particular device from being invalidated when
245
+ * SetAllocator is called.)
246
+ *
247
+ * Also note that this is not thread-safe, and we assume this function will
248
+ * only be called during initialization.
249
+ *
250
+ * The 'priority' flag is introduced when we want to overwrite the default
251
+ * allocator, since the allocators are set statically. The default priority
252
+ * is 0, which means the lowest. Only higher or equal priority can overwrite
253
+ * existing ones.
254
+ */
255
+ C10_API void SetAllocator(DeviceType t, Allocator* alloc, uint8_t priority = 0);
256
+ C10_API Allocator* GetAllocator(const DeviceType& t);
257
+
258
+ template <DeviceType t>
259
+ struct AllocatorRegisterer {
260
+ explicit AllocatorRegisterer(Allocator* alloc) {
261
+ SetAllocator(t, alloc);
262
+ }
263
+ };
264
+
265
+ #define REGISTER_ALLOCATOR(t, f) \
266
+ namespace { \
267
+ static c10::AllocatorRegisterer<t> g_allocator_d(f); \
268
+ }
269
+
270
+ // An interface for reporting thread local memory usage
271
+ // per device
272
+ struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase {
273
+ MemoryReportingInfoBase();
274
+ ~MemoryReportingInfoBase() override = default;
275
+
276
+ /**
277
+ * alloc_size corresponds to the size of the ptr.
278
+ *
279
+ * total_allocated corresponds to total allocated memory.
280
+ *
281
+ * total_reserved corresponds to total size of memory pool, both used and
282
+ * unused, if applicable.
283
+ */
284
+ virtual void reportMemoryUsage(
285
+ void* ptr,
286
+ int64_t alloc_size,
287
+ size_t total_allocated,
288
+ size_t total_reserved,
289
+ Device device) = 0;
290
+
291
+ virtual void reportOutOfMemory(
292
+ int64_t alloc_size,
293
+ size_t total_allocated,
294
+ size_t total_reserved,
295
+ Device device);
296
+
297
+ virtual bool memoryProfilingEnabled() const = 0;
298
+ };
299
+
300
+ C10_API bool memoryProfilingEnabled();
301
+ C10_API void reportMemoryUsageToProfiler(
302
+ void* ptr,
303
+ int64_t alloc_size,
304
+ size_t total_allocated,
305
+ size_t total_reserved,
306
+ Device device);
307
+
308
+ C10_API void reportOutOfMemoryToProfiler(
309
+ int64_t alloc_size,
310
+ size_t total_allocated,
311
+ size_t total_reserved,
312
+ Device device);
313
+
314
+ // used to hold traceback information in allocators
315
+ struct GatheredContext {
316
+ virtual ~GatheredContext() = default;
317
+ };
318
+
319
+ } // namespace c10
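As a rough illustration of the interface above (a hypothetical sketch, not code from this commit or from PyTorch), a minimal CPU-style allocator could implement allocate and copy_data, and expose the "raw" interface described in the Thrust note by making the context equal to the data pointer:

// Hypothetical sketch: a trivial allocator against the c10::Allocator
// interface declared above. Not an allocator that ships with PyTorch.
#include <c10/core/Allocator.h>
#include <cstdlib>

namespace {

void delete_raw(void* ptr) {
  std::free(ptr);
}

struct TrivialCPUAllocator final : c10::Allocator {
  c10::DataPtr allocate(size_t n) override {
    void* data = (n == 0) ? nullptr : std::malloc(n);
    // context == data, so raw_allocate()/raw_deallocate() are usable.
    return {data, data, &delete_raw, c10::Device(c10::DeviceType::CPU)};
  }
  c10::DeleterFnPtr raw_deleter() const override {
    return &delete_raw;  // same deleter for every allocation, as required
  }
  void copy_data(void* dest, const void* src, std::size_t count) const override {
    default_copy_data(dest, src, count);  // plain memcpy is enough for CPU
  }
};

} // namespace

An instance could then be made the allocator for a device type via SetAllocator / REGISTER_ALLOCATOR, subject to the priority rules documented in the header.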
venv/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h ADDED
@@ -0,0 +1,72 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ namespace c10 {
6
+
7
+ // Structure used to pack all the thread local boolean
8
+ // flags used by autograd
9
+ struct C10_API AutogradState {
10
+ static AutogradState& get_tls_state();
11
+ static void set_tls_state(AutogradState state);
12
+
13
+ AutogradState(
14
+ bool grad_mode,
15
+ bool inference_mode,
16
+ bool fw_grad_mode,
17
+ bool multithreading_enabled)
18
+ : grad_mode_(grad_mode),
19
+ inference_mode_(inference_mode),
20
+ fw_grad_mode_(fw_grad_mode),
21
+ multithreading_enabled_(multithreading_enabled),
22
+ view_replay_enabled_(false) {}
23
+
24
+ void set_grad_mode(bool enabled) {
25
+ grad_mode_ = enabled;
26
+ }
27
+
28
+ void set_fw_grad_mode(bool enabled) {
29
+ fw_grad_mode_ = enabled;
30
+ }
31
+
32
+ void set_inference_mode(bool enabled) {
33
+ inference_mode_ = enabled;
34
+ }
35
+
36
+ void set_multithreading_enabled(bool multithreading_enabled) {
37
+ multithreading_enabled_ = multithreading_enabled;
38
+ }
39
+
40
+ void set_view_replay_enabled(bool view_replay_enabled) {
41
+ view_replay_enabled_ = view_replay_enabled;
42
+ }
43
+
44
+ bool get_grad_mode() const {
45
+ return grad_mode_;
46
+ }
47
+
48
+ bool get_fw_grad_mode() const {
49
+ return fw_grad_mode_;
50
+ }
51
+
52
+ bool get_inference_mode() const {
53
+ return inference_mode_;
54
+ }
55
+
56
+ bool get_multithreading_enabled() const {
57
+ return multithreading_enabled_;
58
+ }
59
+
60
+ bool get_view_replay_enabled() const {
61
+ return view_replay_enabled_;
62
+ }
63
+
64
+ private:
65
+ bool grad_mode_ : 1;
66
+ bool inference_mode_ : 1;
67
+ bool fw_grad_mode_ : 1;
68
+ bool multithreading_enabled_ : 1;
69
+ bool view_replay_enabled_ : 1;
70
+ };
71
+
72
+ } // namespace c10
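For illustration (a hypothetical sketch, not part of this header), the thread-local flags packed here can be toggled through get_tls_state(); this is roughly what higher-level guards such as GradMode do:

// Hypothetical sketch: flip the thread-local grad-mode bit around a region
// of work, restoring the previous value afterwards.
#include <c10/core/AutogradState.h>

void run_without_grad_tracking() {
  auto& tls = c10::AutogradState::get_tls_state();
  const bool prev = tls.get_grad_mode();
  tls.set_grad_mode(false);   // comparable in spirit to a no-grad guard
  // ... run work that should not record autograd history ...
  tls.set_grad_mode(prev);    // restore on the same thread
}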
venv/lib/python3.10/site-packages/torch/include/c10/core/ConstantSymNodeImpl.h ADDED
@@ -0,0 +1,104 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymNodeImpl.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <cstdint>
8
+ #include <string>
9
+ #include <variant>
10
+
11
+ namespace c10 {
12
+
13
+ // Unlike other SymNodeImpl, this cannot be "dispatched" conventionally,
14
+ // as it typically needs to defer to another SymNodeImpl
15
+ //
16
+ // Can either represent a bool, int (don't support float yet) this is useful
17
+ // for representing otherwise unrepresentable large negative integer constant.
18
+ template <typename T>
19
+ class C10_API ConstantSymNodeImpl : public SymNodeImpl {
20
+ static_assert(
21
+ ::std::is_same_v<T, int64_t> || ::std::is_same_v<T, bool>,
22
+ "ConstantSymNodeImpl can only accept int64_t or bool types");
23
+
24
+ public:
25
+ ConstantSymNodeImpl(T val) : value_(val) {}
26
+
27
+ bool is_int() override {
28
+ return is_int_();
29
+ }
30
+ bool is_bool() override {
31
+ return is_bool_();
32
+ }
33
+ bool is_float() override {
34
+ return false;
35
+ }
36
+ int64_t guard_int(const char* file, int64_t line) override {
37
+ TORCH_CHECK(is_int(), "not an int");
38
+ return int_();
39
+ }
40
+ bool guard_bool(const char* file, int64_t line) override {
41
+ TORCH_CHECK(is_bool(), "not a bool");
42
+ return bool_();
43
+ }
44
+ double guard_float(const char* file, int64_t line) override {
45
+ TORCH_CHECK(false, "not a float");
46
+ }
47
+ int64_t int_() override {
48
+ TORCH_CHECK(is_int(), "not an int");
49
+ return ::std::get<int64_t>(value_);
50
+ }
51
+ bool bool_() override {
52
+ TORCH_CHECK(is_bool(), "not a bool");
53
+ return ::std::get<bool>(value_);
54
+ }
55
+ bool has_hint() override {
56
+ return true;
57
+ }
58
+ c10::SymNode eq(const c10::SymNode& other) override;
59
+ c10::SymNode ne(const c10::SymNode& other) override;
60
+ c10::SymNode ge(const c10::SymNode& other) override;
61
+ c10::SymNode le(const c10::SymNode& other) override;
62
+ c10::SymNode lt(const c10::SymNode& other) override;
63
+ c10::SymNode gt(const c10::SymNode& other) override;
64
+ c10::SymNode mul(const c10::SymNode& other) override;
65
+ ::std::string str() override {
66
+ if constexpr (is_int_()) {
67
+ return ::std::to_string(::std::get<int64_t>(value_));
68
+ } else {
69
+ return ::std::get<bool>(value_) ? "true" : "false";
70
+ }
71
+ }
72
+ c10::optional<int64_t> constant_int() override {
73
+ if constexpr (is_int_()) {
74
+ return ::std::get<int64_t>(value_);
75
+ } else {
76
+ return c10::nullopt;
77
+ }
78
+ }
79
+ c10::optional<bool> constant_bool() override {
80
+ if constexpr (is_bool_()) {
81
+ return ::std::get<bool>(value_);
82
+ } else {
83
+ return c10::nullopt;
84
+ }
85
+ }
86
+ bool is_constant() override {
87
+ return true;
88
+ }
89
+ bool is_symbolic() override {
90
+ return false;
91
+ }
92
+
93
+ private:
94
+ ::std::variant<int64_t, bool> value_;
95
+
96
+ static constexpr bool is_int_() {
97
+ return ::std::is_same_v<T, int64_t>;
98
+ }
99
+ static constexpr bool is_bool_() {
100
+ return ::std::is_same_v<T, bool>;
101
+ }
102
+ };
103
+
104
+ } // namespace c10
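A brief usage sketch (hypothetical, not from this header): wrapping a plain constant as a SymNode, which the class comment says is useful for integers an ordinary symbolic node cannot represent:

// Hypothetical sketch: build a SymNode holding a constant int64_t.
#include <c10/core/ConstantSymNodeImpl.h>
#include <c10/util/intrusive_ptr.h>

c10::SymNode make_constant_node(int64_t value) {
  return c10::make_intrusive<c10::ConstantSymNodeImpl<int64_t>>(value);
}

// make_constant_node(42)->is_int() is true, ->int_() returns 42, and
// ->constant_int() returns the value as an optional.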
venv/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h ADDED
@@ -0,0 +1,48 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/macros/Macros.h>
7
+ #include <cstddef>
8
+
9
+ namespace c10 {
10
+
11
+ using CopyBytesFunction = void (*)(
12
+ size_t nbytes,
13
+ const void* src,
14
+ Device src_device,
15
+ void* dst,
16
+ Device dst_device);
17
+
18
+ struct C10_API _CopyBytesFunctionRegisterer {
19
+ _CopyBytesFunctionRegisterer(
20
+ DeviceType from,
21
+ DeviceType to,
22
+ CopyBytesFunction func_sync,
23
+ CopyBytesFunction func_async = nullptr);
24
+ };
25
+
26
+ #define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) \
27
+ namespace { \
28
+ static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \
29
+ g_copy_function)(from, to, __VA_ARGS__); \
30
+ }
31
+
32
+ /*
33
+ * WARNING: Implementations for this function are currently registered from
34
+ * ATen and caffe2, not yet from c10. Don't use this unless either ATen
35
+ * or caffe2 is also present.
36
+ * We can't move them yet, because the CUDA implementations aren't unified yet
37
+ * between ATen and caffe2.
38
+ * We're planning to move the implementations into c10/backend/xxx
39
+ * to make c10 self contained again.
40
+ */
41
+ C10_API void CopyBytes(
42
+ size_t nbytes,
43
+ const void* src,
44
+ Device src_device,
45
+ void* dst,
46
+ Device dst_device,
47
+ bool async);
48
+ } // namespace c10
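For illustration (a hypothetical sketch; real registrations currently live in ATen/caffe2, as the warning above notes), a synchronous copy routine could be hooked up through the macro:

// Hypothetical sketch: register a synchronous CPU -> CPU byte copy.
// In a real build this pair is already registered elsewhere; this is
// illustrative only.
#include <c10/core/CopyBytes.h>
#include <cstring>

namespace c10 {
namespace {

void copy_cpu_to_cpu(
    size_t nbytes,
    const void* src,
    Device /*src_device*/,
    void* dst,
    Device /*dst_device*/) {
  std::memcpy(dst, src, nbytes);
}

} // namespace

// Registers the sync path; the async argument defaults to nullptr.
REGISTER_COPY_BYTES_FUNCTION(DeviceType::CPU, DeviceType::CPU, copy_cpu_to_cpu)

} // namespace c10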
venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace caffe2 {
7
+ class TypeMeta;
8
+ } // namespace caffe2
9
+
10
+ namespace c10 {
11
+ C10_API void set_default_dtype(caffe2::TypeMeta dtype);
12
+ C10_API const caffe2::TypeMeta get_default_dtype();
13
+ C10_API ScalarType get_default_dtype_as_scalartype();
14
+ C10_API const caffe2::TypeMeta get_default_complex_dtype();
15
+ } // namespace c10
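A small usage sketch (hypothetical, not from this header): switching the process-wide default dtype and reading it back as a ScalarType:

// Hypothetical sketch: make double the default floating-point dtype.
#include <c10/core/DefaultDtype.h>
#include <c10/util/typeid.h>   // caffe2::TypeMeta

void prefer_double() {
  c10::set_default_dtype(caffe2::TypeMeta::Make<double>());
  c10::ScalarType st = c10::get_default_dtype_as_scalartype();
  (void)st;  // kDouble after the call above
}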
venv/lib/python3.10/site-packages/torch/include/c10/core/Device.h ADDED
@@ -0,0 +1,216 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <cstddef>
8
+ #include <cstdint>
9
+ #include <functional>
10
+ #include <iosfwd>
11
+ #include <string>
12
+
13
+ namespace c10 {
14
+
15
+ /// An index representing a specific device; e.g., the 1 in GPU 1.
16
+ /// A DeviceIndex is not independently meaningful without knowing
17
+ /// the DeviceType it is associated; try to use Device rather than
18
+ /// DeviceIndex directly.
19
+ using DeviceIndex = int8_t;
20
+
21
+ /// Represents a compute device on which a tensor is located. A device is
22
+ /// uniquely identified by a type, which specifies the type of machine it is
23
+ /// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
24
+ /// specific compute device when there is more than one of a certain type. The
25
+ /// device index is optional, and in its defaulted state represents (abstractly)
26
+ /// "the current device". Further, there are two constraints on the value of the
27
+ /// device index, if one is explicitly stored:
28
+ /// 1. A negative index represents the current device, a non-negative index
29
+ /// represents a specific, concrete device,
30
+ /// 2. When the device type is CPU, the device index must be zero.
31
+ struct C10_API Device final {
32
+ using Type = DeviceType;
33
+
34
+ /// Constructs a new `Device` from a `DeviceType` and an optional device
35
+ /// index.
36
+ /* implicit */ Device(DeviceType type, DeviceIndex index = -1)
37
+ : type_(type), index_(index) {
38
+ validate();
39
+ }
40
+
41
+ /// Constructs a `Device` from a string description, for convenience.
42
+ /// The string supplied must follow the following schema:
43
+ /// `(cpu|cuda)[:<device-index>]`
44
+ /// where `cpu` or `cuda` specifies the device type, and
45
+ /// `:<device-index>` optionally specifies a device index.
46
+ /* implicit */ Device(const std::string& device_string);
47
+
48
+ /// Returns true if the type and index of this `Device` matches that of
49
+ /// `other`.
50
+ bool operator==(const Device& other) const noexcept {
51
+ return this->type_ == other.type_ && this->index_ == other.index_;
52
+ }
53
+
54
+ /// Returns true if the type or index of this `Device` differs from that of
55
+ /// `other`.
56
+ bool operator!=(const Device& other) const noexcept {
57
+ return !(*this == other);
58
+ }
59
+
60
+ /// Sets the device index.
61
+ void set_index(DeviceIndex index) {
62
+ index_ = index;
63
+ }
64
+
65
+ /// Returns the type of device this is.
66
+ DeviceType type() const noexcept {
67
+ return type_;
68
+ }
69
+
70
+ /// Returns the optional index.
71
+ DeviceIndex index() const noexcept {
72
+ return index_;
73
+ }
74
+
75
+ /// Returns true if the device has a non-default index.
76
+ bool has_index() const noexcept {
77
+ return index_ != -1;
78
+ }
79
+
80
+ /// Return true if the device is of CUDA type.
81
+ bool is_cuda() const noexcept {
82
+ return type_ == DeviceType::CUDA;
83
+ }
84
+
85
+ /// Return true if the device is of PrivateUse1 type.
86
+ bool is_privateuseone() const noexcept {
87
+ return type_ == DeviceType::PrivateUse1;
88
+ }
89
+
90
+ /// Return true if the device is of MPS type.
91
+ bool is_mps() const noexcept {
92
+ return type_ == DeviceType::MPS;
93
+ }
94
+
95
+ /// Return true if the device is of HIP type.
96
+ bool is_hip() const noexcept {
97
+ return type_ == DeviceType::HIP;
98
+ }
99
+
100
+ /// Return true if the device is of VE type.
101
+ bool is_ve() const noexcept {
102
+ return type_ == DeviceType::VE;
103
+ }
104
+
105
+ /// Return true if the device is of XPU type.
106
+ bool is_xpu() const noexcept {
107
+ return type_ == DeviceType::XPU;
108
+ }
109
+
110
+ /// Return true if the device is of IPU type.
111
+ bool is_ipu() const noexcept {
112
+ return type_ == DeviceType::IPU;
113
+ }
114
+
115
+ /// Return true if the device is of XLA type.
116
+ bool is_xla() const noexcept {
117
+ return type_ == DeviceType::XLA;
118
+ }
119
+
120
+ /// Return true if the device is of MTIA type.
121
+ bool is_mtia() const noexcept {
122
+ return type_ == DeviceType::MTIA;
123
+ }
124
+
125
+ /// Return true if the device is of HPU type.
126
+ bool is_hpu() const noexcept {
127
+ return type_ == DeviceType::HPU;
128
+ }
129
+
130
+ /// Return true if the device is of Lazy type.
131
+ bool is_lazy() const noexcept {
132
+ return type_ == DeviceType::Lazy;
133
+ }
134
+
135
+ /// Return true if the device is of Vulkan type.
136
+ bool is_vulkan() const noexcept {
137
+ return type_ == DeviceType::Vulkan;
138
+ }
139
+
140
+ /// Return true if the device is of Metal type.
141
+ bool is_metal() const noexcept {
142
+ return type_ == DeviceType::Metal;
143
+ }
144
+
145
+ /// Return true if the device is of ORT type.
146
+ bool is_ort() const noexcept {
147
+ return type_ == DeviceType::ORT;
148
+ }
149
+
150
+ /// Return true if the device is of META type.
151
+ bool is_meta() const noexcept {
152
+ return type_ == DeviceType::Meta;
153
+ }
154
+
155
+ /// Return true if the device is of CPU type.
156
+ bool is_cpu() const noexcept {
157
+ return type_ == DeviceType::CPU;
158
+ }
159
+
160
+ /// Return true if the device supports arbitrary strides.
161
+ bool supports_as_strided() const noexcept {
162
+ return type_ != DeviceType::IPU && type_ != DeviceType::XLA &&
163
+ type_ != DeviceType::Lazy && type_ != DeviceType::MTIA;
164
+ }
165
+
166
+ /// Same string as returned from operator<<.
167
+ std::string str() const;
168
+
169
+ private:
170
+ DeviceType type_;
171
+ DeviceIndex index_ = -1;
172
+ void validate() {
173
+ // Removing these checks in release builds noticeably improves
174
+ // performance in micro-benchmarks.
175
+ // This is safe to do, because backends that use the DeviceIndex
176
+ // have a later check when we actually try to switch to that device.
177
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
178
+ index_ >= -1,
179
+ "Device index must be -1 or non-negative, got ",
180
+ static_cast<int>(index_));
181
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
182
+ !is_cpu() || index_ <= 0,
183
+ "CPU device index must be -1 or zero, got ",
184
+ static_cast<int>(index_));
185
+ }
186
+ };
187
+
188
+ C10_API std::ostream& operator<<(std::ostream& stream, const Device& device);
189
+
190
+ } // namespace c10
191
+
192
+ namespace std {
193
+ template <>
194
+ struct hash<c10::Device> {
195
+ size_t operator()(c10::Device d) const noexcept {
196
+ // Are you here because this static assert failed? Make sure you ensure
197
+ // that the bitmasking code below is updated accordingly!
198
+ static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit");
199
+ static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit");
200
+ // Note [Hazard when concatenating signed integers]
201
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
202
+ // We must first convert to a same-sized unsigned type, before promoting to
203
+ // the result type, to prevent sign extension when any of the values is -1.
204
+ // If sign extension occurs, you'll clobber all of the values in the MSB
205
+ // half of the resulting integer.
206
+ //
207
+ // Technically, by C/C++ integer promotion rules, we only need one of the
208
+ // uint32_t casts to the result type, but we put in both for explicitness's
209
+ // sake.
210
+ uint32_t bits = static_cast<uint32_t>(static_cast<uint8_t>(d.type()))
211
+ << 16 |
212
+ static_cast<uint32_t>(static_cast<uint8_t>(d.index()));
213
+ return std::hash<uint32_t>{}(bits);
214
+ }
215
+ };
216
+ } // namespace std
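As a quick illustration (hypothetical sketch, not from this header) of the string schema and index rules documented above:

// Hypothetical sketch: construct devices from strings and query them.
#include <c10/core/Device.h>
#include <iostream>

void inspect_devices() {
  c10::Device cpu("cpu");       // no index given, so index() == -1
  c10::Device gpu("cuda:1");    // explicit, concrete device index
  TORCH_CHECK(gpu.is_cuda() && gpu.has_index() && gpu.index() == 1);
  TORCH_CHECK(!cpu.has_index());                    // "the current device"
  std::cout << gpu << " vs " << cpu.str() << '\n';  // "cuda:1 vs cpu"
}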
venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h ADDED
@@ -0,0 +1,28 @@
1
+ #include <c10/core/Allocator.h>
2
+ #include <c10/util/Exception.h>
3
+ #include <cstddef>
4
+ #include <cstdint>
5
+ #include <type_traits>
6
+
7
+ namespace c10 {
8
+
9
+ template <typename T>
10
+ class DeviceArray {
11
+ public:
12
+ DeviceArray(c10::Allocator& allocator, size_t size)
13
+ : data_ptr_(allocator.allocate(size * sizeof(T))) {
14
+ static_assert(std::is_trivial<T>::value, "T must be a trivial type");
15
+ TORCH_INTERNAL_ASSERT(
16
+ 0 == (reinterpret_cast<intptr_t>(data_ptr_.get()) % alignof(T)),
17
+ "c10::DeviceArray: Allocated memory is not aligned for this data type");
18
+ }
19
+
20
+ T* get() {
21
+ return static_cast<T*>(data_ptr_.get());
22
+ }
23
+
24
+ private:
25
+ c10::DataPtr data_ptr_;
26
+ };
27
+
28
+ } // namespace c10
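A usage sketch (hypothetical, not from this header), assuming an allocator has already been registered for the CPU device type as described in Allocator.h:

// Hypothetical sketch: a typed scratch buffer owned by a DeviceArray.
#include <c10/core/Allocator.h>
#include <c10/core/DeviceArray.h>

void fill_scratch() {
  c10::Allocator* alloc = c10::GetAllocator(c10::DeviceType::CPU);
  c10::DeviceArray<float> scratch(*alloc, /*size=*/256);  // 256 floats
  float* data = scratch.get();
  for (int i = 0; i < 256; ++i) {
    data[i] = 0.0f;  // direct writes are fine for CPU memory
  }
}  // the allocation is released when `scratch` goes out of scope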
venv/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h ADDED
@@ -0,0 +1,748 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <cstddef>
6
+ #include <cstdint>
7
+ #include <functional>
8
+ #include <ostream>
9
+ #include <string>
10
+
11
+ namespace c10 {
12
+
13
+ // Semantically, each value of BackendComponent identifies a "backend" for our
14
+ // dispatch. Some functionalities that we may dispatch to are allowed to
15
+ // register different handlers for each backend. The BackendComponent is then
16
+ // used to figure out which backend implementation to dispatch to.
17
+
18
+ // In implementation terms, the backend component identifies a specific "bit" in
19
+ // a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom
20
+ // ~12 "BackendComponent" bits, while the remaining upper bits are assigned to
21
+ // functionalities. When we encounter a functionality bit that is known to be
22
+ // customizable per-backend, then we also look at the lower BackendComponent
23
+ // bits and take the highest bit to determine which backend's implementation to
24
+ // use.
25
+
26
+ // WARNING! If you add a new backend component to the end of this list,
27
+ // make sure you register it before Meta.
28
+ // Meta must be at the end so that meta key in tls triggers meta kernels.
29
+ // (But you shouldn't: private use keys should have higher precedence than all
30
+ // built-in keys)
31
+
32
+ // If you add a new (non-privateuse) backend here,
33
+ // make sure to add an Autograd<Backend> fallthrough kernel
34
+ // in aten/src/ATen/core/VariableFallbackKernel.cpp
35
+
36
+ #define C10_FORALL_BACKEND_COMPONENTS(_, extra) \
37
+ _(CPU, extra) \
38
+ _(CUDA, extra) \
39
+ _(HIP, extra) \
40
+ _(XLA, extra) \
41
+ _(MPS, extra) \
42
+ _(IPU, extra) \
43
+ _(XPU, extra) \
44
+ _(HPU, extra) \
45
+ _(VE, extra) \
46
+ _(Lazy, extra) \
47
+ _(MTIA, extra) \
48
+ _(PrivateUse1, extra) \
49
+ _(PrivateUse2, extra) \
50
+ _(PrivateUse3, extra) \
51
+ _(Meta, extra)
52
+
53
+ // WARNING! If we add a new per-backend functionality key that has higher
54
+ // priority than Autograd, then make sure you update EndOfRuntimeBackendKeys
55
+
56
+ #define C10_FORALL_FUNCTIONALITY_KEYS(_) \
57
+ _(Dense, ) \
58
+ _(Quantized, Quantized) \
59
+ _(Sparse, Sparse) \
60
+ _(SparseCsr, SparseCsr) \
61
+ _(NestedTensor, NestedTensor) \
62
+ _(AutogradFunctionality, Autograd)
63
+
64
+ enum class BackendComponent : uint8_t {
65
+
66
+ // A "backend" is colloquially used to refer to handlers for dispatch
67
+ // which actually implement the numerics of an operation in question.
68
+ //
69
+ // Due to the nature of the enum, these backends are specified in
70
+ // an ordered way, but for most backends this order is not semantically
71
+ // meaningful (e.g., it's valid to reorder these backends without changing
72
+ // semantics). The only situation when backend ordering is meaningful
73
+ // is when the backend participates in multiple dispatch with another
74
+ // backend; e.g., CPU and CUDA (cuda must have higher priority).
75
+
76
+ // These keys don't correspond to individual kernels.
77
+ // Instead, they represent the backends that are allowed to override specific
78
+ // pieces of functionality:
79
+ // - dense kernels (e.g. DispatchKey::CPU)
80
+ // - sparse kernels (e.g. DispatchKey::SparseCPU)
81
+ // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
82
+ // - autograd kernels (e.g. DispatchKey::AutogradCPU)
83
+ // We reserve space in the runtime operator table for this full cross product
84
+ // of
85
+ // [backends in this enum] x [keys below that are explicitly marked as having
86
+ // per-backend functionality]
87
+ //
88
+ // A meta tensor is a tensor without any data associated with it. (They
89
+ // have also colloquially been referred to as tensors on the "null" device).
90
+ // A meta tensor can be used to dry run operators without actually doing any
91
+ // computation, e.g., add on two meta tensors would give you another meta
92
+ // tensor with the output shape and dtype, but wouldn't actually add anything.
93
+
94
+ InvalidBit = 0,
95
+ #define DEFINE_BACKEND_COMPONENT(n, _) n##Bit,
96
+ C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused)
97
+ #undef DEFINE_BACKEND_COMPONENT
98
+
99
+ // Define an alias to represent end of backend dispatch keys.
100
+ // If you add new backend keys after PrivateUse3, please also update it here.
101
+ EndOfBackendKeys = MetaBit,
102
+ };
103
+
104
+ // Semantically, a dispatch key identifies a possible "level" in our
105
+ // dispatch, for which a handler may be registered. Each handler corresponds
106
+ // to a type of functionality.
107
+ //
108
+ // In implementation terms, the dispatch key identifies a specific "bit" in a
109
+ // DispatchKeySet. Higher bit indexes get handled by dispatching first (because
110
+ // we "count leading zeros" when we extract the highest priority dispatch
111
+ // key.)
112
+ //
113
+ // Note [DispatchKey Classification]
114
+ // This enum actually contains several types of keys, which are explained
115
+ // in more detail further down:
116
+ // (1) non-customizable backends (e.g. FPGA)
117
+ // (2) non-customizable functionalities (e.g. Functionalize)
118
+ // (3) functionalized that are customizable per backend (e.g. Dense, Sparse,
119
+ // AutogradFunctionality) (4) per-backend instances of customizable
120
+ // functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g.
121
+ // CompositeImplicitAutograd)
122
+ //
123
+ // Of the categories above, it's important to note:
124
+ // (a) which keys are assigned individual bits in a DispatchKeySet
125
+ // (b) which keys are assigned individual slots in the runtime operator table
126
+ // ("Runtime keys")
127
+ //
128
+ // (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet.
129
+ // (1), (2) and (4) all get their own dedicated slots in the runtime operator
130
+ // table.
131
+
132
+ // See Note [DispatchKeySet Internal Representation] for more details.
133
+ //
134
+ // NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py
135
+ enum class DispatchKey : uint16_t {
136
+
137
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
138
+ // This is not a "real" functionality, but it exists to give us a "nullopt"
139
+ // element we can return for cases when a DispatchKeySet contains no elements.
140
+ // You can think a more semantically accurate definition of DispatchKey is:
141
+ //
142
+ // using DispatchKey = optional<RealDispatchKey>
143
+ //
144
+ // and Undefined == nullopt. We didn't actually represent
145
+ // it this way because optional<RealDispatchKey> would take two
146
+ // words, when DispatchKey fits in eight bits.
147
+
148
+ Undefined = 0,
149
+
150
+ // Define an alias for Undefined to represent CatchAll (long term
151
+ // this will get eliminated, but for now it's convenient)
152
+ CatchAll = Undefined,
153
+
154
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ //
155
+ // Every value in the enum (up to EndOfFunctionalityKeys)
156
+ // corresponds to an individual "functionality" that can be dispatched to.
157
+ // This is represented in the DispatchKeySet by assigning each of these enum
158
+ // values
159
+ // to each of the remaining (64 - len(BackendComponent)) bits.
160
+ //
161
+ // Most of these functionalities have a single handler assigned to them,
162
+ // making them "runtime keys".
163
+ // That map to a single slot in the runtime operator table.
164
+ //
165
+ // A few functionalities are allowed to be customizable per backend.
166
+ // See [Note: Per-Backend Functionality Dispatch Keys] for details.
167
+
168
+ // See [Note: Per-Backend Functionality Dispatch Keys]
169
+ Dense,
170
+
171
+ // Below are non-extensible backends.
172
+ // These are backends that currently don't have their own overrides for
173
+ // Autograd/Sparse/Quantized kernels,
174
+ // and we therefore don't waste space in the runtime operator table allocating
175
+ // space for them.
176
+ // If any of these backends ever need to customize, e.g., Autograd, then we'll
177
+ // need to add a DispatchKey::*Bit for them.
178
+
179
+ // TODO: put this in BackendComponents
180
+ FPGA, // Xilinx support lives out of tree at
181
+ // https://gitlab.com/pytorch-complex/vitis_kernels
182
+
183
+ // TODO: put this in BackendComponents
184
+ // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and
185
+ // https://github.com/microsoft/onnxruntime, and is also used to test general
186
+ // backend/extension machinery in the core. cf:
187
+ // - test/cpp_extensions/ort_extension.cpp
188
+ // - test/test_torch.py
189
+ // - aten/src/ATen/test/extension_backend_test.cpp
190
+ ORT,
191
+
192
+ Vulkan, // TODO: put this in BackendComponents
193
+ Metal, // TODO: put this in BackendComponents
194
+
195
+ // See [Note: Per-Backend Functionality Dispatch Keys]
196
+ Quantized,
197
+
198
+ // This backend is to support custom RNGs; it lets you go
199
+ // to a different kernel if you pass in a generator that is not a
200
+ // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this
201
+ // key:
202
+ // 1) set it as a second parameter of at::Generator constructor call in
203
+ // the user-defined PRNG class.
204
+ // 2) use it as a dispatch key while registering custom kernels
205
+ // (templatized kernels specialized for user-defined PRNG class)
206
+ // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp
207
+ CustomRNGKeyId,
208
+
209
+ // TODO: Make Mkldnn a functionality key, so we can give it Meta
210
+ // support
211
+ // Here are backends which specify more specialized operators
212
+ // based on the layout of the tensor. Note that the sparse backends
213
+ // are one case where ordering matters: sparse multi-dispatches with
214
+ // the corresponding dense tensors, and must be handled before them.
215
+ MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp
216
+ // NB: not to be confused with MKLDNN, which is Caffe2 only
217
+
218
+ // See [Note: Per-Backend Functionality Dispatch Keys]
219
+ Sparse,
220
+
221
+ SparseCsr,
222
+
223
+ NestedTensor,
224
+
225
+ // In some situations, it is not immediately obvious what the correct
226
+ // backend for function is, because the function in question doesn't
227
+ // have any "tensor" arguments. In this case, a BackendSelect function
228
+ // can be registered to implement the custom determination of the
229
+ // correct backend.
230
+ BackendSelect,
231
+
232
+ Python,
233
+
234
+ // Out-of-core key for Fake Tensor in torchdistx.
235
+ // See https://pytorch.org/torchdistx/latest/fake_tensor.html
236
+ // TODO: delete this in favor of Python-implemented fake tensor
237
+ Fake,
238
+ // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key
239
+ // is to insert code after the "autograd subsystem" runs, so this key should
240
+ // be directly after ADInplaceOrView and all of the autograd keys.
241
+ FuncTorchDynamicLayerBackMode,
242
+
243
+ // Alias and mutation removal.
244
+ // If some backends want to opt into only alias removal or only mutation
245
+ // removal,
246
+ // we can consider adding separate keys dedicated to those individual passes.
247
+ // See Note [Functionalization Pass In Core] for details.
248
+ Functionalize,
249
+
250
+ // The named dispatch key is set for any tensors with named dimensions.
251
+ // Although we have a dispatch key for named tensors, for historical reasons,
252
+ // this dispatch key doesn't do any of the substantive functionality for named
253
+ // tensor (though, hypothetically, it could!) At the moment, it's just
254
+ // responsible for letting us give good error messages when operations
255
+ // don't support named tensors.
256
+ //
257
+ // NB: If you ever consider moving named tensor functionality into
258
+ // this dispatch key, note that it might be necessary add another dispatch
259
+ // key that triggers before composite operators, in case a composite operator
260
+ // has named dimension propagation that doesn't match that of its
261
+ // constituent parts.
262
+ // TODO: delete this once torchdim lands in functorch
263
+ Named,
264
+
265
+ // The Conjugate dispatch key is set for any tensors that need to perform
266
+ // conjugation
267
+ // This is implemented at a dispatch level right before any backends run
268
+ Conjugate,
269
+
270
+ // The Negative dispatch key is set for any tensors that need to perform
271
+ // negation
272
+ // This is implemented at a dispatch level right before any backends run
273
+ Negative,
274
+
275
+ ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp
276
+
277
+ // Note [ADInplaceOrView key]
278
+ // ADInplaceOrView key is used by inplace or view ops to register a kernel
279
+ // that does additional setup for future autograd computation.
280
+ //
281
+ // 1. For inplace ops this kernel does version bump
282
+ // 2. For view ops this kernel does `as_view` setup where we properly setup
283
+ // DifferentiableViewMeta on the view tensors.
284
+ //
285
+ // For other ops it's fallthrough kernel since there's no extra
286
+ // work to do.
287
+ //
288
+ // Note [Dream: skip VariableType kernel when requires_grad=false]
289
+ //
290
+ // In an ideal world where we can skip VariableType kernel for inputs
291
+ // with requires_grad=false, instead of a fallthrough kernel, we'll
292
+ // register a kernel shown below to all functional ops as well:
293
+ // torch::Tensor my_functional_op(...) {
294
+ // {
295
+ // // Note for every op in VariableType, you need to go through
296
+ // // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the
297
+ // // key to TLS excluded set. If you don't go through it at all,
298
+ // // inplace/view ops called through `at::` inside your backend
299
+ // // kernel will dispatch to ADInplaceOrView kernels and do a lot
300
+ // // of extra work.
301
+ // at::AutoDispatchBelowADInplaceOrView guard;
302
+ // at::redispatch::my_functional_op(...);
303
+ // }
304
+ // }
305
+ // But this work is currently blocked since it adds an extra dispatch
306
+ // for all ops and it's non-trivial overhead at model level(a few percents).
307
+ // Thus our current approach takes advantage of the fact every kernel go
308
+ // through VariableType kernel first and pulls the
309
+ // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops
310
+ // up to the `VariableType` kernel. Thus we only add the extra dispatch
311
+ // to view/inplace ops to minimize its perf impact to real models.
312
+ ADInplaceOrView,
313
+ // Note [Alias Dispatch Key : Autograd]
314
+ // All backends are oblivious to autograd; autograd is handled as a
315
+ // layer which happens on top of all backends. It inspects the autograd
316
+ // metadata of all inputs, determines what autograd metadata should be
317
+ // constructed by the output, and otherwise defers to the backend to
318
+ // actually do the numeric computation. Autograd contains
319
+ // the bulk of this logic.
320
+
321
+ // Autograd is now an alias dispatch key which by default maps to all
322
+ // backend-specific autograd keys.
323
+ // Backend-specific allow backends to override the default kernel registered
324
+ // to Autograd key as needed.
325
+ // For example, XLA wants to define autograd for einsum directly.
326
+ // Registering a custom autograd implementation at the XLA key won't work
327
+ // because we process Autograd before XLA. This key has higher priority and
328
+ // gets processed first. You generally should NOT redispatch after handling
329
+ // autograd here (since that would result in execution of the Autograd
330
+ // operator, which you're trying to skip). In AutogradXLA implementations,
331
+ // you are responsible for handling autograd yourself, or deferring to other
332
+ // operators which support autograd.
333
+
334
+ // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and
335
+ // reserved user-defined backends. All other in-tree backends share the
336
+ // AutogradOther key. We can add specific autograd key for those backends
337
+ // upon request.
338
+ AutogradOther,
339
+
340
+ // See [Note: Per-Backend Functionality Dispatch Keys]
341
+ AutogradFunctionality,
342
+
343
+ // NestedTensor is an example of something that isn't a "real backend"
344
+ // (because it mostly consists of redispatching kernels)
345
+ // but it would like to override autograd functionality in C++.
346
+ // We can handle cases like this by adding an extra functionality key
347
+ // exclusively for handling autograd for NestedTensor.
348
+ // lives out of tree at
349
+ // https://github.com/pytorch/nestedtensor
350
+ AutogradNestedTensor,
351
+
352
+ Tracer,
353
+
354
+ // TODO: make Autocast a functionality key
355
+ // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed
356
+ // and inputs are saved for backward in the post-autocast type.
357
+ AutocastCPU,
358
+ AutocastXPU,
359
+ AutocastIPU,
360
+ AutocastHPU,
361
+ AutocastXLA,
362
+ // AutocastXLA is only being used for TPUs. XLA GPUs continue to use
363
+ // AutocastCUDA.
364
+ AutocastCUDA,
365
+ AutocastPrivateUse1,
366
+
367
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
368
+ // There are a number of alternative modes which may want to handle before
369
+ // autograd; for example, error checking, tracing, profiling or vmap. They
370
+ // go here.
371
+
372
+ FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype]
373
+
374
+ // Dispatch key for BatchedTensorImpl wrapping a nested tensor.
375
+ BatchedNestedTensor,
376
+
377
+ FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype]
378
+
379
+ // This is the dispatch key for BatchedTensorImpl, which is used to implement
380
+ // batching rules for vmap.
381
+ Batched,
382
+
383
+ // When we are inside a vmap, all tensors dispatch on this key.
384
+ // See Note: [DispatchKey::VmapMode usage] for more details.
385
+ VmapMode,
386
+
387
+ FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype]
388
+
389
+ // Out-of-core key for Deferred Module Initialization in torchdistx.
390
+ // See https://pytorch.org/torchdistx/latest/deferred_init.html
391
+ DeferredInit,
392
+
393
+ // Used by Python key logic to know the set of tls on entry to the dispatcher
394
+ // This kernel assumes it is the top-most non-functorch-related DispatchKey.
395
+ // If you add a key above, make sure to update the fallback implementation for
396
+ // this.
397
+ PythonTLSSnapshot,
398
+
399
+ // This key should be at the very top of the dispatcher
400
+ FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype]
401
+
402
+ // TESTING: This is intended to be a generic testing tensor type id.
403
+ // Don't use it for anything real; its only acceptable use is within a single
404
+ // process test. Use it by creating a TensorImpl with this DispatchKey, and
405
+ // then registering operators to operate on this type id. See
406
+ // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example.
407
+ TESTING_ONLY_GenericWrapper,
408
+
409
+ // TESTING: This is intended to be a generic testing tensor type id.
410
+ // Don't use it for anything real; its only acceptable use is within a ingle
411
+ // process test. Use it by toggling the mode on and off via
412
+ // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators
413
+ // to operate on this type id. See
414
+ // aten/src/ATen/core/dispatch/backend_fallback_test.cpp
415
+ // for a usage example
416
+ TESTING_ONLY_GenericMode,
417
+
418
+ // This key is used for pre-dispatch tracing in make_fx.
419
+ // It has lower priority than the PythonDispatcher key
420
+ // because we use the PythonDispatcher to intercept the key from python,
421
+ // and avoid having to implement it in C++.
422
+ PreDispatch,
423
+
424
+ // This is a bypass that allows you to skip running the C++ dispatcher
425
+ // entirely
426
+ PythonDispatcher,
427
+
428
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
429
+ EndOfFunctionalityKeys, // End of functionality keys.
430
+
431
+ // ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ //
432
+ // Here are backends which you think of as traditionally specifying
433
+ // how to implement operations on some device.
434
+
435
+ #define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n,
436
+
437
+ #define DEFINE_PER_BACKEND_KEYS(fullname, prefix) \
438
+ StartOf##fullname##Backends, \
439
+ C10_FORALL_BACKEND_COMPONENTS( \
440
+ DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \
441
+ EndOf##fullname##Backends = prefix##Meta,
442
+
443
+ C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS)
444
+
445
+ #undef DEFINE_PER_BACKEND_KEYS
446
+ #undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND
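+
+ // e.g., a rough sketch of what the expansion above produces for the "Dense"
+ // group (assuming C10_FORALL_BACKEND_COMPONENTS lists CPU, CUDA, ..., Meta,
+ // and that the Dense group uses an empty prefix):
+ //   StartOfDenseBackends, CPU, CUDA, /* ... */, Meta,
+ //   EndOfDenseBackends = Meta,
+ // and likewise SparseCPU, SparseCUDA, ... for the "Sparse" group, whose
+ // prefix is "Sparse".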
447
+
448
+ EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends,
449
+
450
+ // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ //
451
+ // Note [Alias Dispatch Keys]
452
+ // Alias dispatch keys are synthetic dispatch keys which map to multiple
453
+ // runtime dispatch keys. Alias keys have precedence, but they are always
454
+ // lower precedence than runtime keys. You can register a kernel to an
455
+ // alias key; the kernel may then be populated into the mapped runtime keys
456
+ // during dispatch table computation.
457
+ // If a runtime dispatch key has multiple kernels from alias keys, the
458
+ // winning kernel is chosen based on the precedence of the alias keys (but runtime
459
+ // keys always have precedence over alias keys).
460
+ // Alias keys won't be directly called during runtime.
461
+
462
+ // See Note [Alias Dispatch Key : Autograd]
463
+ Autograd,
464
+ CompositeImplicitAutograd, // registered at
465
+ // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp
466
+
467
+ // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from
468
+ // all
469
+ // other alias keysets
470
+ // and so precedence order doesn't matter
471
+ FuncTorchBatchedDecomposition, // registered at
472
+ // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp
473
+ // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is
474
+ // disjoint from all other alias keysets
475
+ CompositeImplicitAutogradNestedTensor, // registered at
476
+ // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp
477
+ CompositeExplicitAutograd, // registered at
478
+ // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
479
+ // See Note [CompositeExplicitAutogradNonFunctional Key]
480
+ CompositeExplicitAutogradNonFunctional, // registered at
481
+ // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
482
+
483
+ // Define an alias key to represent end of alias dispatch keys.
484
+ // If you add new alias keys after Autograd, please also update it here.
485
+ StartOfAliasKeys = Autograd,
486
+ EndOfAliasKeys = CompositeExplicitAutogradNonFunctional, //
487
+
488
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
489
+ // These aliases exist for backwards-compatibility reasons; they shouldn't
490
+ // be used
491
+ CPUTensorId = CPU,
492
+ CUDATensorId = CUDA,
493
+ DefaultBackend = CompositeExplicitAutograd,
494
+ PrivateUse1_PreAutograd = AutogradPrivateUse1,
495
+ PrivateUse2_PreAutograd = AutogradPrivateUse2,
496
+ PrivateUse3_PreAutograd = AutogradPrivateUse3,
497
+ Autocast = AutocastCUDA,
498
+ };
499
+
500
+ // Note [Private use DispatchKey]
501
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~
502
+ // Private use tensor IDs are preallocated tensor type IDs for use in user
503
+ // applications. Similar to private use fields in HTTP, they can be used
504
+ // by end users for experimental or private applications, without needing
505
+ // to "standardize" the tensor ID (which would be done by submitting a PR
506
+ // to PyTorch to add your type ID).
507
+ //
508
+ // Private use tensor IDs are appropriate to use if you want to experiment
509
+ // with adding a new tensor type (without having to patch PyTorch first) or
510
+ // have a private, non-distributed application that needs to make use of a
511
+ // new tensor type. Private use tensor IDs are NOT appropriate to use for
512
+ // libraries intended to be distributed to further users: please contact
513
+ // the PyTorch developers to get a type ID registered in this case.
514
+ //
515
+ // We provide two classes of private use tensor id: regular DispatchKeys
516
+ // and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend"
517
+ // DispatchKeys; if you were adding support for a new type of accelerator, you
518
+ // would use a backend DispatchKey, and ideally automatically reuse
519
+ // AutogradOther definitions already defined in PyTorch. AutogradPrivateUse
520
+ // DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for
521
+ // tensors that compose multiple internal tensors, and for cases when the
522
+ // built-in autograd formulas for operators are not appropriate.
523
+
524
+ static_assert(
525
+ (static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) +
526
+ static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys)) <= 64,
527
+ "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)"
528
+ " both map to backend and functionality bits"
529
+ " into a 64-bit bitmask; you must have less than 64 total entries between them");
530
+
531
+ // Check if a DispatchKey is an alias mapping to other runtime keys.
532
+ constexpr bool isAliasDispatchKey(DispatchKey k) {
533
+ return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys;
534
+ }
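+ // e.g. (illustrative sketch):
+ //   isAliasDispatchKey(DispatchKey::Autograd)                  -> true
+ //   isAliasDispatchKey(DispatchKey::CompositeImplicitAutograd) -> true
+ //   isAliasDispatchKey(DispatchKey::CPU)                       -> false (runtime key)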
535
+
536
+ // [Note: Per-Backend Functionality Dispatch Keys]
537
+ // Check if a DispatchKey is a per-backend functionality key
538
+ // Any functionalities that can be customized per-backend should be added here.
539
+ // These keys correspond to functionalities that can be customized individually
540
+ // per backend. While they only take up one bit in the `DispatchKeySet` bitset,
541
+ // they map to (# backends) slots in the operator table.
542
+ // Each of these keys also has a separate set of "runtime keys" in the dispatch
543
+ // key enum, per backend, which *do* map to the individual operator table slots.
544
+ // For example, the "Sparse" key maps to an individual bit in the
545
+ // DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual
546
+ // slots in the runtime operator table.
547
+
548
+ constexpr bool isPerBackendFunctionalityKey(DispatchKey k) {
549
+ if (k == DispatchKey::Dense || k == DispatchKey::Quantized ||
550
+ k == DispatchKey::Sparse || k == DispatchKey::SparseCsr ||
551
+ k == DispatchKey::AutogradFunctionality ||
552
+ k == DispatchKey::NestedTensor) {
553
+ return true;
554
+ } else {
555
+ return false;
556
+ }
557
+ }
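+ // e.g. (illustrative sketch):
+ //   isPerBackendFunctionalityKey(DispatchKey::Dense)            -> true  (building block)
+ //   isPerBackendFunctionalityKey(DispatchKey::CPU)              -> false (runtime instance of Dense)
+ //   isPerBackendFunctionalityKey(DispatchKey::FuncTorchBatched) -> false (not customizable per backend)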
558
+
559
+ // Note that this includes Undefined in the total count.
560
+ // BUT EndOfFunctionalityKeys is its own (placeholder) key.
561
+ // e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3.
562
+ // In the above example, there are 3 total functionality keys.
563
+ constexpr uint8_t num_functionality_keys =
564
+ static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys);
565
+
566
+ constexpr uint8_t num_backends =
567
+ static_cast<uint8_t>(BackendComponent::EndOfBackendKeys);
568
+
569
+ // Note [No More Than 16 Backends]
570
+ // Search for this note to find places in the code where the "no more than 16
571
+ // backends" invariant is baked in.
572
+ static_assert(
573
+ static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) <= 16,
574
+ "BackendComponent currently only supports <= 16 backends. If we really need to extend this, \
575
+ there are a few places where this invariant is baked in");
576
+
577
+ constexpr uint8_t numPerBackendFunctionalityKeys() {
578
+ uint8_t count = 0;
579
+ for (uint8_t k = 0; k <= num_functionality_keys; ++k) {
580
+ if (isPerBackendFunctionalityKey(static_cast<DispatchKey>(k)))
581
+ ++count;
582
+ }
583
+ return count;
584
+ }
585
+
586
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
587
+ // See [Note: Trimmed Mobile Dispatch Keys]
588
+ constexpr uint16_t num_runtime_entries = 8;
589
+ #else
590
+ constexpr uint16_t num_runtime_entries = num_functionality_keys +
591
+ (numPerBackendFunctionalityKeys() * (num_backends - 1));
592
+ #endif
593
+
594
+ // See Note [No More Than 16 Backends]
595
+ constexpr uint16_t full_backend_mask =
596
+ (static_cast<uint16_t>(1) << num_backends) - 1;
597
+
598
+ C10_API const char* toString(DispatchKey);
599
+ C10_API const char* toString(BackendComponent);
600
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
601
+ C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
602
+
603
+ C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
604
+
605
+ // Parses a string into a dispatch key.
606
+ // If the string cannot be correctly parsed, throws an exception.
607
+ C10_API c10::DispatchKey parseDispatchKey(const std::string& k);
608
+
609
+ // These are some convenience identifiers for dispatch keys which are
610
+ // shorter to type than their long counterparts. Note that some of these
611
+ // dispatch keys directly correspond to DeviceType; and most APIs that
612
+ // accept DispatchKey also accept DeviceType; e.g.,
613
+ // torch::dispatch(torch::kCPU, ...) is also valid.
614
+ constexpr DispatchKey kAutograd = DispatchKey::Autograd;
615
+
616
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
617
+ // This function relies on the invariant that the dispatch keys between
618
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
619
+ // in the same order as `BackendComponent`.
620
+ constexpr BackendComponent toBackendComponent(DispatchKey k) {
621
+ if (k >= DispatchKey::StartOfDenseBackends &&
622
+ k <= DispatchKey::EndOfDenseBackends) {
623
+ return static_cast<BackendComponent>(
624
+ static_cast<uint8_t>(k) -
625
+ static_cast<uint8_t>(DispatchKey::StartOfDenseBackends));
626
+ } else if (
627
+ k >= DispatchKey::StartOfQuantizedBackends &&
628
+ k <= DispatchKey::EndOfQuantizedBackends) {
629
+ return static_cast<BackendComponent>(
630
+ static_cast<uint8_t>(k) -
631
+ static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends));
632
+ } else if (
633
+ k >= DispatchKey::StartOfSparseBackends &&
634
+ k <= DispatchKey::EndOfSparseBackends) {
635
+ return static_cast<BackendComponent>(
636
+ static_cast<uint8_t>(k) -
637
+ static_cast<uint8_t>(DispatchKey::StartOfSparseBackends));
638
+ } else if (
639
+ k >= DispatchKey::StartOfSparseCsrBackends &&
640
+ k <= DispatchKey::EndOfSparseCsrBackends) {
641
+ return static_cast<BackendComponent>(
642
+ static_cast<uint8_t>(k) -
643
+ static_cast<uint8_t>(DispatchKey::StartOfSparseCsrBackends));
644
+ } else if (
645
+ k >= DispatchKey::StartOfNestedTensorBackends &&
646
+ k <= DispatchKey::EndOfNestedTensorBackends) {
647
+ return static_cast<BackendComponent>(
648
+ static_cast<uint8_t>(k) -
649
+ static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends));
650
+ } else if (
651
+ k >= DispatchKey::StartOfAutogradFunctionalityBackends &&
652
+ k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
653
+ return static_cast<BackendComponent>(
654
+ static_cast<uint8_t>(k) -
655
+ static_cast<uint8_t>(
656
+ DispatchKey::StartOfAutogradFunctionalityBackends));
657
+ } else {
658
+ return BackendComponent::InvalidBit;
659
+ }
660
+ }
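+ // e.g. (illustrative sketch, assuming CPU and CUDA are the first two backend
+ // components, so CPUBit = 1 and CUDABit = 2):
+ //   toBackendComponent(DispatchKey::CPU)              -> BackendComponent::CPUBit
+ //   toBackendComponent(DispatchKey::SparseCUDA)       -> BackendComponent::CUDABit
+ //   toBackendComponent(DispatchKey::FuncTorchBatched) -> BackendComponent::InvalidBit
+ //     (functionality-only keys carry no backend bits)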
661
+
662
+ constexpr DispatchKey toFunctionalityKey(DispatchKey k) {
663
+ if (k <= DispatchKey::EndOfFunctionalityKeys) {
664
+ return k;
665
+ } else if (k <= DispatchKey::EndOfDenseBackends) {
666
+ return DispatchKey::Dense;
667
+ } else if (k <= DispatchKey::EndOfQuantizedBackends) {
668
+ return DispatchKey::Quantized;
669
+ } else if (k <= DispatchKey::EndOfSparseBackends) {
670
+ return DispatchKey::Sparse;
671
+ } else if (k <= DispatchKey::EndOfSparseCsrBackends) {
672
+ return DispatchKey::SparseCsr;
673
+ } else if (k <= DispatchKey::EndOfNestedTensorBackends) {
674
+ return DispatchKey::NestedTensor;
675
+ } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
676
+ return DispatchKey::AutogradFunctionality;
677
+ } else {
678
+ return DispatchKey::Undefined;
679
+ }
680
+ }
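+ // e.g. (illustrative sketch):
+ //   toFunctionalityKey(DispatchKey::CPU)          -> DispatchKey::Dense
+ //   toFunctionalityKey(DispatchKey::SparseCPU)    -> DispatchKey::Sparse
+ //   toFunctionalityKey(DispatchKey::AutogradCUDA) -> DispatchKey::AutogradFunctionality
+ //   toFunctionalityKey(DispatchKey::Sparse)       -> DispatchKey::Sparse (already a functionality key)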
681
+
682
+ BackendComponent toBackendComponent(DeviceType device_type);
683
+
684
+ // Given (DispatchKey::Dense, BackendComponent::CUDABit), returns
685
+ // DispatchKey::CUDA.
686
+ // See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
687
+ // This function relies on the invariant that the dispatch keys between
688
+ // StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
689
+ // in the same order as `BackendComponent`.
690
+ constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
691
+ DispatchKey functionality_k,
692
+ BackendComponent backend_k) {
693
+ if (functionality_k == DispatchKey::Dense) {
694
+ return static_cast<DispatchKey>(
695
+ static_cast<uint8_t>(DispatchKey::StartOfDenseBackends) +
696
+ static_cast<uint8_t>(backend_k));
697
+ }
698
+ if (functionality_k == DispatchKey::Sparse) {
699
+ return static_cast<DispatchKey>(
700
+ static_cast<uint8_t>(DispatchKey::StartOfSparseBackends) +
701
+ static_cast<uint8_t>(backend_k));
702
+ }
703
+ if (functionality_k == DispatchKey::SparseCsr) {
704
+ return static_cast<DispatchKey>(
705
+ static_cast<uint8_t>(DispatchKey::StartOfSparseCsrBackends) +
706
+ static_cast<uint8_t>(backend_k));
707
+ }
708
+ if (functionality_k == DispatchKey::Quantized) {
709
+ return static_cast<DispatchKey>(
710
+ static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends) +
711
+ static_cast<uint8_t>(backend_k));
712
+ }
713
+ if (functionality_k == DispatchKey::NestedTensor) {
714
+ return static_cast<DispatchKey>(
715
+ static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends) +
716
+ static_cast<uint8_t>(backend_k));
717
+ }
718
+ if (functionality_k == DispatchKey::AutogradFunctionality) {
719
+ return static_cast<DispatchKey>(
720
+ static_cast<uint8_t>(
721
+ DispatchKey::StartOfAutogradFunctionalityBackends) +
722
+ static_cast<uint8_t>(backend_k));
723
+ }
724
+ return DispatchKey::Undefined;
725
+ }
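+ // e.g. (illustrative sketch; roughly the inverse of the two functions above):
+ //   toRuntimePerBackendFunctionalityKey(DispatchKey::Sparse,
+ //                                       BackendComponent::CPUBit)  -> DispatchKey::SparseCPU
+ //   toRuntimePerBackendFunctionalityKey(DispatchKey::AutogradFunctionality,
+ //                                       BackendComponent::CUDABit) -> DispatchKey::AutogradCUDA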
726
+
727
+ } // namespace c10
728
+
729
+ namespace torch {
730
+ // Expose the constant, but not the TYPE (DispatchKey is an implementation
731
+ // detail!)
732
+ // NOLINTNEXTLINE(misc-unused-using-decls)
733
+ using c10::kAutograd;
734
+ } // namespace torch
735
+
736
+ // NB: You really shouldn't use this instance; this enum is guaranteed
737
+ // to be pretty small so a regular array should be acceptable.
738
+ namespace std {
739
+ template <>
740
+ struct hash<c10::DispatchKey> {
741
+ typedef size_t result_type;
742
+ typedef c10::DispatchKey argument_type;
743
+
744
+ size_t operator()(c10::DispatchKey x) const {
745
+ return static_cast<size_t>(x);
746
+ }
747
+ };
748
+ } // namespace std
venv/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h ADDED
@@ -0,0 +1,941 @@
1
+ #pragma once
2
+ #include <c10/core/DispatchKey.h>
3
+ #include <c10/macros/Export.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Metaprogramming.h>
7
+ #include <c10/util/TypeList.h>
8
+ #include <c10/util/llvmMathExtras.h>
9
+ #include <array>
10
+ #include <cstddef>
11
+ #include <cstdint>
12
+ #include <initializer_list>
13
+ #include <iterator>
14
+ #include <ostream>
15
+ #include <string>
16
+ #include <type_traits>
17
+
18
+ namespace c10 {
19
+
20
+ struct FunctionalityOffsetAndMask {
21
+ // empty constructor shouldn't be used; only needed to initialize
22
+ // the array before populating it.
23
+ FunctionalityOffsetAndMask() = default;
24
+ FunctionalityOffsetAndMask(uint16_t offset, uint16_t mask)
25
+ : offset(offset), mask(mask) {}
26
+ // This needs to be big enough to cover the size of the operator table.
27
+ uint16_t offset{};
28
+ // See Note [No More Than 16 Backends]
29
+ // This mask needs to be big enough to mask all of the backend bits.
30
+ // We probably don't ever want to have more than 16 backend bits, so uint16_t
31
+ // should be enough.
32
+ uint16_t mask{};
33
+ };
34
+ static_assert(
35
+ c10::num_runtime_entries < 65536,
36
+ "The dispatcher currently only supports up to 2^16 runtime entries");
37
+
38
+ C10_API std::array<FunctionalityOffsetAndMask, num_functionality_keys>
39
+ initializeFunctionalityOffsetsAndMasks();
40
+
41
+ C10_ALWAYS_INLINE static const std::
42
+ array<FunctionalityOffsetAndMask, num_functionality_keys>&
43
+ offsetsAndMasks() {
44
+ static auto offsets_and_masks_ = initializeFunctionalityOffsetsAndMasks();
45
+ return offsets_and_masks_;
46
+ }
47
+
48
+ // A representation of a set of DispatchKeys. A DispatchKeySet contains both
49
+ // "functionality" bits and "backend bits", and every tensor holds its own
50
+ // DispatchKeySet. The Dispatcher implements multiple dispatch by grabbing the
51
+ // keyset on every input tensor, or’ing them together, and dispatching to a
52
+ // specific piece of functionality. The functionality bits are *ordered*. When
53
+ // multiple functionality bits are set, we use the highest priority
54
+ // functionality. Similarly, multiple backend bits can theoretically be set if
55
+ // you call an operator with multiple tensors from different devices (e.g. CPU
56
+ // and CUDA), although support for mixed device dispatch is limited (the only
57
+ // kernels that gracefully handle mixed device inputs for now are cuda kernels
58
+ // that take in a scalar cpu tensor).
59
+
60
+ // A representation of a set of DispatchKeys. A tensor may have multiple
61
+ // tensor type ids, e.g., a Variable tensor can also be a CPU tensor; the
62
+ // DispatchKeySet specifies what type ids apply. The internal representation is
63
+ // as a 64-bit bit set (this means only 64 tensor type ids are supported).
64
+ //
65
+ // As mentioned above, DispatchKeys are ordered; thus, we can ask questions like
66
+ // "what is the highest priority DispatchKey in the set"? (The set itself is
67
+ // not ordered; two sets with the same ids will always have the ids ordered in
68
+ // the same way.)
69
+ //
70
+ // Note [DispatchKeySet Internal Representation]
71
+ // Internally, dispatch keys are packed into 64-bit DispatchKeySet objects
72
+ // that get passed around at runtime.
73
+ // However, there isn't necessarily a 1-to-1 mapping between bits in the keyset
74
+ // and individual dispatch keys.
75
+ //
76
+ // First: why do we have this distinction, and why not map every dispatch key
77
+ // directly to a bit? This is mostly because we have several types of
78
+ // functionalities that different backends would like to customize. For example,
79
+ // we have:
80
+ // - "Dense": CPU, CUDA, XLA, ... (~12 keys)
81
+ // - "Sparse": SparseCPU, SparseCUDA, ...
82
+ // - "SparseCsr": SparseCsrCPU, SparseCsrCUDA, ...
83
+ // - "Quantized": QuantizedCPU, QuantizedCUDA, QuantizedXLA, ...
84
+ // - "Autograd": AutogradCPU, AutogradCUDA, Autograd XLA, ...
85
+ // The problem is that the total number of keys grows quadratically with [#
86
+ // backends] x [# functionalities], making it very difficult to map each key
87
+ // directly to a bit in a bitset without dramatically increasing the size of the
88
+ // bitset over time.
89
+ //
90
+ // The two enums (BackendComponent and DispatchKey) can be divided roughly into
91
+ // 5 categories.
92
+ //
93
+ // (1) "Building block" keys
94
+ // (a) backends: Everything in the BackendComponent enum (e.g. CPUBit,
95
+ // CUDABit)
+ // (b) functionalities: (per-backend) functionality-bit DispatchKeys
96
+ // (e.g. AutogradFunctionality, SparseCsr, Sparse, Dense)
97
+ // (2) "Runtime" keys
98
+ // (a) "non-customizable backends" (e.g. FPGA)
99
+ // (b) "non-customizable functionalities" (e.g. Functionalize)
100
+ // (c) "per-backend instances of customizable functionalities" (e.g. CPU,
101
+ // SparseCPU, AutogradCPU)
102
+ // (3) "Alias" DispatchKeys (see Note [Alias Dispatch Keys])
103
+ //
104
+ // (1) Building block keys always correspond to individual bits in a
105
+ // DispatchKeySet. They can also be combined in a DispatchKeySet to form actual
106
+ // runtime keys. e.g.
107
+ // auto dense_cpu_ks = DispatchKeySet({DispatchKey::CPUBit,
108
+ // DispatchKey::Dense});
109
+ // // The keyset has the runtime dense-cpu key.
110
+ // dense_cpu_ks.has(DispatchKey::CPU);
111
+ // // And it contains the building block keys too.
112
+ // dense_cpu_ks.has(DispatchKey::CPUBit);
113
+ // dense_cpu_ks.has(DispatchKey::Dense);
114
+ //
115
+ // Not every backend and not every functionality counts as a "building block
116
+ // key". This is mostly to give us more levers to pull in the design space.
117
+ // Backend keys and functionality keys that count as "building blocks" will
118
+ // contribute to a full cross product of functionality that can be overridden.
119
+ //
120
+ // For example, right now we have at least 12 "backend" building
121
+ // blocks (CPU, CUDA, XLA, ...) and at least 5 "functionality"
122
+ // building blocks (Dense, Sparse, SparseCsr, Quantized,
123
+ // AutogradFunctionality, ...). These keys together allow every
124
+ // dispatcher operator to be customized in up to 12*5 different
125
+ // ways. Each of those requires a slot in the operator table of every
126
+ // dispatcher operator. Not every piece of functionality necessarily
127
+ // needs to be customizable per-backend, and not every backend
128
+ // necessarily needs to be able to customize every type of
129
+ // functionality.
130
+ //
131
+ //
132
+ // (2) Every runtime key corresponds directly to a slot in an operator's runtime
133
+ // dispatch table, and you can directly register kernels to a runtime dispatch
134
+ // key.
135
+ //
136
+ // For per-backend functionalities like "Dense" or "AutogradFunctionality",
137
+ // you can think of the corresponding runtime dispatch keys as "instances" of
138
+ // that functionality, per backend. E.g. "CPU", "CUDA", "XLA", etc. are all
139
+ // runtime instances of the "Dense" building block key.
140
+
141
+ // (2a) and (2b) are represented identically in the DispatchKeySet logic:
142
+ // - backend-agnostic functionalities (e.g. FuncTorchBatched) are NOT
143
+ // customizable per backend.
144
+ // In order to do so, we'd need to promote it to a per-backend functionality
145
+ // "building block" key.
146
+ // - non-customizable backends (e.g. FPGA) can NOT customize existing
147
+ // functionality like Sparse, Autograd, etc.
148
+ // In order to do so, we'd need to promote it to a backend "building block"
149
+ // key.
150
+ //
151
+ // In both cases, these keys directly correspond to runtime slots in the
152
+ // operator table.
153
+ //
154
+ //
155
+ // (3) "Alias" keys
156
+ // See Note [Alias Dispatch Keys]
157
+ //
158
+ // Final note: for anyone making future changes to the Dispatcher +
159
+ // DispatchKeySet internals, there's a closed PR with a basic
160
+ // python-implementation of the Dispatcher that might be useful in quickly
161
+ // testing out and validating changes. See it at
162
+ // https://github.com/pytorch/pytorch/pull/68743
163
+
164
+ // An undefined tensor is one with an empty tensor type set.
165
+ class DispatchKeySet final {
166
+ public:
167
+ enum Full { FULL };
168
+ enum FullAfter { FULL_AFTER };
169
+ enum Raw { RAW };
170
+
171
+ // NB: default constructor representation as zero is MANDATORY as
172
+ // use of DispatchKeySet in TLS requires this.
173
+ constexpr DispatchKeySet() = default;
174
+
175
+ constexpr DispatchKeySet(Full)
176
+ : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}
177
+
178
+ constexpr DispatchKeySet(FullAfter, DispatchKey t)
179
+ // LSB after t are OK, but not t itself.
180
+ // "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
181
+ // Quantized > Dense). But backends don't really have an ordering.
182
+ // Therefore, we're enforcing that FullAfter can only be used on
183
+ // "functionality" keys.
184
+ : repr_(
185
+ (1ULL
186
+ << (num_backends + static_cast<uint8_t>(toFunctionalityKey(t)) -
187
+ 1)) -
188
+ 1) {
189
+ *this = add(DispatchKey::PythonDispatcher);
190
+ }
191
+
192
+ // Public version of DispatchKeySet(uint64_t) API; external users
193
+ // must be explicit when they do this!
194
+ constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
195
+
196
+ constexpr explicit DispatchKeySet(BackendComponent k) {
197
+ if (k == BackendComponent::InvalidBit) {
198
+ repr_ = 0;
199
+ } else {
200
+ repr_ = 1ULL << (static_cast<uint8_t>(k) - 1);
201
+ }
202
+ }
203
+
204
+ constexpr explicit DispatchKeySet(DispatchKey k) {
205
+ // NOLINTNEXTLINE(bugprone-branch-clone)
206
+ if (k == DispatchKey::Undefined) {
207
+ // Case 1: handle Undefined specifically
208
+ repr_ = 0;
209
+ } else if (k <= DispatchKey::EndOfFunctionalityKeys) {
210
+ // Case 2: handle "functionality-only" keys
211
+ // These keys have a functionality bit set, but no backend bits
212
+ // These can technically be either:
213
+ // - valid runtime keys (e.g. DispatchKey::AutogradOther,
214
+ // DispatchKey::FuncTorchBatched, etc)
215
+ // - "building block" keys that aren't actual runtime keys (e.g.
216
+ // DispatchKey::Dense or Sparse)
217
+ uint64_t functionality_val = 1ULL
218
+ << (num_backends + static_cast<uint8_t>(k) - 1);
219
+ repr_ = functionality_val;
220
+ } else if (k <= DispatchKey::EndOfRuntimeBackendKeys) {
221
+ // Case 3: "runtime" keys that have a functionality bit AND a backend bit.
222
+ // First compute which bit to flip for the functionality.
223
+ auto functionality_k = toFunctionalityKey(k);
224
+ // The - 1 is because Undefined is technically a "functionality" that
225
+ // doesn't show up in the bitset. So e.g. Dense is technically the second
226
+ // functionality, but the lowest functionality bit.
227
+ uint64_t functionality_val = 1ULL
228
+ << (num_backends + static_cast<uint8_t>(functionality_k) - 1);
229
+
230
+ // then compute which bit to flip for the backend
231
+ // Case 4a: handle the runtime instances of "per-backend functionality"
232
+ // keys For example, given DispatchKey::CPU, we should set:
233
+ // - the Dense functionality bit
234
+ // - the CPUBit backend bit
235
+ // first compute which bit to flip for the backend
236
+ auto backend_k = toBackendComponent(k);
237
+ uint64_t backend_val = backend_k == BackendComponent::InvalidBit
238
+ ? 0
239
+ : 1ULL << (static_cast<uint8_t>(backend_k) - 1);
240
+ repr_ = functionality_val + backend_val;
241
+ } else {
242
+ // At this point, we should have covered every case except for alias keys.
243
+ // Technically it would be possible to add alias dispatch keys to a
244
+ // DispatchKeySet, but the semantics are a little confusing and this
245
+ // currently isn't needed anywhere.
246
+ repr_ = 0;
247
+ }
248
+ }
249
+
250
+ constexpr uint64_t keys_to_repr(std::initializer_list<DispatchKey> ks) {
251
+ uint64_t repr = 0;
252
+ for (auto k : ks) {
253
+ repr |= DispatchKeySet(k).repr_;
254
+ }
255
+ return repr;
256
+ }
257
+
258
+ constexpr uint64_t backend_bits_to_repr(
259
+ std::initializer_list<BackendComponent> ks) {
260
+ uint64_t repr = 0;
261
+ for (auto k : ks) {
262
+ repr |= DispatchKeySet(k).repr_;
263
+ }
264
+ return repr;
265
+ }
266
+
267
+ explicit constexpr DispatchKeySet(std::initializer_list<DispatchKey> ks)
268
+ : repr_(keys_to_repr(ks)) {}
269
+
270
+ explicit constexpr DispatchKeySet(std::initializer_list<BackendComponent> ks)
271
+ // Note: for some reason, putting this logic directly in the constructor
272
+ // appears to fail to compile on CUDA 10.1.
273
+ // See an example internal failure at
274
+ // https://www.internalfb.com/intern/skycastle/run/76561193669136035/artifact/actionlog.76561193742069401.stderr
275
+ : repr_(backend_bits_to_repr(ks)) {}
276
+
277
+ // Test if a DispatchKey is in the set
278
+ inline bool has(DispatchKey t) const {
279
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(t != DispatchKey::Undefined);
280
+ return has_all(DispatchKeySet(t));
281
+ }
282
+ constexpr bool has_backend(BackendComponent t) const {
283
+ return has_all(DispatchKeySet(t));
284
+ }
285
+
286
+ // Test if a DispatchKey is in the set
287
+ // Given a DispatchKeySet of functionality keys and (potentially) backend
288
+ // keys, tests if all of them are in the current set.
289
+ constexpr bool has_all(DispatchKeySet ks) const {
290
+ return static_cast<bool>((repr_ & ks.repr_) == ks.repr_);
291
+ }
292
+
293
+ // Given a DispatchKeySet of functionality keys and (potentially) backend
294
+ // keys, tests if any of them are in the current set. This could technically
295
+ // be pretty easily implemented using has(). It is strictly a perf
296
+ // optimization though. There are many places in the code base where we want
297
+ // to test for multiple functionality keys together. HOWEVER, runtime
298
+ // per-backend functionality keys aren't allowed to be used with this
299
+ // function, because you can end up with weird results. e.g.
300
+ // DispatchKeySet(DispatchKey::AutogradCPU).has_any(DispatchKeySet(DispatchKey::CPU))
301
+ // would return true.
302
+ inline bool has_any(DispatchKeySet ks) const {
303
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
304
+ // Either there are no backend bits in the input keyset
305
+ ((ks.repr_ & full_backend_mask) == 0) ||
306
+ // or there are no per-backend-functionality bits
307
+ // See [Note: Per-Backend Functionality Dispatch Keys]
308
+ ((ks &
309
+ DispatchKeySet({
310
+ DispatchKey::Dense,
311
+ DispatchKey::Quantized,
312
+ DispatchKey::Sparse,
313
+ DispatchKey::SparseCsr,
314
+ DispatchKey::AutogradFunctionality,
315
+ })
316
+ .repr_) == 0));
317
+ return static_cast<bool>((repr_ & ks.repr_) != 0);
318
+ }
319
+ // Test if DispatchKeySet is a superset of ks.
320
+ bool isSupersetOf(DispatchKeySet ks) const {
321
+ return (repr_ & ks.repr_) == ks.repr_;
322
+ }
323
+ // Perform set union
324
+ constexpr DispatchKeySet operator|(DispatchKeySet other) const {
325
+ return DispatchKeySet(repr_ | other.repr_);
326
+ }
327
+ // Perform set intersection
328
+ constexpr DispatchKeySet operator&(DispatchKeySet other) const {
329
+ return DispatchKeySet(repr_ & other.repr_);
330
+ }
331
+ // Compute the set difference self - other,
332
+ // but ONLY for the functionality keys.
333
+ // Any backend bits set on self will remain unchanged.
334
+ // See Note [Removing keys from DispatchKeySet Only Affects Functionality
335
+ // Keys]
336
+ constexpr DispatchKeySet operator-(DispatchKeySet other) const {
337
+ return DispatchKeySet(repr_ & (full_backend_mask | ~other.repr_));
338
+ }
339
+
340
+ // Compute self ^ other
341
+ constexpr DispatchKeySet operator^(DispatchKeySet other) const {
342
+ return DispatchKeySet(repr_ ^ other.repr_);
343
+ }
344
+ bool operator==(DispatchKeySet other) const {
345
+ return repr_ == other.repr_;
346
+ }
347
+ bool operator!=(DispatchKeySet other) const {
348
+ return repr_ != other.repr_;
349
+ }
350
+ // Add a DispatchKey to the DispatchKey set. Does NOT mutate,
351
+ // returns the extended DispatchKeySet!
352
+ C10_NODISCARD constexpr DispatchKeySet add(DispatchKey t) const {
353
+ return *this | DispatchKeySet(t);
354
+ }
355
+ C10_NODISCARD constexpr DispatchKeySet add(DispatchKeySet ks) const {
356
+ return *this | ks;
357
+ }
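+ // e.g. (illustrative sketch of the non-mutating behavior):
+ //   constexpr auto ks  = DispatchKeySet(DispatchKey::Dense);
+ //   constexpr auto ks2 = ks.add(DispatchKey::AutogradFunctionality);
+ //   // ks still only has the Dense bit set; ks2 has both the Dense and
+ //   // AutogradFunctionality bits set.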
358
+
359
+ // Remove a DispatchKey from the DispatchKey set.
360
+ // This is generally not an operation you should be doing
361
+ // (it's used to implement the printing overload, operator<<)
362
+ //
363
+ // Note [Removing keys from DispatchKeySet Only Affects Functionality Keys]
364
+ // Only functionality bits are allowed to be removed from a keyset.
365
+ // For now, we're only allowing removal of "functionality bits" from the
366
+ // keyset, which is specifically needed by the fallthrough key calculation
367
+ // logic. Why is removing backend bits problematic? Consider this example:
368
+ //
369
+ // DispatchKeySet([DispatchKey.CPU, DispatchKey.AutogradCUDA,
370
+ // DispatchKey.CUDA]).remove(DispatchKey.AutogradCUDA)
371
+ // DispatchKeySet([DispatchKey.CPU,
372
+ // DispatchKey.AutogradCUDA]).remove(DispatchKey.AutogradCUDA)
373
+ //
374
+ // What do we want to happen?
375
+ // Technically, we'd like it to be true that after removal,
376
+ // the first keyset still has the CUDA dispatch key while the second doesn't.
377
+ // Unfortunately there's no way to represent that, because the two keysets are
378
+ // represented the same way internally: functionality bits: Autograd, Dense
379
+ // backend bits: CPU, CUDA
380
+ //
381
+ // Instead, remove(DispatchKey.AutogradCPU) will only remove the "Autograd"
382
+ // bit from the bitset.
383
+ C10_NODISCARD constexpr DispatchKeySet remove(DispatchKey t) const {
384
+ return DispatchKeySet(
385
+ repr_ & ~(DispatchKeySet(t).repr_ & ~full_backend_mask));
386
+ }
387
+ // You're allowed to remove a backend bit from a DispatchKeySet,
388
+ // but you have to be explicit about it (remove_backend() instead of
389
+ // remove()).
390
+ constexpr DispatchKeySet remove_backend(BackendComponent b) const {
391
+ return DispatchKeySet(repr_ & ~(DispatchKeySet(b).repr_));
392
+ }
393
+ // Is the set empty? (AKA undefined tensor)
394
+ bool empty() const {
395
+ return repr_ == 0;
396
+ }
397
+ uint64_t raw_repr() {
398
+ return repr_;
399
+ }
400
+
401
+ DispatchKey highestFunctionalityKey() const {
402
+ auto functionality_idx = indexOfHighestBit();
403
+ // This means that none of the functionality bits were set.
404
+ if (functionality_idx < num_backends)
405
+ return DispatchKey::Undefined;
406
+ // The first num_backend bits in the keyset don't correspond to real
407
+ // dispatch keys.
408
+ return static_cast<DispatchKey>(functionality_idx - num_backends);
409
+ }
410
+
411
+ // This is similar to toBackendComponent(DispatchKey), but less restrictive.
412
+ // toBackendComponent() errors out if the key that it was passed has no
413
+ // backend bits, which is useful for error checking. We need a version of that
414
+ // here that can also handle "fake" backends like FPGA, because they need to
415
+ // map to the AutogradOther key. For those backends, we return
416
+ // BackendComponent::InvalidBit.
417
+ BackendComponent highestBackendKey() const {
418
+ // mask to mask out functionality bits
419
+ auto backend_idx =
420
+ DispatchKeySet(repr_ & full_backend_mask).indexOfHighestBit();
421
+ // all zeros across the backend bits means that no backend bits are set.
422
+ if (backend_idx == 0)
423
+ return BackendComponent::InvalidBit;
424
+ return static_cast<BackendComponent>(backend_idx);
425
+ }
426
+
427
+ // returns the DispatchKey of highest priority in the set.
428
+ DispatchKey highestPriorityTypeId() const {
429
+ auto functionality_k = highestFunctionalityKey();
430
+ if (isPerBackendFunctionalityKey(functionality_k)) {
431
+ return toRuntimePerBackendFunctionalityKey(
432
+ functionality_k, highestBackendKey());
433
+ }
434
+ return functionality_k;
435
+ }
436
+
437
+ // Returns the index of the most-significant bit in the keyset.
438
+ // This is used as part of the index calculation into the operator table to get:
439
+ // - the highest "functionality" bit in the keyset.
440
+ // - the highest "backend" bit in the keyset.
441
+ uint8_t indexOfHighestBit() const {
442
+ return 64 - llvm::countLeadingZeros(repr_);
443
+ }
444
+
445
+ #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
446
+ // [Note: Trimmed Mobile Dispatch Keys]
447
+ /**
448
+ * The method below maps the dispatch key in the enum DispatchKey to an
449
+ * integer index in the dispatchTable_ array in OperatorEntry. The array
450
+ * is trimmed for mobile to reduce peak memory usage since it's
451
+ * unnecessary to reserve additional space for dispatch keys that will
452
+ * never be used on mobile.
453
+ */
454
+ int getDispatchTableIndexForDispatchKeySet() const {
455
+ auto dk = highestPriorityTypeId();
456
+ switch (dk) {
457
+ case DispatchKey::Undefined:
458
+ return 0;
459
+ case DispatchKey::CPU:
460
+ return 1;
461
+ case DispatchKey::QuantizedCPU:
462
+ return 2;
463
+ case DispatchKey::SparseCPU:
464
+ return 3;
465
+ case DispatchKey::BackendSelect:
466
+ return 4;
467
+ case DispatchKey::ADInplaceOrView:
468
+ return 5;
469
+ case DispatchKey::AutogradOther:
470
+ return 6;
471
+ case DispatchKey::AutogradCPU:
472
+ return 7;
473
+ default:
474
+ return -1;
475
+ }
476
+ }
477
+ #else
478
+ // returns the index in the operator table of the highest priority key in the
479
+ // keyset. Note that we could in theory implement this using
480
+ // highestPriorityTypeId(), but this code is very hotpath and we can do it
481
+ // faster without it.
482
+ int getDispatchTableIndexForDispatchKeySet() const {
483
+ auto functionality_idx =
484
+ DispatchKeySet(repr_ >> num_backends).indexOfHighestBit();
485
+ auto offset_and_mask = offsetsAndMasks()[functionality_idx];
486
+ // Mask the functionality bits out first, then right-shift by 1.
487
+ // right-shifting by 1 because everything is zero-indexed.
488
+ // E.g. 000001 (CPU) should give us an offset of 0, 000010 (CUDA) should
489
+ // give us an offset of 1, etc.
490
+ auto backend_idx =
491
+ DispatchKeySet((repr_ & offset_and_mask.mask) >> 1).indexOfHighestBit();
492
+ return offset_and_mask.offset + backend_idx;
493
+ }
494
+ #endif
495
+
496
+ // returns the "index" of the highest priority backend in the keyset.
497
+ // This is pretty similar to highestBackendKey(), but:
498
+ // - It's hotpath code (part of the runtime bitset calculation)
499
+ // - It returns an integer index, not an enum value
500
+ // - Everything is shifted to the right by 1.
501
+ // BackendComponent::InvalidBit is technically the lowest enum value,
502
+ // but it isn't included in the runtime table. So CPUBit = 1, CUDABit = 2,
503
+ // etc.
504
+ uint64_t getBackendIndex() const {
505
+ return DispatchKeySet((repr_ & full_backend_mask) >> 1).indexOfHighestBit();
506
+ }
507
+
508
+ private:
509
+ constexpr DispatchKeySet(uint64_t repr) : repr_(repr) {}
510
+ uint64_t repr_ = 0;
511
+
512
+ public:
513
+ // STL iterator for DispatchKeySet. Iterates through all runtime DispatchKeys
514
+ // in the set. The iterator is only invalidated by the destruction of the
515
+ // underlying DispatchKeySet as the iterator stores a pointer to the raw
516
+ // representation of the DispatchKeySet. Note: When we encounter a per-backend
517
+ // functionality (e.g. Dense or Sparse), we will iterate through EVERY backend
518
+ // in the keyset, for that functionality. For example, if the next
519
+ // functionality key to iterate over is Autograd, and the backend bits in the
520
+ // keyset correspond to [BackendComponent::CPUBit, BackendComponent::CUDABit],
521
+ // then the next two keys we return will be DispatchKey::AutogradCPU,
522
+ // DispatchKey::AutogradCUDA (CPU first because it has lower precedence than
523
+ // CUDA in DispatchKey.h).
524
+ class iterator {
525
+ public:
526
+ using self_type = iterator;
527
+ using iterator_category = std::input_iterator_tag;
528
+ using value_type = DispatchKey;
529
+ using difference_type = ptrdiff_t;
530
+ using reference = value_type&;
531
+ using pointer = value_type*;
532
+ // final mask value should mask out the entire keyset
533
+ static const uint8_t end_iter_mask_val =
534
+ num_backends + num_functionality_keys;
535
+ // final key value should be the last DispatchKey
536
+ static const uint8_t end_iter_key_val = num_functionality_keys;
537
+
538
+ // current_dispatchkey_idx_ will iterate through all functionality bits.
539
+ // current_backendcomponent_idx_ will iterate through all backend bits.
540
+ explicit iterator(
541
+ const uint64_t* data_ptr,
542
+ uint8_t next_functionality = num_backends,
543
+ uint8_t next_backend = 0)
544
+ : data_ptr_(data_ptr),
545
+ next_functionality_(next_functionality),
546
+ next_backend_(next_backend),
547
+ // These are in an invalid state at construction time, and set by the
548
+ // first increment call
549
+ current_dispatchkey_idx_(end_iter_key_val),
550
+ current_backendcomponent_idx_(end_iter_key_val) {
551
+ // Go to the first key in the set
552
+ TORCH_INTERNAL_ASSERT(
553
+ next_functionality_ >= num_backends,
554
+ "num_backends=",
555
+ static_cast<uint32_t>(num_backends),
556
+ "next_functionality_=",
557
+ static_cast<uint32_t>(next_functionality_));
558
+ ++(*this);
559
+ }
560
+
561
+ C10_API self_type& operator++();
562
+
563
+ self_type operator++(int) {
564
+ self_type previous_iterator = *this;
565
+ ++(*this);
566
+ return previous_iterator;
567
+ }
568
+
569
+ bool operator==(const self_type& rhs) const {
570
+ return next_functionality_ == rhs.next_functionality_ &&
571
+ current_dispatchkey_idx_ == rhs.current_dispatchkey_idx_ &&
572
+ next_backend_ == rhs.next_backend_ &&
573
+ current_backendcomponent_idx_ == rhs.current_backendcomponent_idx_;
574
+ }
575
+ bool operator!=(const self_type& rhs) const {
576
+ return next_functionality_ != rhs.next_functionality_ ||
577
+ current_dispatchkey_idx_ != rhs.current_dispatchkey_idx_ ||
578
+ next_backend_ != rhs.next_backend_ ||
579
+ current_backendcomponent_idx_ != rhs.current_backendcomponent_idx_;
580
+ }
581
+ DispatchKey operator*() const {
582
+ auto functionality_key =
583
+ static_cast<DispatchKey>(current_dispatchkey_idx_);
584
+ if (isPerBackendFunctionalityKey(functionality_key)) {
585
+ auto next_key = toRuntimePerBackendFunctionalityKey(
586
+ functionality_key,
587
+ static_cast<BackendComponent>(current_backendcomponent_idx_));
588
+ // We expect all of the Dense, Sparse, Quantized, and Autograd keys to
589
+ // be ordered the same way with respect to their backends
590
+ TORCH_INTERNAL_ASSERT(
591
+ toBackendComponent(next_key) ==
592
+ static_cast<BackendComponent>(current_backendcomponent_idx_),
593
+ "Tried to map functionality key ",
594
+ toString(functionality_key),
595
+ " and backend bit ",
596
+ toString(
597
+ static_cast<BackendComponent>(current_backendcomponent_idx_)),
598
+ " to a runtime key, but ended up with ",
599
+ toString(next_key),
600
+ ". This can happen if the order of the backend dispatch keys in DispatchKey.h isn't consistent.",
601
+ " Please double check that enum for inconsistencies.");
602
+ return next_key;
603
+ } else {
604
+ return functionality_key;
605
+ }
606
+ }
607
+
608
+ private:
609
+ const uint64_t* data_ptr_;
610
+ uint8_t next_functionality_;
611
+ uint8_t next_backend_;
612
+ uint8_t current_dispatchkey_idx_;
613
+ uint8_t current_backendcomponent_idx_;
614
+ };
615
+
616
+ public:
617
+ // Returns iterator to the first key in the set. If no keys are in the
618
+ // set, then will return the end iterator.
619
+ iterator begin() const {
620
+ return iterator(&repr_);
621
+ }
622
+
623
+ // We do not need to iterate beyond EndOfFunctionalityKeys so we will treat
624
+ // this as the end iterator.
625
+ iterator end() const {
626
+ return iterator(&repr_, iterator::end_iter_mask_val);
627
+ }
628
+ };
629
+
630
+ C10_API std::string toString(DispatchKeySet);
631
+ C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
632
+
633
+ C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
634
+ return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
635
+ }
636
+
637
+ // Alias key DispatchKey::Autograd maps to
638
+ // (autograd_dispatch_keyset x full_backend_mask)
639
+ // NB: keys in this set also get associated with CompositeImplicitAutograd
640
+ //
641
+ // Note [autograd_dispatch_keyset Does Not Include Backend Bits]
642
+ // We don't want to include any backend bits (BackendComponent::CPUBit, etc)
643
+ // directly in autograd_dispatch_keyset.
644
+ // Why? keysets like autograd_dispatch_keyset are commonly used to remove
645
+ // autograd keys from a DispatchKeySet throughout the code base. However, you
646
+ // are only allowed to remove functionality bits from a keyset, not backend
647
+ // bits. See Note [Removing keys from DispatchKeySet Only Affects Functionality
648
+ // Keys] for details. To be consistent and avoid confusion, we're explicitly
649
+ // setting up autograd_dispatch_keyset to not have any backend bits.
650
+ constexpr DispatchKeySet autograd_dispatch_keyset = DispatchKeySet({
651
+ DispatchKey::AutogradFunctionality,
652
+ DispatchKey::AutogradOther,
653
+ DispatchKey::AutogradNestedTensor,
654
+ });
655
+
656
+ constexpr DispatchKeySet autocast_dispatch_keyset = DispatchKeySet({
657
+ DispatchKey::AutocastCPU,
658
+ DispatchKey::AutocastCUDA,
659
+ DispatchKey::AutocastXPU,
660
+ DispatchKey::AutocastIPU,
661
+ DispatchKey::AutocastHPU,
662
+ DispatchKey::AutocastXLA,
663
+ DispatchKey::AutocastPrivateUse1,
664
+ });
665
+
666
+ // See Note [TLS Initialization]
667
+ constexpr DispatchKeySet default_included_set = DispatchKeySet({
668
+ DispatchKey::BackendSelect,
669
+ DispatchKey::ADInplaceOrView,
670
+ });
671
+
672
+ constexpr DispatchKeySet default_excluded_set = DispatchKeySet({
673
+ DispatchKey::AutocastCPU,
674
+ DispatchKey::AutocastCUDA,
675
+ DispatchKey::AutocastXPU,
676
+ DispatchKey::AutocastIPU,
677
+ DispatchKey::AutocastHPU,
678
+ DispatchKey::AutocastXLA,
679
+ DispatchKey::AutocastPrivateUse1,
680
+ });
681
+
682
+ constexpr DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView =
683
+ autograd_dispatch_keyset | DispatchKeySet(DispatchKey::ADInplaceOrView);
684
+
685
+ constexpr DispatchKeySet python_ks = DispatchKeySet({
686
+ DispatchKey::Python,
687
+ DispatchKey::PythonTLSSnapshot,
688
+ });
689
+
690
+ constexpr DispatchKeySet sparse_ks = DispatchKeySet(DispatchKey::Sparse);
691
+
692
+ constexpr DispatchKeySet sparse_csr_ks = DispatchKeySet(DispatchKey::SparseCsr);
693
+
694
+ constexpr DispatchKeySet mkldnn_ks = DispatchKeySet(DispatchKey::MkldnnCPU);
695
+
696
+ // backend dispatch keys that map to DispatchKey::AutogradOther
697
+ // NB: keys in this set also get associated with CompositeImplicitAutograd
698
+ constexpr DispatchKeySet autogradother_backends =
699
+ DispatchKeySet(
700
+ // HIP and VE aren't in this list: they now have their own backend bits
701
+ // which means that they can now have their own Autograd keys.
702
+ // Technically, HIP will now redispatch to its own custom AutogradHIP
703
+ // slot in the runtime table.
704
+ {DispatchKey::FPGA,
705
+ DispatchKey::ORT,
706
+ DispatchKey::Vulkan,
707
+ DispatchKey::Metal,
708
+ DispatchKey::CustomRNGKeyId,
709
+ DispatchKey::MkldnnCPU,
710
+ // Sparse and Quantized backends also live here.
711
+ DispatchKey::Sparse,
712
+ DispatchKey::SparseCsr,
713
+ DispatchKey::Quantized})
714
+ // Including the backend bits because this keyset is used during op
715
+ // registration, which requires looping over all runtime autogradother
716
+ // backend keys.
717
+ | DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
718
+
719
+ // The set of dispatch keys that come after autograd
720
+ // n.b. this relies on the fact that AutogradOther is currently the lowest
721
+ // Autograd key
722
+ constexpr DispatchKeySet after_autograd_keyset =
723
+ DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::AutogradOther);
724
+
725
+ // The set of dispatch keys that come after ADInplaceOrView
726
+ constexpr DispatchKeySet after_ADInplaceOrView_keyset = DispatchKeySet(
727
+ DispatchKeySet::FULL_AFTER,
728
+ c10::DispatchKey::ADInplaceOrView);
729
+
730
+ // The set of dispatch keys that come after Functionalize
731
+ constexpr DispatchKeySet after_func_keyset =
732
+ DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::Functionalize)
733
+ .remove(
734
+ // NOTE: we also need to remove ADInplaceOrView from the keyset when
735
+ // redispatching after the func kernels. This is because we're not
736
+ // calling the same op; we originally called an inplace op, and now
737
+ // we aren't. The original key calculation figured out which keys
738
+ // were Fallthrough based on the inplace op. That means that it did
739
+ // not include the ADInPlaceOrView kernel as a fallthrough key.
740
+ // However, we WANT the ADInPlaceOrView kernel to be ignored now
741
+ // that we're calling an out-of-place op. Re-invoking
742
+ // Dispatcher::call would re-run the Fallthrough key calculation and
743
+ // get us that, But at::redispatch is more performant. We can get
744
+ // away with it by explicitly removing the key here.
745
+ c10::DispatchKey::ADInplaceOrView);
746
+
747
+ constexpr DispatchKeySet backend_bitset_mask =
748
+ DispatchKeySet(DispatchKeySet::RAW, (1ULL << num_backends) - 1);
749
+
750
+ constexpr auto inplace_or_view_ks =
751
+ DispatchKeySet(DispatchKey::ADInplaceOrView);
752
+ constexpr auto autograd_cpu_ks = DispatchKeySet(DispatchKey::AutogradCPU);
753
+ constexpr auto autograd_ipu_ks = DispatchKeySet(DispatchKey::AutogradIPU);
754
+ constexpr auto autograd_xpu_ks = DispatchKeySet(DispatchKey::AutogradXPU);
755
+ constexpr auto autograd_cuda_ks = DispatchKeySet(DispatchKey::AutogradCUDA);
756
+ constexpr auto autograd_xla_ks = DispatchKeySet(DispatchKey::AutogradXLA);
757
+ constexpr auto autograd_lazy_ks = DispatchKeySet(DispatchKey::AutogradLazy);
758
+ constexpr auto autograd_meta_ks = DispatchKeySet(DispatchKey::AutogradMeta);
759
+ constexpr auto autograd_mps_ks = DispatchKeySet(DispatchKey::AutogradMPS);
760
+ constexpr auto autograd_hpu_ks = DispatchKeySet(DispatchKey::AutogradHPU);
761
+ constexpr auto autograd_privateuse1_ks =
762
+ DispatchKeySet(DispatchKey::AutogradPrivateUse1);
763
+ constexpr auto autograd_privateuse2_ks =
764
+ DispatchKeySet(DispatchKey::AutogradPrivateUse2);
765
+ constexpr auto autograd_privateuse3_ks =
766
+ DispatchKeySet(DispatchKey::AutogradPrivateUse3);
767
+ constexpr auto autograd_other_ks = DispatchKeySet(DispatchKey::AutogradOther);
768
+ constexpr auto autograd_nested =
769
+ DispatchKeySet(DispatchKey::AutogradNestedTensor);
770
+ // keyset corresponding to functorch keys that have their own dedicated
771
+ // TensorImpl subclass.
772
+ constexpr auto functorch_transforms_ks = DispatchKeySet(
773
+ {DispatchKey::FuncTorchBatched,
774
+ DispatchKey::FuncTorchVmapMode,
775
+ DispatchKey::Batched,
776
+ DispatchKey::VmapMode,
777
+ DispatchKey::FuncTorchGradWrapper});
778
+
779
+ constexpr auto functorch_batched_ks =
780
+ DispatchKeySet({DispatchKey::FuncTorchBatched});
781
+
782
+ // This keyset has:
783
+ // (1) the functionality bits corresponding to backends (dense, sparse,
784
+ // quantized)
+ // (2) all of the backend bits set
785
+ constexpr DispatchKeySet backend_functionality_keys =
786
+ DispatchKeySet({
787
+ DispatchKey::Dense,
788
+ DispatchKey::Quantized,
789
+ DispatchKey::Sparse,
790
+ DispatchKey::SparseCsr,
791
+ }) |
792
+ DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
793
+
794
+ struct OpTableOffsetAndMask {
795
+ uint16_t offset;
796
+ uint16_t backend_mask;
797
+ };
798
+
799
+ static_assert(
800
+ num_backends <= 16,
801
+ "Right now we expect the number of backends not to exceed 16. In the (unlikely) event"
802
+ " that this changes, the size of OpTableOffsetAndMask::backend_mask needs to be increased too.");
803
+
804
+ // true if t is a backend dispatch key
805
+ C10_API bool isBackendDispatchKey(DispatchKey t);
806
+
807
+ // Resolve alias dispatch key to DispatchKeySet if applicable
808
+ C10_API DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t);
809
+
810
+ // Resolve alias dispatch key to DispatchKeySet if applicable,
811
+ // and check if k is a part of that set
812
+ C10_API bool runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k);
813
+
814
+ // Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch key
815
+ // t, DispatchKeySet is empty if t is not alias of DispatchKey::Autograd.
816
+ C10_API DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t);
817
+
818
+ // Returns a DispatchKeySet of autograd related keys mapped to backend.
819
+ // for a given backend key, use the associated autograd key.
820
+ // for non-backend keys, use AutogradOther as a default.
821
+ // Note: it's convenient and fast to return a default here rather than (say)
822
+ // returning an optional<DispatchKey>, or throwing. But it makes callers
823
+ // responsible for either a) enforcing the invariant that only backend keys
824
+ // be passed as arguments, or b) interpreting our return value carefully.
825
+ inline DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t) {
826
+ switch (t) {
827
+ case BackendComponent::CPUBit:
828
+ return inplace_or_view_ks | autograd_cpu_ks;
829
+ case BackendComponent::IPUBit:
830
+ return inplace_or_view_ks | autograd_ipu_ks;
831
+ case BackendComponent::XPUBit:
832
+ return inplace_or_view_ks | autograd_xpu_ks;
833
+ case BackendComponent::CUDABit:
834
+ return inplace_or_view_ks | autograd_cuda_ks;
835
+ case BackendComponent::XLABit:
836
+ return inplace_or_view_ks | autograd_xla_ks;
837
+ case BackendComponent::LazyBit:
838
+ return inplace_or_view_ks | autograd_lazy_ks;
839
+ case BackendComponent::MetaBit:
840
+ return inplace_or_view_ks | autograd_meta_ks;
841
+ case BackendComponent::MPSBit:
842
+ return inplace_or_view_ks | autograd_mps_ks;
843
+ case BackendComponent::HPUBit:
844
+ return inplace_or_view_ks | autograd_hpu_ks;
845
+ case BackendComponent::PrivateUse1Bit:
846
+ return inplace_or_view_ks | autograd_privateuse1_ks;
847
+ case BackendComponent::PrivateUse2Bit:
848
+ return inplace_or_view_ks | autograd_privateuse2_ks;
849
+ case BackendComponent::PrivateUse3Bit:
850
+ return inplace_or_view_ks | autograd_privateuse3_ks;
851
+ default:
852
+ return inplace_or_view_ks | autograd_other_ks;
853
+ }
854
+ }
855
+
856
+ // Returns a DispatchKeySet of autocast related keys mapped to backend.
857
+ inline DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t) {
858
+ constexpr auto autocast_cpu_ks = DispatchKeySet(DispatchKey::AutocastCPU);
859
+ constexpr auto autocast_xpu_ks = DispatchKeySet(DispatchKey::AutocastXPU);
860
+ constexpr auto autocast_ipu_ks = DispatchKeySet(DispatchKey::AutocastIPU);
861
+ constexpr auto autocast_hpu_ks = DispatchKeySet(DispatchKey::AutocastHPU);
862
+ constexpr auto autocast_cuda_ks = DispatchKeySet(DispatchKey::AutocastCUDA);
863
+ constexpr auto autocast_xla_ks = DispatchKeySet(DispatchKey::AutocastXLA);
864
+ constexpr auto autocast_privateuse1_ks =
865
+ DispatchKeySet(DispatchKey::AutocastPrivateUse1);
866
+ switch (t) {
867
+ case BackendComponent::CPUBit:
868
+ return autocast_cpu_ks;
869
+ case BackendComponent::XPUBit:
870
+ return autocast_xpu_ks;
871
+ case BackendComponent::IPUBit:
872
+ return autocast_ipu_ks;
873
+ case BackendComponent::HPUBit:
874
+ return autocast_hpu_ks;
875
+ case BackendComponent::CUDABit:
876
+ return autocast_cuda_ks;
877
+ case BackendComponent::XLABit:
878
+ return autocast_xla_ks;
879
+ case BackendComponent::PrivateUse1Bit:
880
+ return autocast_privateuse1_ks;
881
+ default:
882
+ return DispatchKeySet();
883
+ }
884
+ }
885
+
886
+ // returns the "backend" DispatchKey of highest priority in the set.
887
+ // This is basically like highestBackendKey(), except that we have some
888
+ // "functionality" bits that correspond to backends (Sparse, Quantized)
889
+ inline DispatchKey highestPriorityBackendTypeId(DispatchKeySet ks) {
890
+ return (ks & backend_functionality_keys).highestPriorityTypeId();
891
+ }
892
+
893
+ // This API exists because we have a use case for checking
894
+ // getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined)
895
+ // in OperatorEntry.cpp, but we disallow it in the has() API.
896
+ C10_API bool isIncludedInAlias(DispatchKey k, DispatchKey alias);
897
+
898
+ // Historically, every tensor only had a single DispatchKey, and it was always
899
+ // something like CPU, and there wasn't any of this business where TLS
900
+ // could cause the DispatchKey of a tensor to change. But we still have some
901
+ // legacy code that still uses DispatchKey for things like instanceof
902
+ // checks; if at all possible, refactor the code to stop using DispatchKey in
903
+ // those cases.
904
+ static inline DispatchKey legacyExtractDispatchKey(DispatchKeySet s) {
905
+ // NB: If you add any extra keys that can be stored in TensorImpl on
906
+ // top of existing "backend" keys like CPU/CUDA, you need to add it
907
+ // here. At the moment, autograd keys and ADInplaceOrView key need this
908
+ // treatment;
909
+ return (s - autograd_dispatch_keyset_with_ADInplaceOrView -
910
+ autocast_dispatch_keyset -
911
+ DispatchKeySet(
912
+ {DispatchKey::Functionalize,
913
+ DispatchKey::PythonTLSSnapshot,
914
+ DispatchKey::Python}))
915
+ .highestPriorityTypeId();
916
+ }
917
+
918
+ template <class T>
919
+ using is_not_DispatchKeySet = std::negation<std::is_same<DispatchKeySet, T>>;
920
+
921
+ // Given a function type, constructs a function_traits type that drops the first
922
+ // parameter type if the first parameter is of type DispatchKeySet. NB:
923
+ // DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid
924
+ // pushing unnecessary arguments on the stack - see Note [ Plumbing Keys Through
925
+ // the Dispatcher] for details). If at any point in the future we need to expose
926
+ // this type to JIT, revisit the usage of this type alias.
927
+ template <class FuncType>
928
+ using remove_DispatchKeySet_arg_from_func = guts::make_function_traits_t<
929
+ typename guts::infer_function_traits_t<FuncType>::return_type,
930
+ typename std::conditional_t<
931
+ std::is_same_v<
932
+ DispatchKeySet,
933
+ typename guts::typelist::head_with_default_t<
934
+ void,
935
+ typename guts::infer_function_traits_t<
936
+ FuncType>::parameter_types>>,
937
+ guts::typelist::drop_if_nonempty_t<
938
+ typename guts::infer_function_traits_t<FuncType>::parameter_types,
939
+ 1>,
940
+ typename guts::infer_function_traits_t<FuncType>::parameter_types>>;
941
+ } // namespace c10
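
An illustrative aside, separate from the header above: a minimal sketch, assuming a translation unit compiled and linked against libc10, of how the backend-to-autograd mapping and the legacy key extraction compose. Nothing here is part of the commit.

    #include <c10/core/DispatchKeySet.h>
    #include <iostream>

    int main() {
      // Backend bit -> ADInplaceOrView plus the matching Autograd functionality key.
      auto autograd_ks =
          c10::getAutogradRelatedKeySetFromBackend(c10::BackendComponent::CUDABit);
      std::cout << c10::toString(autograd_ks.highestPriorityTypeId()) << "\n"; // AutogradCUDA

      // legacyExtractDispatchKey() strips the autograd/autocast functionality bits
      // and reports the highest-priority remaining key.
      auto ks = c10::DispatchKeySet(c10::DispatchKey::CUDA) |
          c10::DispatchKeySet(c10::DispatchKey::AutogradCUDA);
      std::cout << c10::toString(c10::legacyExtractDispatchKey(ks)) << "\n"; // CUDA
    }
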
venv/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h ADDED
@@ -0,0 +1,125 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Load.h>
6
+ #include <c10/util/TypeCast.h>
7
+
8
+ namespace c10 {
9
+
10
+ // Dynamic type casting utils:
11
+ // - fetch_and_cast
12
+ // - cast_and_store
13
+ //
14
+ // fetch_and_cast fetches a value with dynamic type specified by a ScalarType
15
+ // from a void pointer and casts it to a static type.
16
+ //
17
+ // cast_and_store casts a statically typed value into the dynamic type specified
18
+ // by a ScalarType, and stores it into a void pointer.
19
+ //
20
+ // NOTE:
21
+ //
22
+ // Dynamic casting allows us to support type promotion without blowing up
23
+ // the combination space: For example, without dynamic cast, in order to
24
+ // implement `add_` with type promotion, we would need something like
25
+ //
26
+ // AT_DISPATCH_ALL_TYPES(output.dtype(),
27
+ // AT_DISPATCH_ALL_TYPES(input1.dtype(),
28
+ // AT_DISPATCH_ALL_TYPES(input2.dtype(),
29
+ // [](arg0_t a, arg1_t b) -> out_t { return a + b; }
30
+ // )
31
+ // )
32
+ // )
33
+ //
34
+ // If we support N dtypes, the above code would generate the a+b kernel for
35
+ // all the N * N * N different supported type combinations; the compilation time and
36
+ // binary size would become horrible.
37
+ //
38
+ // Dynamic casting might sound like a bad idea in terms of performance.
39
+ // Especially if you ever do it in a loop, you are going to do a billion tests.
40
+ // But in practice it is not as bad as it might look:
41
+ //
42
+ // - on CPU, this is a branch that always has the same outcome, therefore
43
+ // hopefully the branch predictor could do the job pretty well
44
+ // - on GPU, these branches will not diverge, so we could still have the same
45
+ // warp executing the same line of code
46
+ // - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to
47
+ // check an integer does not hurt the performance much because the ALUs would
48
+ // wait for load instructions anyway.
49
+ //
50
+ // For the discussion and benchmark, refer to:
51
+ // - https://github.com/pytorch/pytorch/pull/28343
52
+ // - https://github.com/pytorch/pytorch/pull/28344
53
+ // - https://github.com/pytorch/pytorch/pull/28345
54
+ //
55
+
56
+ #ifdef C10_HOST_DEVICE
57
+ #define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false);
58
+ #else
59
+ #define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
60
+ #endif
61
+
62
+ // Fetch a value with dynamic type src_type from ptr, and cast it to static type
63
+ // dest_t.
64
+ #define FETCH_AND_CAST_CASE(type, scalartype) \
65
+ case ScalarType::scalartype: \
66
+ return c10::convert<dest_t>(c10::load<type>(ptr));
67
+
68
+ template <typename dest_t>
69
+ C10_HOST_DEVICE inline dest_t fetch_and_cast(
70
+ const ScalarType src_type,
71
+ const void* ptr) {
72
+ switch (src_type) {
73
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE)
74
+ FETCH_AND_CAST_CASE(uint16_t, UInt16)
75
+ FETCH_AND_CAST_CASE(uint32_t, UInt32)
76
+ FETCH_AND_CAST_CASE(uint64_t, UInt64)
77
+ default:
78
+ ERROR_UNSUPPORTED_CAST
79
+ }
80
+ return dest_t(0); // just to avoid compiler warning
81
+ }
82
+
83
+ // Cast a value with static type src_t into dynamic dest_type, and store it to
84
+ // ptr.
85
+ #define CAST_AND_STORE_CASE(type, scalartype) \
86
+ case ScalarType::scalartype: \
87
+ *(type*)ptr = c10::convert<type>(value); \
88
+ return;
89
+ template <typename src_t>
90
+ C10_HOST_DEVICE inline void cast_and_store(
91
+ const ScalarType dest_type,
92
+ void* ptr,
93
+ src_t value) {
94
+ switch (dest_type) {
95
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE)
96
+ CAST_AND_STORE_CASE(uint16_t, UInt16)
97
+ CAST_AND_STORE_CASE(uint32_t, UInt32)
98
+ CAST_AND_STORE_CASE(uint64_t, UInt64)
99
+ default:;
100
+ }
101
+ ERROR_UNSUPPORTED_CAST
102
+ }
103
+
104
+ #define DEFINE_UNCASTABLE(T, scalartype_) \
105
+ template <> \
106
+ C10_HOST_DEVICE inline T fetch_and_cast<T>( \
107
+ const ScalarType src_type, const void* ptr) { \
108
+ CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \
109
+ return c10::load<T>(ptr); \
110
+ } \
111
+ template <> \
112
+ C10_HOST_DEVICE inline void cast_and_store<T>( \
113
+ const ScalarType dest_type, void* ptr, T value) { \
114
+ CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
115
+ *(T*)ptr = value; \
116
+ }
117
+
118
+ AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)
119
+
120
+ #undef FETCH_AND_CAST_CASE
121
+ #undef CAST_AND_STORE_CASE
122
+ #undef DEFINE_UNCASTABLE
123
+ #undef ERROR_UNSUPPORTED_CAST
124
+
125
+ } // namespace c10
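
A minimal host-side sketch of the fetch_and_cast / cast_and_store pattern documented above, separate from the header itself; the buffers and dtypes are made up for illustration, and the snippet assumes a build linked against libc10.

    #include <c10/core/DynamicCast.h>
    #include <c10/core/ScalarType.h>
    #include <cstdint>
    #include <iostream>

    int main() {
      // Source buffer holds int32, destination holds double; the loop body is
      // written once in float and the casts happen dynamically at the edges.
      int32_t src[3] = {1, 2, 3};
      double dst[3] = {};
      for (int i = 0; i < 3; ++i) {
        float a = c10::fetch_and_cast<float>(c10::ScalarType::Int, src + i);
        float r = a * 2.0f;
        c10::cast_and_store<float>(c10::ScalarType::Double, dst + i, r);
      }
      std::cout << dst[0] << " " << dst[1] << " " << dst[2] << "\n"; // 2 4 6
    }
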
venv/lib/python3.10/site-packages/torch/include/c10/core/Event.h ADDED
@@ -0,0 +1,125 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/core/Stream.h>
6
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
7
+ #include <c10/core/impl/InlineEvent.h>
8
+ #include <c10/core/impl/VirtualGuardImpl.h>
9
+
10
+ namespace c10 {
11
+
12
+ /**
13
+ * A backend-generic movable, not copyable, not thread-safe event.
14
+ *
15
+ * The design of this event follows that of CUDA and HIP events. These events
16
+ * are recorded and waited on by streams and can be rerecorded to,
17
+ * each rerecording essentially creating a new version of the event.
18
+ * For example, if (in CPU time), stream X is asked to record E,
19
+ * stream Y waits on E, and stream X is asked to record E again, then Y will
20
+ * wait for X to finish the first call to record and not the second, because
21
+ * it's waiting on the first version of event E, not the second.
22
+ * Querying an event only returns the status of its most recent version.
23
+ *
24
+ * Backend-generic events are implemented by this class and
25
+ * impl::InlineEvent. In addition to these events there are also
26
+ * some backend-specific events, like ATen's CUDAEvent. Each of these
27
+ * classes has its own use.
28
+ *
29
+ * impl::InlineEvent<...> or a backend-specific event should be
30
+ * preferred when the backend is known at compile time and known to
31
+ * be compiled. Backend-specific events may have additional functionality.
32
+ *
33
+ * This Event should be used if a particular backend may not be available,
34
+ * or the backend required is not known at compile time.
35
+ *
36
+ * These generic events are built on top of DeviceGuardImpls, analogous
37
+ * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls"
38
+ * is no longer entirely accurate, as these classes implement the
39
+ * backend-specific logic for a generic backend interface.
40
+ *
41
+ * See DeviceGuardImplInterface.h for a list of all supported flags.
42
+ */
43
+
44
+ struct Event final {
45
+ // Constructors
46
+ Event() = delete;
47
+ Event(
48
+ const DeviceType _device_type,
49
+ const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
50
+ : impl_{_device_type, _flag} {}
51
+
52
+ // Copy constructor and copy assignment operator (deleted)
53
+ Event(const Event&) = delete;
54
+ Event& operator=(const Event&) = delete;
55
+
56
+ // Move constructor and move assignment operator
57
+ Event(Event&&) noexcept = default;
58
+ Event& operator=(Event&&) noexcept = default;
59
+
60
+ // Destructor
61
+ ~Event() = default;
62
+
63
+ // Getters
64
+ Device device() const noexcept {
65
+ return Device(device_type(), device_index());
66
+ }
67
+ DeviceType device_type() const noexcept {
68
+ return impl_.device_type();
69
+ }
70
+ DeviceIndex device_index() const noexcept {
71
+ return impl_.device_index();
72
+ }
73
+ EventFlag flag() const noexcept {
74
+ return impl_.flag();
75
+ }
76
+ bool was_marked_for_recording() const noexcept {
77
+ return impl_.was_marked_for_recording();
78
+ }
79
+
80
+ /**
81
+ * Calls record() if and only if record() has never been called for this
82
+ * event. Note: because Event is not thread-safe recordOnce() may call
83
+ * record() multiple times if called from multiple threads.
84
+ */
85
+ void recordOnce(const Stream& stream) {
86
+ impl_.recordOnce(stream);
87
+ }
88
+
89
+ /**
90
+ * Increments the event's version and enqueues a job with this version
91
+ * in the stream's work queue. When the stream processes that job
92
+ * it notifies all streams waiting on / blocked by that version of the
93
+ * event to continue and marks that version as recorded.
94
+ * */
95
+ void record(const Stream& stream) {
96
+ impl_.record(stream);
97
+ }
98
+
99
+ /**
100
+ * Does nothing if the event has not been scheduled to be recorded.
101
+ * If the event was previously enqueued to be recorded, a command
102
+ * to wait for the version of the event that exists at the time of this call
103
+ * is inserted in the stream's work queue.
104
+ * When the stream reaches this command it will stop processing
105
+ * additional commands until that version of the event is marked as recorded.
106
+ */
107
+ void block(const Stream& stream) const {
108
+ impl_.block(stream);
109
+ }
110
+
111
+ /**
112
+ * Returns true if (and only if)
113
+ * (1) the event has never been scheduled to be recorded, or
114
+ * (2) the current version is marked as recorded.
115
+ * Returns false otherwise.
116
+ */
117
+ bool query() const {
118
+ return impl_.query();
119
+ }
120
+
121
+ private:
122
+ impl::InlineEvent<impl::VirtualGuardImpl> impl_;
123
+ };
124
+
125
+ } // namespace c10
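
For illustration only (not part of the added file): a sketch of the record/block pattern the class documents. How the two c10::Stream objects are obtained is backend-specific and assumed to happen elsewhere.

    #include <c10/core/Event.h>
    #include <c10/core/Stream.h>

    // Record a marker after the work already queued on `producer`, then make
    // `consumer` wait for that version of the event before its later work runs.
    void synchronize_streams(const c10::Stream& producer, const c10::Stream& consumer) {
      c10::Event done(producer.device_type());
      done.record(producer);  // bumps the event's version and enqueues it on `producer`
      done.block(consumer);   // `consumer` stalls until that version is marked recorded
      // done.query() would report whether the recorded version has completed yet.
    }
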
venv/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h ADDED
@@ -0,0 +1,44 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/AutogradState.h>
4
+ #include <c10/macros/Export.h>
5
+
6
+ namespace c10 {
7
+
8
+ struct C10_API GradMode {
9
+ static bool is_enabled();
10
+ static void set_enabled(bool enabled);
11
+ };
12
+
13
+ // A RAII, thread local (!) guard that enables or disables grad mode upon
14
+ // construction, and sets it back to the original value upon destruction.
15
+ struct C10_API AutoGradMode {
16
+ AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
17
+ GradMode::set_enabled(enabled);
18
+ }
19
+ ~AutoGradMode() {
20
+ GradMode::set_enabled(prev_mode);
21
+ }
22
+ bool prev_mode;
23
+ };
24
+
25
+ // A RAII, thread local (!) guard that stops future operations from building
26
+ // gradients.
27
+ struct C10_API NoGradGuard : public AutoGradMode {
28
+ NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
29
+ };
30
+
31
+ // A RAII, thread local (!) guard that enables or disables forward grad mode
32
+ // upon construction, and sets it back to the original value upon destruction.
33
+ struct C10_API AutoFwGradMode {
34
+ AutoFwGradMode(bool enabled)
35
+ : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
36
+ AutogradState::get_tls_state().set_fw_grad_mode(enabled);
37
+ }
38
+ ~AutoFwGradMode() {
39
+ AutogradState::get_tls_state().set_fw_grad_mode(prev_mode);
40
+ }
41
+ bool prev_mode;
42
+ };
43
+
44
+ } // namespace c10
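
A small usage sketch (not part of the added file), assuming libc10 is linked: the guards are thread-local RAII objects that restore the previous value when they go out of scope.

    #include <c10/core/GradMode.h>
    #include <iostream>

    int main() {
      std::cout << c10::GradMode::is_enabled() << "\n";    // 1 (enabled by default)
      {
        c10::NoGradGuard no_grad;                          // disables grad mode in this scope
        std::cout << c10::GradMode::is_enabled() << "\n";  // 0
        c10::AutoGradMode re_enable(true);                 // guards nest
        std::cout << c10::GradMode::is_enabled() << "\n";  // 1
      }                                                    // destructors restore prior state
      std::cout << c10::GradMode::is_enabled() << "\n";    // 1 again
    }
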
venv/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h ADDED
@@ -0,0 +1,86 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/AutogradState.h>
4
+ #include <c10/core/DispatchKey.h>
5
+ #include <c10/core/DispatchKeySet.h>
6
+ #include <c10/core/impl/LocalDispatchKeySet.h>
7
+ #include <c10/macros/Export.h>
8
+
9
+ namespace c10 {
10
+
11
+ // A RAII, thread local (!) guard that enables or disables inference mode upon
12
+ // construction, and sets it back to the original value upon destruction.
13
+ struct C10_API InferenceMode {
14
+ // Note [Expected TLS state in InferenceMode]:
15
+ // InferenceMode: ADInplaceOrView not in
16
+ // raw_local_dispatch_key_set.included(),
17
+ // Autograd in raw_local_dispatch_key_set.excluded()
18
+ // GradMode is disabled.
19
+ // NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(),
20
+ // Autograd not in raw_local_dispatch_key_set.excluded()
21
+ // GradMode is enabled by default unless toggled manually
22
+ // through other APIs, e.g. NoGradGuard.
23
+ //
24
+ // Invariant:
25
+ // - ADInplaceOrView is never in the excluded set
26
+ // - Autograd is never in the included set
27
+ // - Setting InferenceMode will set GradMode accordingly, but not vice versa.
28
+ //
29
+ // 1. Why do we put ADInplaceOrView in included set outside InferenceMode?
30
+ //
31
+ // Inplace update to an inference tensor outside InferenceMode is not
32
+ // allowed. See Note [Inplace update inference tensor] for more details.
33
+ // Without going through the ADInplaceOrView kernel, we cannot throw an error
34
+ // for the `inference_tensor.add_(1)` case.
35
+ //
36
+ // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode?
37
+ //
38
+ // For example:
39
+ // torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true);
40
+ // torch::Tensor k = a + 2;
41
+ // {
42
+ // c10::InferenceMode guard(true);
43
+ // k.add_(2);
44
+ // }
45
+ // `k.add_(2)` still needs to go through the ADInplaceOrView kernel so that it's
46
+ // prepared for future autograd.
47
+ //
48
+ // 3. Why does setting InferenceMode also set GradMode?
49
+ //
50
+ // This is required since InferenceMode is a faster and more restrictive
51
+ // version of NoGradGuard. All runtime checks using GradMode::is_enabled()
52
+ // are applicable to InferenceMode as well, e.g.
53
+ // `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
54
+ InferenceMode(bool enabled = true)
55
+ : prev_mode(AutogradState::get_tls_state()),
56
+ prev_keyset(c10::impl::tls_local_dispatch_key_set()) {
57
+ // Enabling inference mode means disabling grad modes
58
+ // And disabling inference mode means enabling grad modes
59
+ AutogradState::set_tls_state(AutogradState(
60
+ /* grad_mode */ !enabled,
61
+ /* inference_mode */ enabled,
62
+ /* fw_grad_mode */ !enabled,
63
+ /* multithreading_enabled*/ !enabled));
64
+ DispatchKeySet included = enabled
65
+ ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView)
66
+ : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView);
67
+ DispatchKeySet excluded = enabled
68
+ ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset)
69
+ : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset);
70
+ c10::impl::PODLocalDispatchKeySet cur_keyset{};
71
+ cur_keyset.set_included(included);
72
+ cur_keyset.set_excluded(excluded);
73
+ c10::impl::_force_tls_local_dispatch_key_set(cur_keyset);
74
+ }
75
+
76
+ ~InferenceMode() {
77
+ AutogradState::set_tls_state(prev_mode);
78
+ c10::impl::_force_tls_local_dispatch_key_set(prev_keyset);
79
+ }
80
+ static bool is_enabled();
81
+
82
+ private:
83
+ AutogradState prev_mode;
84
+ c10::impl::LocalDispatchKeySet prev_keyset;
85
+ };
86
+ } // namespace c10
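
A brief sketch (not part of the added file) of the thread-local behaviour described in the note: entering the guard flips GradMode off along with the dispatch key changes, and the destructor restores the previous state.

    #include <c10/core/GradMode.h>
    #include <c10/core/InferenceMode.h>
    #include <iostream>

    int main() {
      {
        c10::InferenceMode guard;  // enabled = true
        std::cout << c10::InferenceMode::is_enabled() << " "
                  << c10::GradMode::is_enabled() << "\n";  // 1 0
      }
      // The previous AutogradState and local dispatch key set are restored here.
      std::cout << c10::InferenceMode::is_enabled() << " "
                << c10::GradMode::is_enabled() << "\n";    // 0 1
    }
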
venv/lib/python3.10/site-packages/torch/include/c10/core/Layout.h ADDED
@@ -0,0 +1,78 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ #include <cstdint>
7
+ #include <ostream>
8
+
9
+ namespace c10 {
10
+ enum class Layout : int8_t {
11
+ Strided,
12
+ Sparse,
13
+ SparseCsr,
14
+ Mkldnn,
15
+ SparseCsc,
16
+ SparseBsr,
17
+ SparseBsc,
18
+ Jagged,
19
+ NumOptions
20
+ };
21
+
22
+ constexpr auto kStrided = Layout::Strided;
23
+ constexpr auto kSparse = Layout::Sparse;
24
+ constexpr auto kSparseCsr = Layout::SparseCsr;
25
+ constexpr auto kMkldnn = Layout::Mkldnn;
26
+ constexpr auto kSparseCsc = Layout::SparseCsc;
27
+ constexpr auto kSparseBsr = Layout::SparseBsr;
28
+ constexpr auto kSparseBsc = Layout::SparseBsc;
29
+ constexpr auto kJagged = Layout::Jagged;
30
+
31
+ inline Layout layout_from_backend(Backend backend) {
32
+ switch (backend) {
33
+ case Backend::SparseCPU:
34
+ case Backend::SparseCUDA:
35
+ case Backend::SparseHIP:
36
+ case Backend::SparseVE:
37
+ case Backend::SparseXPU:
38
+ case Backend::SparsePrivateUse1:
39
+ return Layout::Sparse;
40
+ case Backend::MkldnnCPU:
41
+ return Layout::Mkldnn;
42
+ case Backend::SparseCsrCPU:
43
+ case Backend::SparseCsrCUDA:
44
+ case Backend::SparseCsrHIP:
45
+ case Backend::SparseCsrVE:
46
+ case Backend::SparseCsrXPU:
47
+ TORCH_CHECK(
48
+ false,
49
+ "Cannot map Backend SparseCsr(CPU|CUDA|HIP|VE|XPU) to a unique layout.");
50
+ default:
51
+ return Layout::Strided;
52
+ }
53
+ }
54
+
55
+ inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
56
+ switch (layout) {
57
+ case at::kStrided:
58
+ return stream << "Strided";
59
+ case at::kSparse:
60
+ return stream << "Sparse";
61
+ case at::kSparseCsr:
62
+ return stream << "SparseCsr";
63
+ case at::kSparseCsc:
64
+ return stream << "SparseCsc";
65
+ case at::kSparseBsr:
66
+ return stream << "SparseBsr";
67
+ case at::kSparseBsc:
68
+ return stream << "SparseBsc";
69
+ case at::kMkldnn:
70
+ return stream << "Mkldnn";
71
+ case at::kJagged:
72
+ return stream << "Jagged";
73
+ default:
74
+ TORCH_CHECK(false, "Unknown layout");
75
+ }
76
+ }
77
+
78
+ } // namespace c10
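
For illustration (separate from the header): layout_from_backend maps the sparse backends to Layout::Sparse and falls through to Strided for ordinary backends, and the stream operator prints the enum name.

    #include <c10/core/Layout.h>
    #include <iostream>

    int main() {
      std::cout << c10::layout_from_backend(c10::Backend::SparseCPU) << "\n"; // Sparse
      std::cout << c10::layout_from_backend(c10::Backend::CPU) << "\n";       // Strided (default case)
      std::cout << c10::kMkldnn << "\n";                                      // Mkldnn
    }
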
venv/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h ADDED
@@ -0,0 +1,290 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/ArrayRef.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ #include <cstdint>
7
+ #include <ostream>
8
+ #include <vector>
9
+
10
+ // Memory format is not a property of a Tensor. It is the way to tell an
11
+ // operator how the result should be organized in memory and nothing more. That
12
+ // means memory format should never be used as return value for any tensor state
13
+ // interrogation functions (internally and externally).
14
+ //
15
+ // Possible options are:
16
+ // Preserve:
17
+ // If any of the input tensors is in channels_last format, operator output
18
+ // should be in channels_last format
19
+ //
20
+ // Contiguous:
21
+ // Regardless of input tensors format, the output should be contiguous
22
+ // Tensor.
23
+ //
24
+ // ChannelsLast:
25
+ // Regardless of input tensors format, the output should be in channels_last
26
+ // format.
27
+
28
+ namespace c10 {
29
+ enum class MemoryFormat : int8_t {
30
+ Contiguous,
31
+ Preserve,
32
+ ChannelsLast,
33
+ ChannelsLast3d,
34
+ NumOptions
35
+ };
36
+
37
+ // If you are seeing this, it means that this call site was not checked if
38
+ // the memory format could be preserved, and it was switched to old default
39
+ // behaviour of contiguous
40
+ #define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format()
41
+
42
+ inline MemoryFormat get_contiguous_memory_format() {
43
+ return MemoryFormat::Contiguous;
44
+ }
45
+
46
+ inline std::ostream& operator<<(
47
+ std::ostream& stream,
48
+ at::MemoryFormat memory_format) {
49
+ switch (memory_format) {
50
+ case MemoryFormat::Preserve:
51
+ return stream << "Preserve";
52
+ case MemoryFormat::Contiguous:
53
+ return stream << "Contiguous";
54
+ case MemoryFormat::ChannelsLast:
55
+ return stream << "ChannelsLast";
56
+ case MemoryFormat::ChannelsLast3d:
57
+ return stream << "ChannelsLast3d";
58
+ default:
59
+ TORCH_CHECK(false, "Unknown memory format ", memory_format);
60
+ }
61
+ }
62
+
63
+ // Note: Hardcoded the channel last stride indices here to get better
64
+ // performance
65
+ template <typename T>
66
+ inline std::vector<T> get_channels_last_strides_2d(ArrayRef<T> sizes) {
67
+ std::vector<T> strides(sizes.size());
68
+ switch (sizes.size()) {
69
+ case 4:
70
+ strides[1] = 1;
71
+ strides[3] = sizes[1];
72
+ strides[2] = strides[3] * sizes[3];
73
+ strides[0] = strides[2] * sizes[2];
74
+ return strides;
75
+ case 3:
76
+ strides[0] = 1;
77
+ strides[2] = sizes[0];
78
+ strides[1] = strides[2] * sizes[2];
79
+ return strides;
80
+ default:
81
+ TORCH_INTERNAL_ASSERT(
82
+ false, "ChannelsLast2d doesn't support size ", sizes.size());
83
+ }
84
+ }
85
+
86
+ inline std::vector<int64_t> get_channels_last_strides_2d(IntArrayRef sizes) {
87
+ return get_channels_last_strides_2d<int64_t>(sizes);
88
+ }
89
+
90
+ template <typename T>
91
+ std::vector<T> get_channels_last_strides_3d(ArrayRef<T> sizes) {
92
+ std::vector<T> strides(sizes.size());
93
+ switch (sizes.size()) {
94
+ case 5:
95
+ strides[1] = 1;
96
+ strides[4] = sizes[1];
97
+ strides[3] = strides[4] * sizes[4];
98
+ strides[2] = strides[3] * sizes[3];
99
+ strides[0] = strides[2] * sizes[2];
100
+ return strides;
101
+ case 4:
102
+ strides[0] = 1;
103
+ strides[3] = sizes[0];
104
+ strides[2] = strides[3] * sizes[3];
105
+ strides[1] = strides[2] * sizes[2];
106
+ return strides;
107
+ default:
108
+ TORCH_INTERNAL_ASSERT(
109
+ false, "ChannelsLast3d doesn't support size ", sizes.size());
110
+ }
111
+ }
112
+
113
+ inline std::vector<int64_t> get_channels_last_strides_3d(IntArrayRef sizes) {
114
+ return get_channels_last_strides_3d<int64_t>(sizes);
115
+ }
116
+
117
+ // NOTE:
118
+ // Below are helper functions for is_channels_last_strides_xd.
119
+ // 1. Please do not combine these helper functions; each helper function handles
120
+ // exactly one case of sizes + memory_format. By doing this, the stride indices
121
+ // will be a constant array that we can access using constant index numbers, and
122
+ // the compiler will fully unroll the loop on stride indices to gain better
123
+ // performance.
124
+ // 2. No error check in helper function, caller ensures the correctness of the
125
+ // input
126
+ // 3. All helper functions have similar comments, only 1st helper function is
127
+ // commented here.
128
+ template <typename T>
129
+ inline bool is_channels_last_strides_2d_s4(
130
+ const ArrayRef<T> sizes,
131
+ const ArrayRef<T> strides) {
132
+ T min = 0;
133
+ // special case for trivial C dimension. default to NCHW
134
+ if (strides[1] == 0) {
135
+ return false;
136
+ }
137
+ // loop strides indices
138
+ for (auto& d : {1, 3, 2, 0}) {
139
+ if (sizes[d] == 0) {
140
+ return false;
141
+ }
142
+ if (strides[d] < min) {
143
+ return false;
144
+ }
145
+ // Fallback to NCHW as default layout for ambiguous cases
146
+ // This is the flaw of implicit memory_format from strides.
147
+ // N111 tensor with identical strides for size 1 dimension;
148
+ // Two cases could lead us here:
149
+ // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
150
+ // b. N11W contiguous Tensor sliced on the W-dimension.
151
+ // ([N,1,1,1]@[W,W,W,W])
152
+ if (d == 0 && min == strides[1]) {
153
+ return false;
154
+ }
155
+ // This is necessary to:
156
+ // 1. distinguish the memory_format of N1H1;
157
+ // [H, 1, 1, 1] channels_last stride
158
+ // [H, H, 1, 1] contiguous stride
159
+ // 2. permutation of 1C1W:
160
+ // [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
161
+ // [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last
162
+ min = strides[d];
163
+ if (sizes[d] > 1) {
164
+ min *= sizes[d];
165
+ }
166
+ }
167
+ return true;
168
+ }
169
+
170
+ template <typename T>
171
+ inline bool is_channels_last_strides_3d_s5(
172
+ const ArrayRef<T> sizes,
173
+ const ArrayRef<T> strides) {
174
+ T min = 0;
175
+ if (strides[1] == 0) {
176
+ return false;
177
+ }
178
+ for (auto& d : {1, 4, 3, 2, 0}) {
179
+ if (sizes[d] == 0) {
180
+ return false;
181
+ }
182
+ if (strides[d] < min) {
183
+ return false;
184
+ }
185
+ if (d == 0 && min == strides[1]) {
186
+ return false;
187
+ }
188
+ min = strides[d];
189
+ if (sizes[d] > 1) {
190
+ min *= sizes[d];
191
+ }
192
+ }
193
+ return true;
194
+ }
195
+
196
+ // Note [Ambiguous is_channels_last_strides_xd]
197
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
198
+ // The flaw of carrying memory_format implicitly through strides is very hard
199
+ // to work around (WAR) properly. See issue #24090.
200
+ // Without the history of permutation, we can't infer the memory_format of a
201
+ // tensor from the snapshot of its size & stride
202
+ // e.g.
203
+ //
204
+ // 1. We can NOT specify the memory_format of N111 tensor through strides in a
205
+ // meaningful way;
206
+ //
207
+ // 2. Two paths that end up with identical size/stride:
208
+ // N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W]
209
+ // NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C]
210
+ // So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer
211
+ // the memory_format of the original tensor.
212
+ //
213
+ // Due to the limitations, our temporary WAR `is_channels_last_strides` does the
214
+ // best effort to infer whether the original memory_format of a tensor is
215
+ // at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered
216
+ // by their importance):
217
+ // 1. Ensure that normal shape manipulation does not accidentally change the
218
+ // MemoryFormat of an existing tensor.
219
+ // 2. Allows user to mark MemoryFormat::ChannelsLast to tensors;
220
+ //
221
+ // The function does so via checking strides of the tensor, including strides of
222
+ // size-1 dimensions, even though conventionally PyTorch imposes no restriction on
223
+ // trivial strides (the stride of a size-1 dimension).
224
+ //
225
+ // Note that this approach is a compromise. We did not solve the problem
226
+ // completely. Many cases we will not be able to infer the correct memory
227
+ // format.
228
+ // The implementation of `is_channels_last_strides` is to serve the objectives:
229
+ // MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental
230
+ // conversion); Best effort to maintain the ChannelsLast flag.
231
+ //
232
+ // Due to the fact that this is not a bulletproof solution, through testing
233
+ // (aten/src/ATen/test/memory_format_test.cpp)
234
+ // a. we ensure that the common tasks are supported;
235
+ // b. we identify corner cases where the implementation makes compromises.
236
+ //
237
+ // By the time accumulated permutation is enabled to replace implicit
238
+ // memory_format through strides, we should be updating our tests and fix the
239
+ // issues in our tests.
240
+ //
241
+ // We use Channels Last 2d as an example above.
242
+ // This is a general problem for all the is_channels_last_strides_xd
243
+ // implementation. Please check the helper functions
244
+ // (is_channels_last_strides_*d_s*) for more details.
245
+
246
+ template <typename T>
247
+ inline bool is_channels_last_strides_2d(
248
+ const ArrayRef<T> sizes,
249
+ const ArrayRef<T> strides) {
250
+ switch (sizes.size()) {
251
+ case 4:
252
+ return is_channels_last_strides_2d_s4(sizes, strides);
253
+ // NOLINTNEXTLINE(bugprone-branch-clone)
254
+ case 3:
255
+ // TODO dim == 3 case will be enabled once it is fully tested
256
+ return false;
257
+ default:
258
+ return false;
259
+ }
260
+ }
261
+
262
+ template <typename T>
263
+ inline bool is_channels_last_strides_3d(
264
+ const ArrayRef<T> sizes,
265
+ const ArrayRef<T> strides) {
266
+ switch (sizes.size()) {
267
+ case 5:
268
+ return is_channels_last_strides_3d_s5(sizes, strides);
269
+ // NOLINTNEXTLINE(bugprone-branch-clone)
270
+ case 4:
271
+ // TODO dim == 4 case will be enabled once it is fully tested
272
+ return false;
273
+ default:
274
+ return false;
275
+ }
276
+ }
277
+
278
+ inline bool is_channels_last_strides_2d(
279
+ const IntArrayRef sizes,
280
+ const IntArrayRef strides) {
281
+ return is_channels_last_strides_2d<int64_t>(sizes, strides);
282
+ }
283
+
284
+ inline bool is_channels_last_strides_3d(
285
+ const IntArrayRef sizes,
286
+ const IntArrayRef strides) {
287
+ return is_channels_last_strides_3d<int64_t>(sizes, strides);
288
+ }
289
+
290
+ } // namespace c10
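
A worked example, separate from the header: for sizes {N, C, H, W} = {2, 3, 4, 5} the hard-coded 2d channels-last strides come out as {C*H*W, 1, C*W, C} = {60, 1, 15, 3}, and the checker above accepts them.

    #include <c10/core/MemoryFormat.h>
    #include <c10/util/ArrayRef.h>
    #include <iostream>

    int main() {
      int64_t sizes[4] = {2, 3, 4, 5};
      auto strides = c10::get_channels_last_strides_2d(c10::IntArrayRef(sizes, 4));
      for (auto s : strides) {
        std::cout << s << " ";  // 60 1 15 3
      }
      std::cout << "\n";

      // The same strides are recognized as channels-last.
      std::cout << c10::is_channels_last_strides_2d(
                       c10::IntArrayRef(sizes, 4), c10::IntArrayRef(strides))
                << "\n";  // 1
    }
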
venv/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ namespace c10 {
4
+
5
+ template <typename T>
6
+ class OptionalRef {
7
+ public:
8
+ OptionalRef() : data_(nullptr) {}
9
+ OptionalRef(const T* data) : data_(data) {
10
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_);
11
+ }
12
+ OptionalRef(const T& data) : data_(&data) {}
13
+
14
+ bool has_value() const {
15
+ return data_ != nullptr;
16
+ }
17
+
18
+ const T& get() const {
19
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_);
20
+ return *data_;
21
+ }
22
+
23
+ operator bool() const {
24
+ return has_value();
25
+ }
26
+
27
+ private:
28
+ const T* data_;
29
+ };
30
+
31
+ } // namespace c10
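
A tiny usage sketch, separate from the header. Note that OptionalRef.h itself does not include <c10/util/Exception.h>, so the assert macro it uses has to come from an earlier include.

    #include <c10/util/Exception.h>   // provides TORCH_INTERNAL_ASSERT_DEBUG_ONLY
    #include <c10/core/OptionalRef.h>
    #include <iostream>

    double value_or(c10::OptionalRef<double> maybe, double fallback) {
      return maybe ? maybe.get() : fallback;  // operator bool() + get()
    }

    int main() {
      double x = 2.5;
      std::cout << value_or(c10::OptionalRef<double>(x), 0.0) << "\n";  // 2.5
      std::cout << value_or(c10::OptionalRef<double>(), 0.0) << "\n";   // 0 (empty ref)
    }
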
venv/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h ADDED
@@ -0,0 +1,76 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/python_stub.h>
7
+
8
+ #include <atomic>
9
+
10
+ namespace c10 {
11
+
12
+ // A PyHandleCache represents a cached pointer from a C++ object to
13
+ // a Python object that represents that object analogously in Python.
14
+ // Upon a cache hit, the relevant object can be retrieved after a test
15
+ // and then a memory load. Two conditions must hold to be able to use this
16
+ // class:
17
+ //
18
+ // - This must truly be a cache; e.g., the caller must be able to produce
19
+ // the object some other way if the cache hit misses.
20
+ //
21
+ // - This must truly be a handle; e.g., the Python object referenced by
22
+ // this class must have static lifetime. This means we don't have to
23
+ // maintain strong ownership or deallocate the object when the C++ object
24
+ // dies. Static lifetime is a good idea in conjunction with the cache,
25
+ // since if you are producing a fresh object on miss you won't be
26
+ // maintaining object identity. If you need bidirectional ownership,
27
+ // you will want to factor out the pattern in TensorImpl with
28
+ // resurrection.
29
+ //
30
+ // This cache is expected to not improve perf under torchdeploy, as one
31
+ // interpreter will fill up the cache, and all the interpreters will be
32
+ // unable to use the slot. A potential improvement is to have multiple
33
+ // slots (one per interpreter), which will work in deployment scenarios
34
+ // where there a stable, fixed number of interpreters. You can also store
35
+ // the relevant state in the Python library, rather than in the non-Python
36
+ // library (although in many cases, this is not convenient, as there may
37
+ // not be a way to conveniently index based on the object.)
38
+ class PyHandleCache {
39
+ public:
40
+ PyHandleCache() : pyinterpreter_(nullptr) {}
41
+
42
+ // Attempt to fetch the pointer from the cache, if the PyInterpreter
43
+ // matches. If it doesn't exist, or the cache entry is not valid,
44
+ // use slow_accessor to get the real pointer value and return that
45
+ // (possibly writing it to the cache, if the cache entry is
46
+ // available.)
47
+ template <typename F>
48
+ PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor)
49
+ const {
50
+ // Note [Memory ordering on Python interpreter tag]
51
+ impl::PyInterpreter* interpreter =
52
+ pyinterpreter_.load(std::memory_order_acquire);
53
+ if (C10_LIKELY(interpreter == self_interpreter)) {
54
+ return data_;
55
+ } else if (interpreter == nullptr) {
56
+ auto* r = slow_accessor();
57
+ impl::PyInterpreter* expected = nullptr;
58
+ // attempt to claim this cache entry with the specified interpreter tag
59
+ if (pyinterpreter_.compare_exchange_strong(
60
+ expected, self_interpreter, std::memory_order_acq_rel)) {
61
+ data_ = r;
62
+ }
63
+ // This shouldn't be possible, as you should be GIL protected
64
+ TORCH_INTERNAL_ASSERT(expected != self_interpreter);
65
+ return r;
66
+ } else {
67
+ return slow_accessor();
68
+ }
69
+ }
70
+
71
+ private:
72
+ mutable std::atomic<impl::PyInterpreter*> pyinterpreter_;
73
+ mutable PyObject* data_{nullptr};
74
+ };
75
+
76
+ } // namespace c10
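
Exercising the real class needs a live Python interpreter, so here is only a self-contained stand-in (plain C++ types, not the c10 ones, and not part of the added file) of the claim-once compare-exchange pattern that ptr_or() uses.

    #include <atomic>
    #include <cstdio>

    struct FakeInterpreter {};

    struct HandleCacheSketch {
      mutable std::atomic<FakeInterpreter*> tag{nullptr};
      mutable const char* data{nullptr};

      template <typename F>
      const char* ptr_or(FakeInterpreter* self, F slow) const {
        FakeInterpreter* cur = tag.load(std::memory_order_acquire);
        if (cur == self) {
          return data;                     // fast path: we own the slot
        }
        if (cur == nullptr) {
          const char* r = slow();          // compute the value the slow way
          FakeInterpreter* expected = nullptr;
          if (tag.compare_exchange_strong(
                  expected, self, std::memory_order_acq_rel)) {
            data = r;                      // first claimant publishes the cached value
          }
          return r;
        }
        return slow();                     // slot owned by a different interpreter
      }
    };

    int main() {
      FakeInterpreter interp;
      HandleCacheSketch cache;
      auto slow = [] { std::puts("slow path"); return "handle"; };
      cache.ptr_or(&interp, slow);  // prints "slow path" and claims the slot
      cache.ptr_or(&interp, slow);  // cache hit, nothing printed
    }
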
venv/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+ #include <cstdint>
5
+ #include <string>
6
+
7
+ namespace c10 {
8
+
9
+ /**
10
+ * QEngine is an enum that is used to select the engine to run quantized ops.
11
+ * Keep this enum in sync with get_qengine_id() in
12
+ * torch/backends/quantized/__init__.py
13
+ */
14
+ enum class QEngine : uint8_t {
15
+ NoQEngine = 0,
16
+ FBGEMM = 1,
17
+ QNNPACK = 2,
18
+ ONEDNN = 3,
19
+ X86 = 4,
20
+ };
21
+
22
+ constexpr auto kNoQEngine = QEngine::NoQEngine;
23
+ constexpr auto kFBGEMM = QEngine::FBGEMM;
24
+ constexpr auto kQNNPACK = QEngine::QNNPACK;
25
+ constexpr auto kONEDNN = QEngine::ONEDNN;
26
+ constexpr auto kX86 = QEngine::X86;
27
+
28
+ inline std::string toString(QEngine qengine) {
29
+ switch (qengine) {
30
+ case kNoQEngine:
31
+ return "NoQEngine";
32
+ case kFBGEMM:
33
+ return "FBGEMM";
34
+ case kQNNPACK:
35
+ return "QNNPACK";
36
+ case kONEDNN:
37
+ return "ONEDNN";
38
+ case kX86:
39
+ return "X86";
40
+ default:
41
+ TORCH_CHECK(
42
+ false, "Unrecognized Quantized Engine: ", static_cast<int>(qengine));
43
+ }
44
+ }
45
+
46
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+ #include <cstdint>
5
+ #include <string>
6
+
7
+ namespace c10 {
8
+
9
+ /**
10
+ * QScheme is an enum that specifies the type of quantization. This has a one
11
+ * to one correspondence with Quantizer
12
+ * Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes.
13
+ * Keep this file in sync with torch/nn/_qscheme.py
14
+ */
15
+ enum class QScheme : uint8_t {
16
+ PER_TENSOR_AFFINE = 0,
17
+ PER_CHANNEL_AFFINE = 1,
18
+ PER_TENSOR_SYMMETRIC = 2,
19
+ PER_CHANNEL_SYMMETRIC = 3,
20
+ PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4,
21
+ COMPILE_TIME_NUM_QSCHEMES = 5,
22
+ };
23
+
24
+ constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE;
25
+ constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE;
26
+ constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC;
27
+ constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC;
28
+ constexpr auto kPerChannelAffineFloatQParams =
29
+ QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS;
30
+ constexpr int COMPILE_TIME_NUM_QSCHEMES =
31
+ static_cast<int>(QScheme::COMPILE_TIME_NUM_QSCHEMES);
32
+
33
+ inline std::string toString(QScheme qscheme) {
34
+ switch (qscheme) {
35
+ case kPerTensorAffine:
36
+ return "per_tensor_affine";
37
+ case kPerChannelAffine:
38
+ return "per_channel_affine";
39
+ case kPerTensorSymmetric:
40
+ return "per_tensor_symmetric";
41
+ case kPerChannelSymmetric:
42
+ return "per_channel_symmetric";
43
+ case kPerChannelAffineFloatQParams:
44
+ return "per_channel_affine_float_qparams";
45
+ default:
46
+ TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast<int>(qscheme));
47
+ }
48
+ }
49
+
50
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Storage.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/UniqueVoidPtr.h>
6
+
7
+ #include <atomic>
8
+ #include <memory>
9
+
10
+ namespace c10 {
11
+
12
+ // A RefcountedDeleterContext object is used as the `ctx` argument for DataPtr
13
+ // to implement a shared DataPtr. Normally, a DataPtr is unique, but we use
14
+ // this custom context and the `refcounted_deleter` function below to make the
15
+ // DataPtr act like a non-unique DataPtr. This context object holds onto an
16
+ // inner context and deleter function which handle the actual deletion of the
17
+ // data when the refcount reaches 0.
18
+ //
19
+ // This shared DataPtr feature is only used when storages are shared between
20
+ // multiple Python interpreters in MultiPy. Before storages had PyObject
21
+ // preservation, interpreters could just share the same StorageImpl instance.
22
+ // But now a StorageImpl can only be associated with one interpreter in order
23
+ // to properly manage a zombie PyObject. So we share storages across Python
24
+ // interpreters by creating a different StorageImpl instance for each one, but
25
+ // they all point to the same data.
26
+ struct C10_API RefcountedDeleterContext {
27
+ RefcountedDeleterContext(void* other_ctx, c10::DeleterFnPtr other_deleter)
28
+ : other_ctx(other_ctx, other_deleter), refcount(1) {}
29
+
30
+ std::unique_ptr<void, c10::DeleterFnPtr> other_ctx;
31
+ std::atomic_int refcount;
32
+ };
33
+
34
+ // `refcounted_deleter` is used as the `ctx_deleter` for DataPtr to implement
35
+ // a shared DataPtr.
36
+ //
37
+ // Warning: This should only be called on a pointer to
38
+ // a RefcountedDeleterContext that was allocated on the heap with `new`,
39
+ // because when the refcount reaches 0, the context is deleted with `delete`
40
+ C10_API void refcounted_deleter(void* ctx_);
41
+
42
+ // If the storage's DataPtr does not use `refcounted_deleter`, replace it with
43
+ // a DataPtr that does, so it can be shared between multiple StorageImpls
44
+ C10_API void maybeApplyRefcountedDeleter(const c10::Storage& storage);
45
+
46
+ // Create a new StorageImpl that points to the same data. If the original
47
+ // StorageImpl's DataPtr does not use `refcounted_deleter`, it will be replaced
48
+ // with one that does
49
+ C10_API c10::Storage newStorageImplFromRefcountedDataPtr(
50
+ const c10::Storage& storage);
51
+
52
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h ADDED
@@ -0,0 +1,83 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/python_stub.h>
6
+ #include <utility>
7
+
8
+ namespace c10 {
9
+
10
+ // This is a safe owning holder for a PyObject, akin to pybind11's
11
+ // py::object, with two major differences:
12
+ //
13
+ // - It is in c10/core; i.e., you can use this type in contexts where
14
+ // you do not have a libpython dependency
15
+ //
16
+ // - It is multi-interpreter safe (ala torchdeploy); when you fetch
17
+ // the underlying PyObject* you are required to specify what the current
18
+ // interpreter context is and we will check that you match it.
19
+ //
20
+ // It is INVALID to store a reference to a Tensor object in this way;
21
+ // you should just use TensorImpl directly in that case!
22
+ struct C10_API SafePyObject {
23
+ // Steals a reference to data
24
+ SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
25
+ : data_(data), pyinterpreter_(pyinterpreter) {}
26
+ SafePyObject(SafePyObject&& other) noexcept
27
+ : data_(std::exchange(other.data_, nullptr)),
28
+ pyinterpreter_(other.pyinterpreter_) {}
29
+
30
+ // In principle this could be copyable if we add an incref to PyInterpreter
31
+ // but for now it's easier to just disallow it.
32
+ SafePyObject(SafePyObject const&) = delete;
33
+ SafePyObject& operator=(SafePyObject const&) = delete;
34
+
35
+ ~SafePyObject() {
36
+ if (data_ != nullptr) {
37
+ (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false);
38
+ }
39
+ }
40
+
41
+ c10::impl::PyInterpreter& pyinterpreter() const {
42
+ return *pyinterpreter_;
43
+ }
44
+ PyObject* ptr(const c10::impl::PyInterpreter*) const;
45
+
46
+ // stop tracking the current object, and return it
47
+ PyObject* release() {
48
+ auto rv = data_;
49
+ data_ = nullptr;
50
+ return rv;
51
+ }
52
+
53
+ private:
54
+ PyObject* data_;
55
+ c10::impl::PyInterpreter* pyinterpreter_;
56
+ };
57
+
58
+ // Like SafePyObject, but non-owning. Good for references to global PyObjects
59
+ // that will be leaked on interpreter exit. You get a copy constructor/assign
60
+ // this way.
61
+ struct C10_API SafePyHandle {
62
+ SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {}
63
+ SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
64
+ : data_(data), pyinterpreter_(pyinterpreter) {}
65
+
66
+ c10::impl::PyInterpreter& pyinterpreter() const {
67
+ return *pyinterpreter_;
68
+ }
69
+ PyObject* ptr(const c10::impl::PyInterpreter*) const;
70
+ void reset() {
71
+ data_ = nullptr;
72
+ pyinterpreter_ = nullptr;
73
+ }
74
+ operator bool() {
75
+ return data_;
76
+ }
77
+
78
+ private:
79
+ PyObject* data_;
80
+ c10::impl::PyInterpreter* pyinterpreter_;
81
+ };
82
+
83
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h ADDED
@@ -0,0 +1,461 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <stdexcept>
5
+ #include <type_traits>
6
+ #include <utility>
7
+
8
+ #include <c10/core/OptionalRef.h>
9
+ #include <c10/core/ScalarType.h>
10
+ #include <c10/core/SymBool.h>
11
+ #include <c10/core/SymFloat.h>
12
+ #include <c10/core/SymInt.h>
13
+ #include <c10/core/SymNodeImpl.h>
14
+ #include <c10/macros/Export.h>
15
+ #include <c10/macros/Macros.h>
16
+ #include <c10/util/Deprecated.h>
17
+ #include <c10/util/Exception.h>
18
+ #include <c10/util/Half.h>
19
+ #include <c10/util/TypeCast.h>
20
+ #include <c10/util/complex.h>
21
+ #include <c10/util/intrusive_ptr.h>
22
+
23
+ namespace c10 {
24
+
25
+ /**
26
+ * Scalar represents a 0-dimensional tensor which contains a single element.
27
+ * Unlike a tensor, numeric literals (in C++) are implicitly convertible to
28
+ * Scalar (which is why, for example, we provide both add(Tensor) and
29
+ * add(Scalar) overloads for many operations). It may also be used in
30
+ * circumstances where you statically know a tensor is 0-dim and single size,
31
+ * but don't know its type.
32
+ */
33
+ class C10_API Scalar {
34
+ public:
35
+ Scalar() : Scalar(int64_t(0)) {}
36
+
37
+ void destroy() {
38
+ if (Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag) {
39
+ raw::intrusive_ptr::decref(v.p);
40
+ v.p = nullptr;
41
+ }
42
+ }
43
+
44
+ ~Scalar() {
45
+ destroy();
46
+ }
47
+
48
+ #define DEFINE_IMPLICIT_CTOR(type, name) \
49
+ Scalar(type vv) : Scalar(vv, true) {}
50
+
51
+ AT_FORALL_SCALAR_TYPES_AND7(
52
+ Half,
53
+ BFloat16,
54
+ Float8_e5m2,
55
+ Float8_e4m3fn,
56
+ Float8_e5m2fnuz,
57
+ Float8_e4m3fnuz,
58
+ ComplexHalf,
59
+ DEFINE_IMPLICIT_CTOR)
60
+ AT_FORALL_COMPLEX_TYPES(DEFINE_IMPLICIT_CTOR)
61
+
62
+ // Helper constructors to allow Scalar creation from long and long long types
63
+ // As std::is_same_v<long, long long> is false (except on Android), one needs to
64
+ // provide a constructor from either long or long long in addition to one from
65
+ // int64_t
66
+ #if defined(__APPLE__) || defined(__MACOSX)
67
+ static_assert(
68
+ std::is_same_v<long long, int64_t>,
69
+ "int64_t is the same as long long on MacOS");
70
+ Scalar(long vv) : Scalar(vv, true) {}
71
+ #endif
72
+ #if defined(__linux__) && !defined(__ANDROID__)
73
+ static_assert(
74
+ std::is_same_v<long, int64_t>,
75
+ "int64_t is the same as long on Linux");
76
+ Scalar(long long vv) : Scalar(vv, true) {}
77
+ #endif
78
+
79
+ Scalar(uint16_t vv) : Scalar(vv, true) {}
80
+ Scalar(uint32_t vv) : Scalar(vv, true) {}
81
+ Scalar(uint64_t vv) {
82
+ if (vv > static_cast<uint64_t>(INT64_MAX)) {
83
+ tag = Tag::HAS_u;
84
+ v.u = vv;
85
+ } else {
86
+ tag = Tag::HAS_i;
87
+ // NB: no need to use convert, we've already tested convertibility
88
+ v.i = static_cast<int64_t>(vv);
89
+ }
90
+ }
91
+
92
+ #undef DEFINE_IMPLICIT_CTOR
93
+
94
+ // Value* is both implicitly convertible to SymbolicVariable and bool which
95
+ // causes ambiguity error. Specialized constructor for bool resolves this
96
+ // problem.
97
+ template <
98
+ typename T,
99
+ typename std::enable_if_t<std::is_same_v<T, bool>, bool>* = nullptr>
100
+ Scalar(T vv) : tag(Tag::HAS_b) {
101
+ v.i = convert<int64_t, bool>(vv);
102
+ }
103
+
104
+ template <
105
+ typename T,
106
+ typename std::enable_if_t<std::is_same_v<T, c10::SymBool>, bool>* =
107
+ nullptr>
108
+ Scalar(T vv) : tag(Tag::HAS_sb) {
109
+ v.i = convert<int64_t, c10::SymBool>(vv);
110
+ }
111
+
112
+ #define DEFINE_ACCESSOR(type, name) \
113
+ type to##name() const { \
114
+ if (Tag::HAS_d == tag) { \
115
+ return checked_convert<type, double>(v.d, #type); \
116
+ } else if (Tag::HAS_z == tag) { \
117
+ return checked_convert<type, c10::complex<double>>(v.z, #type); \
118
+ } \
119
+ if (Tag::HAS_b == tag) { \
120
+ return checked_convert<type, bool>(v.i, #type); \
121
+ } else if (Tag::HAS_i == tag) { \
122
+ return checked_convert<type, int64_t>(v.i, #type); \
123
+ } else if (Tag::HAS_u == tag) { \
124
+ return checked_convert<type, uint64_t>(v.u, #type); \
125
+ } else if (Tag::HAS_si == tag) { \
126
+ return checked_convert<type, int64_t>( \
127
+ toSymInt().guard_int(__FILE__, __LINE__), #type); \
128
+ } else if (Tag::HAS_sd == tag) { \
129
+ return checked_convert<type, int64_t>( \
130
+ toSymFloat().guard_float(__FILE__, __LINE__), #type); \
131
+ } else if (Tag::HAS_sb == tag) { \
132
+ return checked_convert<type, int64_t>( \
133
+ toSymBool().guard_bool(__FILE__, __LINE__), #type); \
134
+ } \
135
+ TORCH_CHECK(false) \
136
+ }
137
+
138
+ // TODO: Support ComplexHalf accessor
139
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ACCESSOR)
140
+ DEFINE_ACCESSOR(uint16_t, UInt16)
141
+ DEFINE_ACCESSOR(uint32_t, UInt32)
142
+ DEFINE_ACCESSOR(uint64_t, UInt64)
143
+
144
+ #undef DEFINE_ACCESSOR
145
+
146
+ SymInt toSymInt() const {
147
+ if (Tag::HAS_si == tag) {
148
+ return c10::SymInt(intrusive_ptr<SymNodeImpl>::reclaim_copy(
149
+ static_cast<SymNodeImpl*>(v.p)));
150
+ } else {
151
+ return toLong();
152
+ }
153
+ }
154
+
155
+ SymFloat toSymFloat() const {
156
+ if (Tag::HAS_sd == tag) {
157
+ return c10::SymFloat(intrusive_ptr<SymNodeImpl>::reclaim_copy(
158
+ static_cast<SymNodeImpl*>(v.p)));
159
+ } else {
160
+ return toDouble();
161
+ }
162
+ }
163
+
164
+ SymBool toSymBool() const {
165
+ if (Tag::HAS_sb == tag) {
166
+ return c10::SymBool(intrusive_ptr<SymNodeImpl>::reclaim_copy(
167
+ static_cast<SymNodeImpl*>(v.p)));
168
+ } else {
169
+ return toBool();
170
+ }
171
+ }
172
+
173
+ // also support scalar.to<int64_t>();
174
+ // Deleted for unsupported types, but specialized below for supported types
175
+ template <typename T>
176
+ T to() const = delete;
177
+
178
+ // audit uses of data_ptr
179
+ const void* data_ptr() const {
180
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
181
+ return static_cast<const void*>(&v);
182
+ }
183
+
184
+ bool isFloatingPoint() const {
185
+ return Tag::HAS_d == tag || Tag::HAS_sd == tag;
186
+ }
187
+
188
+ C10_DEPRECATED_MESSAGE(
189
+ "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")
190
+ bool isIntegral() const {
191
+ return Tag::HAS_i == tag || Tag::HAS_si == tag || Tag::HAS_u == tag;
192
+ }
193
+ bool isIntegral(bool includeBool) const {
194
+ return Tag::HAS_i == tag || Tag::HAS_si == tag || Tag::HAS_u == tag ||
195
+ (includeBool && isBoolean());
196
+ }
197
+
198
+ bool isComplex() const {
199
+ return Tag::HAS_z == tag;
200
+ }
201
+ bool isBoolean() const {
202
+ return Tag::HAS_b == tag || Tag::HAS_sb == tag;
203
+ }
204
+
205
+ // you probably don't actually want these; they're mostly for testing
206
+ bool isSymInt() const {
207
+ return Tag::HAS_si == tag;
208
+ }
209
+ bool isSymFloat() const {
210
+ return Tag::HAS_sd == tag;
211
+ }
212
+ bool isSymBool() const {
213
+ return Tag::HAS_sb == tag;
214
+ }
215
+
216
+ bool isSymbolic() const {
217
+ return Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag;
218
+ }
219
+
220
+ C10_ALWAYS_INLINE Scalar& operator=(Scalar&& other) noexcept {
221
+ if (&other == this) {
222
+ return *this;
223
+ }
224
+
225
+ destroy();
226
+ moveFrom(std::move(other));
227
+ return *this;
228
+ }
229
+
230
+ C10_ALWAYS_INLINE Scalar& operator=(const Scalar& other) {
231
+ if (&other == this) {
232
+ return *this;
233
+ }
234
+
235
+ *this = Scalar(other);
236
+ return *this;
237
+ }
238
+
239
+ Scalar operator-() const;
240
+ Scalar conj() const;
241
+ Scalar log() const;
242
+
243
+ template <
244
+ typename T,
245
+ typename std::enable_if_t<!c10::is_complex<T>::value, int> = 0>
246
+ bool equal(T num) const {
247
+ if (isComplex()) {
248
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
249
+ auto val = v.z;
250
+ return (val.real() == num) && (val.imag() == T());
251
+ } else if (isFloatingPoint()) {
252
+ TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
253
+ return v.d == num;
254
+ } else if (tag == Tag::HAS_i) {
255
+ if (overflows<T>(v.i, /* strict_unsigned */ true)) {
256
+ return false;
257
+ } else {
258
+ return static_cast<T>(v.i) == num;
259
+ }
260
+ } else if (tag == Tag::HAS_u) {
261
+ if (overflows<T>(v.u, /* strict_unsigned */ true)) {
262
+ return false;
263
+ } else {
264
+ return static_cast<T>(v.u) == num;
265
+ }
266
+ } else if (tag == Tag::HAS_si) {
267
+ TORCH_INTERNAL_ASSERT(false, "NYI SymInt equality");
268
+ } else if (isBoolean()) {
269
+ // a boolean scalar does not equal a non-boolean value
270
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
271
+ return false;
272
+ } else {
273
+ TORCH_INTERNAL_ASSERT(false);
274
+ }
275
+ }
276
+
277
+ template <
278
+ typename T,
279
+ typename std::enable_if_t<c10::is_complex<T>::value, int> = 0>
280
+ bool equal(T num) const {
281
+ if (isComplex()) {
282
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
283
+ return v.z == num;
284
+ } else if (isFloatingPoint()) {
285
+ TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
286
+ return (v.d == num.real()) && (num.imag() == T());
287
+ } else if (tag == Tag::HAS_i) {
288
+ if (overflows<T>(v.i, /* strict_unsigned */ true)) {
289
+ return false;
290
+ } else {
291
+ return static_cast<T>(v.i) == num.real() && num.imag() == T();
292
+ }
293
+ } else if (tag == Tag::HAS_u) {
294
+ if (overflows<T>(v.u, /* strict_unsigned */ true)) {
295
+ return false;
296
+ } else {
297
+ return static_cast<T>(v.u) == num.real() && num.imag() == T();
298
+ }
299
+ } else if (tag == Tag::HAS_si) {
300
+ TORCH_INTERNAL_ASSERT(false, "NYI SymInt equality");
301
+ } else if (isBoolean()) {
302
+ // a boolean scalar does not equal a non-boolean value
303
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
304
+ return false;
305
+ } else {
306
+ TORCH_INTERNAL_ASSERT(false);
307
+ }
308
+ }
309
+
310
+ bool equal(bool num) const {
311
+ if (isBoolean()) {
312
+ TORCH_INTERNAL_ASSERT(!isSymbolic());
313
+ return static_cast<bool>(v.i) == num;
314
+ } else {
315
+ return false;
316
+ }
317
+ }
318
+
319
+ ScalarType type() const {
320
+ if (isComplex()) {
321
+ return ScalarType::ComplexDouble;
322
+ } else if (isFloatingPoint()) {
323
+ return ScalarType::Double;
324
+ } else if (isIntegral(/*includeBool=*/false)) {
325
+ // Represent all integers as long, UNLESS it is unsigned and therefore
326
+ // unrepresentable as long
327
+ if (Tag::HAS_u == tag) {
328
+ return ScalarType::UInt64;
329
+ }
330
+ return ScalarType::Long;
331
+ } else if (isBoolean()) {
332
+ return ScalarType::Bool;
333
+ } else {
334
+ throw std::runtime_error("Unknown scalar type.");
335
+ }
336
+ }
337
+
338
+ Scalar(Scalar&& rhs) noexcept : tag(rhs.tag) {
339
+ moveFrom(std::move(rhs));
340
+ }
341
+
342
+ Scalar(const Scalar& rhs) : tag(rhs.tag), v(rhs.v) {
343
+ if (isSymbolic()) {
344
+ c10::raw::intrusive_ptr::incref(v.p);
345
+ }
346
+ }
347
+
348
+ Scalar(c10::SymInt si) {
349
+ if (auto m = si.maybe_as_int()) {
350
+ tag = Tag::HAS_i;
351
+ v.i = *m;
352
+ } else {
353
+ tag = Tag::HAS_si;
354
+ v.p = std::move(si).release();
355
+ }
356
+ }
357
+
358
+ Scalar(c10::SymFloat sd) {
359
+ if (sd.is_symbolic()) {
360
+ tag = Tag::HAS_sd;
361
+ v.p = std::move(sd).release();
362
+ } else {
363
+ tag = Tag::HAS_d;
364
+ v.d = sd.as_float_unchecked();
365
+ }
366
+ }
367
+
368
+ Scalar(c10::SymBool sb) {
369
+ if (auto m = sb.maybe_as_bool()) {
370
+ tag = Tag::HAS_b;
371
+ v.i = *m;
372
+ } else {
373
+ tag = Tag::HAS_sb;
374
+ v.p = std::move(sb).release();
375
+ }
376
+ }
377
+
378
+ // We can't set v in the initializer list using the
379
+ // syntax v{ .member = ... } because it doesn't work on MSVC
380
+ private:
381
+ enum class Tag { HAS_d, HAS_i, HAS_u, HAS_z, HAS_b, HAS_sd, HAS_si, HAS_sb };
382
+
383
+ // Note [Meaning of HAS_u]
384
+ // ~~~~~~~~~~~~~~~~~~~~~~~
385
+ // HAS_u is a bit special. On its face, it just means that we
386
+ // are holding an unsigned integer. However, we generally don't
387
+ // distinguish between different bit sizes in Scalar (e.g., we represent
388
+ // float as double), instead, it represents a mathematical notion
389
+ // of some quantity (integral versus floating point). So actually,
390
+ // HAS_u is used solely to represent unsigned integers that could
391
+ // not be represented as a signed integer. That means only uint64_t
392
+ potentially can get this tag; smaller types like uint8_t fit into a
393
+ regular int, and so for BC reasons we keep them as an int.
394
+
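+ // Illustrative consequence of Note [Meaning of HAS_u] (a sketch, not part of
+ // the upstream header; assumes the unsigned Scalar constructors defined
+ // earlier in this file):
+ //
+ //   Scalar small(static_cast<uint8_t>(3));
+ //   small.type();  // ScalarType::Long   -- fits in a signed int64, tag HAS_i
+ //
+ //   Scalar big(std::numeric_limits<uint64_t>::max());
+ //   big.type();    // ScalarType::UInt64 -- not representable as int64, HAS_u
+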
395
+ // NB: assumes that self has already been cleared
396
+ // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
397
+ C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept {
398
+ v = rhs.v;
399
+ tag = rhs.tag;
400
+ if (rhs.tag == Tag::HAS_si || rhs.tag == Tag::HAS_sd ||
401
+ rhs.tag == Tag::HAS_sb) {
402
+ // Move out of scalar
403
+ rhs.tag = Tag::HAS_i;
404
+ rhs.v.i = 0;
405
+ }
406
+ }
407
+
408
+ Tag tag;
409
+
410
+ union v_t {
411
+ double d{};
412
+ int64_t i;
413
+ // See Note [Meaning of HAS_u]
414
+ uint64_t u;
415
+ c10::complex<double> z;
416
+ c10::intrusive_ptr_target* p;
417
+ // NOLINTNEXTLINE(modernize-use-equals-default)
418
+ v_t() {} // default constructor
419
+ } v;
420
+
421
+ template <
422
+ typename T,
423
+ typename std::enable_if_t<
424
+ std::is_integral_v<T> && !std::is_same_v<T, bool>,
425
+ bool>* = nullptr>
426
+ Scalar(T vv, bool) : tag(Tag::HAS_i) {
427
+ v.i = convert<decltype(v.i), T>(vv);
428
+ }
429
+
430
+ template <
431
+ typename T,
432
+ typename std::enable_if_t<
433
+ !std::is_integral_v<T> && !c10::is_complex<T>::value,
434
+ bool>* = nullptr>
435
+ Scalar(T vv, bool) : tag(Tag::HAS_d) {
436
+ v.d = convert<decltype(v.d), T>(vv);
437
+ }
438
+
439
+ template <
440
+ typename T,
441
+ typename std::enable_if_t<c10::is_complex<T>::value, bool>* = nullptr>
442
+ Scalar(T vv, bool) : tag(Tag::HAS_z) {
443
+ v.z = convert<decltype(v.z), T>(vv);
444
+ }
445
+ };
446
+
447
+ using OptionalScalarRef = c10::OptionalRef<Scalar>;
448
+
449
+ // define the Scalar::to<T>() specializations (e.g. scalar.to<int64_t>())
450
+ #define DEFINE_TO(T, name) \
451
+ template <> \
452
+ inline T Scalar::to<T>() const { \
453
+ return to##name(); \
454
+ }
455
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_TO)
456
+ DEFINE_TO(uint16_t, UInt16)
457
+ DEFINE_TO(uint32_t, UInt32)
458
+ DEFINE_TO(uint64_t, UInt64)
459
+ #undef DEFINE_TO
460
+
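+ // Illustrative use of the to<T>() specializations above (a sketch, not part
+ // of the upstream header):
+ //
+ //   Scalar s(2.5);
+ //   double d = s.to<double>();  // 2.5, via toDouble()
+ //   float f = s.to<float>();    // 2.5f, via toFloat()
+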
461
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h ADDED
@@ -0,0 +1,620 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Deprecated.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Float8_e4m3fn.h>
7
+ #include <c10/util/Float8_e4m3fnuz.h>
8
+ #include <c10/util/Float8_e5m2.h>
9
+ #include <c10/util/Float8_e5m2fnuz.h>
10
+ #include <c10/util/Half.h>
11
+ #include <c10/util/bits.h>
12
+ #include <c10/util/complex.h>
13
+ #include <c10/util/qint32.h>
14
+ #include <c10/util/qint8.h>
15
+ #include <c10/util/quint2x4.h>
16
+ #include <c10/util/quint4x2.h>
17
+ #include <c10/util/quint8.h>
18
+
19
+ #include <array>
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <limits>
23
+ #include <ostream>
24
+ #include <type_traits>
25
+
26
+ namespace c10 {
27
+
28
+ // dummy struct for uint1 to uint7, actual functionality
29
+ // of these dtypes will be implemented in python with Tensor subclass
30
+ template <unsigned int N>
31
+ struct dummy_uint1_7_t {};
32
+
33
+ // For the macros below:
34
+ //
35
+ // For users: If you want to macro some code for all non-QInt scalar types
36
+ // (i.e. types with complete information), you probably want one of the
37
+ // AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND macros below, which are
38
+ // designed to behave similarly to the Dispatch macros with the same name.
39
+ //
40
+ // For adding a new dtype: In the beginning, we had an idea that there was a
41
+ // list of all scalar types, and you could use AT_FORALL_SCALAR_TYPES to
42
+ // iterate over them. But over the years we added weird types which couldn't
43
+ // be handled uniformly everywhere and so in the end we ended up with some
44
+ // mish-mosh of some helper macros, but mostly use sites making a call about
45
+ // what dtypes they can or can't support. So if you want to add a new dtype,
46
+ // the preferred resolution is to find a dtype similar to what you want,
47
+ // grep for it and edit all the sites you find this way. If you need to add
48
+ // a completely new kind of dtype, you're going to have to laboriously audit
49
+ // all of the sites everywhere to figure out how it should work. Consulting
50
+ // some old PRs where we added new dtypes (check history of this file) can
51
+ // help give you an idea where to start.
52
+
53
+ // NB: Order matters for this macro; it is relied upon in
54
+ // _promoteTypesLookup and the serialization format.
55
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \
56
+ _(uint8_t, Byte) /* 0 */ \
57
+ _(int8_t, Char) /* 1 */ \
58
+ _(int16_t, Short) /* 2 */ \
59
+ _(int, Int) /* 3 */ \
60
+ _(int64_t, Long) /* 4 */ \
61
+ _(at::Half, Half) /* 5 */ \
62
+ _(float, Float) /* 6 */ \
63
+ _(double, Double) /* 7 */ \
64
+ _(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
65
+ _(c10::complex<float>, ComplexFloat) /* 9 */ \
66
+ _(c10::complex<double>, ComplexDouble) /* 10 */ \
67
+ _(bool, Bool) /* 11 */ \
68
+ _(c10::qint8, QInt8) /* 12 */ \
69
+ _(c10::quint8, QUInt8) /* 13 */ \
70
+ _(c10::qint32, QInt32) /* 14 */ \
71
+ _(at::BFloat16, BFloat16) /* 15 */ \
72
+ _(c10::quint4x2, QUInt4x2) /* 16 */ \
73
+ _(c10::quint2x4, QUInt2x4) /* 17 */ \
74
+ _(c10::bits1x8, Bits1x8) /* 18 */ \
75
+ _(c10::bits2x4, Bits2x4) /* 19 */ \
76
+ _(c10::bits4x2, Bits4x2) /* 20 */ \
77
+ _(c10::bits8, Bits8) /* 21 */ \
78
+ _(c10::bits16, Bits16) /* 22 */ \
79
+ _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ \
80
+ _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ \
81
+ _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ \
82
+ _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */ \
83
+ _(uint16_t, UInt16) /* 27 */ \
84
+ _(uint32_t, UInt32) /* 28 */ \
85
+ _(uint64_t, UInt64) /* 29 */ \
86
+ _(c10::dummy_uint1_7_t<1>, UInt1) /* 30 */ \
87
+ _(c10::dummy_uint1_7_t<2>, UInt2) /* 31 */ \
88
+ _(c10::dummy_uint1_7_t<3>, UInt3) /* 32 */ \
89
+ _(c10::dummy_uint1_7_t<4>, UInt4) /* 33 */ \
90
+ _(c10::dummy_uint1_7_t<5>, UInt5) /* 34 */ \
91
+ _(c10::dummy_uint1_7_t<6>, UInt6) /* 35 */ \
92
+ _(c10::dummy_uint1_7_t<7>, UInt7) /* 36 */
93
+
94
+ // If you want to support ComplexHalf for real, add ComplexHalf
95
+ // into this macro (and change the name). But beware: convert()
96
+ // doesn't work for all the conversions you need...
97
+ //
98
+ // TODO: To add unsigned int types here, we must define accumulate type.
99
+ // But uint8 currently accumulates into int64, so we would have to make
100
+ // an inconsistent choice for the larger types. Difficult.
101
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) \
102
+ _(uint8_t, Byte) \
103
+ _(int8_t, Char) \
104
+ _(int16_t, Short) \
105
+ _(int, Int) \
106
+ _(int64_t, Long) \
107
+ _(at::Half, Half) \
108
+ _(float, Float) \
109
+ _(double, Double) \
110
+ _(c10::complex<float>, ComplexFloat) \
111
+ _(c10::complex<double>, ComplexDouble) \
112
+ _(bool, Bool) \
113
+ _(at::BFloat16, BFloat16) \
114
+ _(at::Float8_e5m2, Float8_e5m2) \
115
+ _(at::Float8_e4m3fn, Float8_e4m3fn)
116
+
117
+ // This macro controls many of our C++ APIs, including constructors
118
+ // for Scalar as well as the data() and item() accessors on Tensor
119
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
120
+ _(uint8_t, Byte) \
121
+ _(int8_t, Char) \
122
+ _(int16_t, Short) \
123
+ _(int, Int) \
124
+ _(int64_t, Long) \
125
+ _(at::Half, Half) \
126
+ _(float, Float) \
127
+ _(double, Double) \
128
+ _(c10::complex<c10::Half>, ComplexHalf) \
129
+ _(c10::complex<float>, ComplexFloat) \
130
+ _(c10::complex<double>, ComplexDouble) \
131
+ _(bool, Bool) \
132
+ _(at::BFloat16, BFloat16) \
133
+ _(at::Float8_e5m2, Float8_e5m2) \
134
+ _(at::Float8_e4m3fn, Float8_e4m3fn) \
135
+ _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) \
136
+ _(at::Float8_e4m3fnuz, Float8_e4m3fnuz)
137
+
138
+ enum class ScalarType : int8_t {
139
+ #define DEFINE_ST_ENUM_VAL_(_1, n) n,
140
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ST_ENUM_VAL_)
141
+ #undef DEFINE_ENUM_ST_ENUM_VAL_
142
+ Undefined,
143
+ NumOptions
144
+ };
145
+
146
+ constexpr uint16_t NumScalarTypes =
147
+ static_cast<uint16_t>(ScalarType::NumOptions);
148
+
149
+ namespace impl {
150
+
151
+ // These are used to map ScalarTypes to C++ types.
152
+
153
+ template <c10::ScalarType N>
154
+ struct ScalarTypeToCPPType;
155
+
156
+ #define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \
157
+ template <> \
158
+ struct ScalarTypeToCPPType<c10::ScalarType::scalar_type> { \
159
+ using type = cpp_type; \
160
+ \
161
+ /* This is a workaround for the CUDA bug which prevents */ \
162
+ /* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
163
+ /* ambiguous reference which can't to be resolved. For some reason it */ \
164
+ /* can't pick between at::detail and at::cuda::detail. */ \
165
+ /* For repro example, please see: */ \
166
+ /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
167
+ /* TODO: remove once the bug is fixed. */ \
168
+ static type t; \
169
+ };
170
+
171
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType)
172
+
173
+ #undef SPECIALIZE_ScalarTypeToCPPType
174
+
175
+ template <c10::ScalarType N>
176
+ using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType<N>::type;
177
+
178
+ } // namespace impl
179
+
180
+ template <typename T>
181
+ struct CppTypeToScalarType;
182
+
183
+ #define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \
184
+ template <> \
185
+ struct CppTypeToScalarType<cpp_type> \
186
+ : std:: \
187
+ integral_constant<c10::ScalarType, c10::ScalarType::scalar_type> { \
188
+ };
189
+
190
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType)
191
+
192
+ #undef SPECIALIZE_CppTypeToScalarType
193
+
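+ // Quick illustrative checks of the two mappings above (a sketch, not part of
+ // the upstream header):
+ static_assert(
+     std::is_same_v<impl::ScalarTypeToCPPTypeT<ScalarType::Long>, int64_t>,
+     "ScalarType::Long is represented by int64_t");
+ static_assert(
+     CppTypeToScalarType<double>::value == ScalarType::Double,
+     "double maps back to ScalarType::Double");
+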
194
+ // NB: despite its generic sounding name, the macros that don't take _AND
195
+ // are mostly only used by tensorexpr
196
+ #define AT_FORALL_INT_TYPES(_) \
197
+ _(uint8_t, Byte) \
198
+ _(int8_t, Char) \
199
+ _(int16_t, Short) \
200
+ _(int, Int) \
201
+ _(int64_t, Long)
202
+
203
+ #define AT_FORALL_SCALAR_TYPES(_) \
204
+ _(uint8_t, Byte) \
205
+ _(int8_t, Char) \
206
+ _(int16_t, Short) \
207
+ _(int, Int) \
208
+ _(int64_t, Long) \
209
+ _(float, Float) \
210
+ _(double, Double)
211
+
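+ // Illustrative use of the X-macro style above (a sketch, not part of the
+ // upstream header; the macro and constant names are hypothetical):
+ #define C10_COUNT_ONE_SCALAR_TYPE_(ctype, name) +1
+ constexpr int kNumBasicScalarTypes_ =
+     0 AT_FORALL_SCALAR_TYPES(C10_COUNT_ONE_SCALAR_TYPE_);
+ #undef C10_COUNT_ONE_SCALAR_TYPE_
+ static_assert(
+     kNumBasicScalarTypes_ == 7,
+     "Byte, Char, Short, Int, Long, Float, Double");
+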
212
+ // These macros are often controlling how many template instantiations we
213
+ // create for kernels. It is typically inappropriate to add new dtypes here,
214
+ // instead, new types should be added to use sites on a case-by-case basis.
215
+ // We generally are not accepting new dtypes due to binary size concerns.
216
+
217
+ #define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \
218
+ _(uint8_t, Byte) \
219
+ _(int8_t, Char) \
220
+ _(int16_t, Short) \
221
+ _(int, Int) \
222
+ _(int64_t, Long) \
223
+ _(float, Float) \
224
+ _(double, Double) \
225
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
226
+ ::c10::ScalarType::SCALARTYPE>::t), \
227
+ SCALARTYPE)
228
+
229
+ #define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \
230
+ _(uint8_t, Byte) \
231
+ _(int8_t, Char) \
232
+ _(int16_t, Short) \
233
+ _(int, Int) \
234
+ _(int64_t, Long) \
235
+ _(float, Float) \
236
+ _(double, Double) \
237
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
238
+ ::c10::ScalarType::SCALARTYPE1>::t), \
239
+ SCALARTYPE1) \
240
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
241
+ ::c10::ScalarType::SCALARTYPE2>::t), \
242
+ SCALARTYPE2)
243
+
244
+ #define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \
245
+ _(uint8_t, Byte) \
246
+ _(int8_t, Char) \
247
+ _(int16_t, Short) \
248
+ _(int, Int) \
249
+ _(int64_t, Long) \
250
+ _(float, Float) \
251
+ _(double, Double) \
252
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
253
+ ::c10::ScalarType::SCALARTYPE1>::t), \
254
+ SCALARTYPE1) \
255
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
256
+ ::c10::ScalarType::SCALARTYPE2>::t), \
257
+ SCALARTYPE2) \
258
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
259
+ ::c10::ScalarType::SCALARTYPE3>::t), \
260
+ SCALARTYPE3)
261
+
262
+ #define AT_FORALL_SCALAR_TYPES_AND4( \
263
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, _) \
264
+ _(uint8_t, Byte) \
265
+ _(int8_t, Char) \
266
+ _(int16_t, Short) \
267
+ _(int, Int) \
268
+ _(int64_t, Long) \
269
+ _(float, Float) \
270
+ _(double, Double) \
271
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
272
+ ::c10::ScalarType::SCALARTYPE1>::t), \
273
+ SCALARTYPE1) \
274
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
275
+ ::c10::ScalarType::SCALARTYPE2>::t), \
276
+ SCALARTYPE2) \
277
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
278
+ ::c10::ScalarType::SCALARTYPE3>::t), \
279
+ SCALARTYPE3) \
280
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
281
+ ::c10::ScalarType::SCALARTYPE4>::t), \
282
+ SCALARTYPE4)
283
+
284
+ #define AT_FORALL_SCALAR_TYPES_AND5( \
285
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, _) \
286
+ _(uint8_t, Byte) \
287
+ _(int8_t, Char) \
288
+ _(int16_t, Short) \
289
+ _(int, Int) \
290
+ _(int64_t, Long) \
291
+ _(float, Float) \
292
+ _(double, Double) \
293
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
294
+ ::c10::ScalarType::SCALARTYPE1>::t), \
295
+ SCALARTYPE1) \
296
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
297
+ ::c10::ScalarType::SCALARTYPE2>::t), \
298
+ SCALARTYPE2) \
299
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
300
+ ::c10::ScalarType::SCALARTYPE3>::t), \
301
+ SCALARTYPE3) \
302
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
303
+ ::c10::ScalarType::SCALARTYPE4>::t), \
304
+ SCALARTYPE4) \
305
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
306
+ ::c10::ScalarType::SCALARTYPE5>::t), \
307
+ SCALARTYPE5)
308
+
309
+ #define AT_FORALL_SCALAR_TYPES_AND6( \
310
+ SCALARTYPE1, \
311
+ SCALARTYPE2, \
312
+ SCALARTYPE3, \
313
+ SCALARTYPE4, \
314
+ SCALARTYPE5, \
315
+ SCALARTYPE6, \
316
+ _) \
317
+ _(uint8_t, Byte) \
318
+ _(int8_t, Char) \
319
+ _(int16_t, Short) \
320
+ _(int, Int) \
321
+ _(int64_t, Long) \
322
+ _(float, Float) \
323
+ _(double, Double) \
324
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
325
+ ::c10::ScalarType::SCALARTYPE1>::t), \
326
+ SCALARTYPE1) \
327
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
328
+ ::c10::ScalarType::SCALARTYPE2>::t), \
329
+ SCALARTYPE2) \
330
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
331
+ ::c10::ScalarType::SCALARTYPE3>::t), \
332
+ SCALARTYPE3) \
333
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
334
+ ::c10::ScalarType::SCALARTYPE4>::t), \
335
+ SCALARTYPE4) \
336
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
337
+ ::c10::ScalarType::SCALARTYPE5>::t), \
338
+ SCALARTYPE5) \
339
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
340
+ ::c10::ScalarType::SCALARTYPE6>::t), \
341
+ SCALARTYPE6)
342
+
343
+ #define AT_FORALL_SCALAR_TYPES_AND7( \
344
+ SCALARTYPE1, \
345
+ SCALARTYPE2, \
346
+ SCALARTYPE3, \
347
+ SCALARTYPE4, \
348
+ SCALARTYPE5, \
349
+ SCALARTYPE6, \
350
+ SCALARTYPE7, \
351
+ _) \
352
+ _(uint8_t, Byte) \
353
+ _(int8_t, Char) \
354
+ _(int16_t, Short) \
355
+ _(int, Int) \
356
+ _(int64_t, Long) \
357
+ _(float, Float) \
358
+ _(double, Double) \
359
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
360
+ ::c10::ScalarType::SCALARTYPE1>::t), \
361
+ SCALARTYPE1) \
362
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
363
+ ::c10::ScalarType::SCALARTYPE2>::t), \
364
+ SCALARTYPE2) \
365
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
366
+ ::c10::ScalarType::SCALARTYPE3>::t), \
367
+ SCALARTYPE3) \
368
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
369
+ ::c10::ScalarType::SCALARTYPE4>::t), \
370
+ SCALARTYPE4) \
371
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
372
+ ::c10::ScalarType::SCALARTYPE5>::t), \
373
+ SCALARTYPE5) \
374
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
375
+ ::c10::ScalarType::SCALARTYPE6>::t), \
376
+ SCALARTYPE6) \
377
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
378
+ ::c10::ScalarType::SCALARTYPE7>::t), \
379
+ SCALARTYPE7)
380
+
381
+ #define AT_FORALL_QINT_TYPES(_) \
382
+ _(c10::qint8, QInt8) \
383
+ _(c10::quint8, QUInt8) \
384
+ _(c10::qint32, QInt32) \
385
+ _(c10::quint4x2, QUInt4x2) \
386
+ _(c10::quint2x4, QUInt2x4)
387
+
388
+ #define AT_FORALL_COMPLEX_TYPES(_) \
389
+ _(c10::complex<float>, ComplexFloat) \
390
+ _(c10::complex<double>, ComplexDouble)
391
+
392
+ #define DEFINE_CONSTANT(_, name) \
393
+ constexpr ScalarType k##name = ScalarType::name;
394
+
395
+ // NOLINTNEXTLINE(clang-diagnostic-unused-const-variable)
396
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT)
397
+ #undef DEFINE_CONSTANT
398
+
399
+ static inline const char* toString(ScalarType t) {
400
+ #define DEFINE_CASE(_, name) \
401
+ case ScalarType::name: \
402
+ return #name;
403
+
404
+ switch (t) {
405
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE)
406
+ default:
407
+ return "UNKNOWN_SCALAR";
408
+ }
409
+ #undef DEFINE_CASE
410
+ }
411
+
412
+ static inline size_t elementSize(ScalarType t) {
413
+ #define CASE_ELEMENTSIZE_CASE(ctype, name) \
414
+ case ScalarType::name: \
415
+ return sizeof(ctype);
416
+
417
+ switch (t) {
418
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE)
419
+ default:
420
+ TORCH_CHECK(false, "Unknown ScalarType");
421
+ }
422
+ #undef CASE_ELEMENTSIZE_CASE
423
+ }
424
+
425
+ static inline bool isIntegralType(ScalarType t, bool includeBool) {
426
+ bool isIntegral =
427
+ (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int ||
428
+ t == ScalarType::Long || t == ScalarType::Short ||
429
+ t == ScalarType::UInt16 || t == ScalarType::UInt32 ||
430
+ t == ScalarType::UInt64);
431
+
432
+ return isIntegral || (includeBool && t == ScalarType::Bool);
433
+ }
434
+
435
+ C10_DEPRECATED_MESSAGE(
436
+ "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
437
+ static inline bool isIntegralType(ScalarType t) {
438
+ return isIntegralType(t, /*includeBool=*/false);
439
+ }
440
+
441
+ static inline bool isFloat8Type(ScalarType t) {
442
+ return t == ScalarType::Float8_e5m2 || t == ScalarType::Float8_e5m2fnuz ||
443
+ t == ScalarType::Float8_e4m3fn || t == ScalarType::Float8_e4m3fnuz;
444
+ }
445
+
446
+ static inline bool isReducedFloatingType(ScalarType t) {
447
+ return t == ScalarType::Half || t == ScalarType::BFloat16 || isFloat8Type(t);
448
+ }
449
+
450
+ static inline bool isFloatingType(ScalarType t) {
451
+ return t == ScalarType::Double || t == ScalarType::Float ||
452
+ isReducedFloatingType(t);
453
+ }
454
+
455
+ static inline bool isComplexType(ScalarType t) {
456
+ return (
457
+ t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat ||
458
+ t == ScalarType::ComplexDouble);
459
+ }
460
+
461
+ static inline bool isQIntType(ScalarType t) {
462
+ // Don't forget to extend this when adding new QInt types
463
+ return t == ScalarType::QInt8 || t == ScalarType::QUInt8 ||
464
+ t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 ||
465
+ t == ScalarType::QUInt2x4;
466
+ }
467
+
468
+ static inline bool isBitsType(ScalarType t) {
469
+ return t == ScalarType::Bits1x8 || t == ScalarType::Bits2x4 ||
470
+ t == ScalarType::Bits4x2 || t == ScalarType::Bits8 ||
471
+ t == ScalarType::Bits16;
472
+ }
473
+
474
+ static inline bool isBarebonesUnsignedType(ScalarType t) {
475
+ return t == ScalarType::UInt1 || t == ScalarType::UInt2 ||
476
+ t == ScalarType::UInt3 || t == ScalarType::UInt4 ||
477
+ t == ScalarType::UInt5 || t == ScalarType::UInt6 ||
478
+ t == ScalarType::UInt7 || t == ScalarType::UInt16 ||
479
+ t == ScalarType::UInt32 || t == ScalarType::UInt64;
480
+ }
481
+
482
+ static inline ScalarType toQIntType(ScalarType t) {
483
+ switch (t) {
484
+ case ScalarType::Byte:
485
+ return ScalarType::QUInt8;
486
+ case ScalarType::Char:
487
+ return ScalarType::QInt8;
488
+ case ScalarType::Int:
489
+ return ScalarType::QInt32;
490
+ default:
491
+ return t;
492
+ }
493
+ }
494
+
495
+ static inline ScalarType toUnderlying(ScalarType t) {
496
+ switch (t) {
497
+ case ScalarType::QUInt8:
498
+ case ScalarType::QUInt4x2:
499
+ [[fallthrough]];
500
+ case ScalarType::QUInt2x4:
501
+ return ScalarType::Byte;
502
+ case ScalarType::QInt8:
503
+ return ScalarType::Char;
504
+ case ScalarType::QInt32:
505
+ return ScalarType::Int;
506
+ default:
507
+ return t;
508
+ }
509
+ }
510
+
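+ // Illustrative round trip through the two helpers above (a sketch, not part
+ // of the upstream header):
+ //
+ //   toQIntType(ScalarType::Byte);       // ScalarType::QUInt8
+ //   toUnderlying(ScalarType::QUInt8);   // ScalarType::Byte
+ //   toQIntType(ScalarType::Float);      // ScalarType::Float (unchanged)
+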
511
+ static inline bool isSignedType(ScalarType t) {
512
+ TORCH_CHECK(!isQIntType(t), "isSignedType not supported for quantized types");
513
+ #define CASE_SIGNED(ctype, name) \
514
+ case ScalarType::name: \
515
+ return std::numeric_limits<ctype>::is_signed;
516
+
517
+ switch (t) {
518
+ case ScalarType::Bits1x8:
519
+ case ScalarType::Bits2x4:
520
+ case ScalarType::Bits4x2:
521
+ case ScalarType::Bits8:
522
+ case ScalarType::Bits16:
523
+ TORCH_CHECK(false, "Bits types are undefined");
524
+ case ScalarType::ComplexHalf:
525
+ case ScalarType::ComplexFloat:
526
+ case ScalarType::ComplexDouble:
527
+ return true;
528
+ AT_FORALL_SCALAR_TYPES_AND7(
529
+ Half,
530
+ Bool,
531
+ BFloat16,
532
+ Float8_e5m2,
533
+ Float8_e4m3fn,
534
+ Float8_e5m2fnuz,
535
+ Float8_e4m3fnuz,
536
+ CASE_SIGNED)
537
+ default:
538
+ TORCH_CHECK(false, "Unknown ScalarType");
539
+ }
540
+ #undef CASE_SIGNED
541
+ }
542
+
543
+ static inline bool isUnderlying(ScalarType type, ScalarType qtype) {
544
+ return type == toUnderlying(qtype);
545
+ }
546
+
547
+ static inline ScalarType toRealValueType(ScalarType t) {
548
+ switch (t) {
549
+ case ScalarType::ComplexHalf:
550
+ return ScalarType::Half;
551
+ case ScalarType::ComplexFloat:
552
+ return ScalarType::Float;
553
+ case ScalarType::ComplexDouble:
554
+ return ScalarType::Double;
555
+ default:
556
+ return t;
557
+ }
558
+ }
559
+
560
+ static inline ScalarType toComplexType(ScalarType t) {
561
+ switch (t) {
562
+ case ScalarType::BFloat16:
563
+ // BFloat16 has range equivalent to Float,
564
+ // so we map it to ComplexFloat.
565
+ return ScalarType::ComplexFloat;
566
+ case ScalarType::Half:
567
+ return ScalarType::ComplexHalf;
568
+ case ScalarType::Float:
569
+ return ScalarType::ComplexFloat;
570
+ case ScalarType::Double:
571
+ return ScalarType::ComplexDouble;
572
+ case ScalarType::ComplexHalf:
573
+ return ScalarType::ComplexHalf;
574
+ case ScalarType::ComplexFloat:
575
+ return ScalarType::ComplexFloat;
576
+ case ScalarType::ComplexDouble:
577
+ return ScalarType::ComplexDouble;
578
+ default:
579
+ TORCH_CHECK(false, "Unknown Complex ScalarType for ", t);
580
+ }
581
+ }
582
+
583
+ // see tensor_attributes.rst for detailed explanation and examples
584
+ // of casting rules.
585
+ static inline bool canCast(const ScalarType from, const ScalarType to) {
586
+ // We disallow complex -> non complex, e.g., float_tensor *= complex is
587
+ // disallowed.
588
+ if (isComplexType(from) && !isComplexType(to)) {
589
+ return false;
590
+ }
591
+ // We disallow float -> integral, e.g., int_tensor *= float is disallowed.
592
+ if (isFloatingType(from) && isIntegralType(to, false)) {
593
+ return false;
594
+ }
595
+
596
+ // Treat bool as a distinct "category," to be consistent with type promotion
597
+ // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same
598
+ // category as `bool_tensor`, we would not promote. Differing categories
599
+ // implies `bool_tensor += 5` is disallowed.
600
+ //
601
+ // NB: numpy distinguishes "unsigned" as a category to get the desired
602
+ // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because:
603
+ // * We don't want the performance hit of checking the runtime sign of
604
+ // Scalars.
605
+ // * `uint8_tensor + 5 -> int64_tensor` would be undesirable.
606
+ if (from != ScalarType::Bool && to == ScalarType::Bool) {
607
+ return false;
608
+ }
609
+ return true;
610
+ }
611
+
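+ // A few concrete instances of the rules above (a sketch, not part of the
+ // upstream header):
+ //
+ //   canCast(kComplexFloat, kFloat);  // false: complex -> non-complex
+ //   canCast(kFloat, kLong);          // false: floating -> integral
+ //   canCast(kLong, kBool);           // false: only Bool may cast to Bool
+ //   canCast(kLong, kFloat);          // true
+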
612
+ C10_API ScalarType promoteTypes(ScalarType a, ScalarType b);
613
+
614
+ inline std::ostream& operator<<(
615
+ std::ostream& stream,
616
+ at::ScalarType scalar_type) {
617
+ return stream << toString(scalar_type);
618
+ }
619
+
620
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/typeid.h>
6
+
7
+ // these just expose TypeMeta/ScalarType bridge functions in c10
8
+ // TODO move to typeid.h (or codemod away) when TypeMeta et al
9
+ // are moved from caffe2 to c10 (see note at top of typeid.h)
10
+
11
+ namespace c10 {
12
+
13
+ /**
14
+ * convert ScalarType enum values to TypeMeta handles
15
+ */
16
+ static inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) {
17
+ return caffe2::TypeMeta::fromScalarType(scalar_type);
18
+ }
19
+
20
+ /**
21
+ * convert TypeMeta handles to ScalarType enum values
22
+ */
23
+ static inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
24
+ return dtype.toScalarType();
25
+ }
26
+
27
+ /**
28
+ * typeMetaToScalarType(), lifted to optional
29
+ */
30
+ static inline optional<at::ScalarType> optTypeMetaToScalarType(
31
+ optional<caffe2::TypeMeta> type_meta) {
32
+ if (!type_meta.has_value()) {
33
+ return c10::nullopt;
34
+ }
35
+ return type_meta->toScalarType();
36
+ }
37
+
38
+ /**
39
+ * convenience: equality across TypeMeta/ScalarType conversion
40
+ */
41
+ static inline bool operator==(ScalarType t, caffe2::TypeMeta m) {
42
+ return m.isScalarType(t);
43
+ }
44
+
45
+ static inline bool operator==(caffe2::TypeMeta m, ScalarType t) {
46
+ return t == m;
47
+ }
48
+
49
+ static inline bool operator!=(ScalarType t, caffe2::TypeMeta m) {
50
+ return !(t == m);
51
+ }
52
+
53
+ static inline bool operator!=(caffe2::TypeMeta m, ScalarType t) {
54
+ return !(t == m);
55
+ }
56
+
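+ // Illustrative round trip through the bridge functions above (a sketch, not
+ // part of the upstream header):
+ //
+ //   caffe2::TypeMeta meta = scalarTypeToTypeMeta(ScalarType::Float);
+ //   TORCH_INTERNAL_ASSERT(typeMetaToScalarType(meta) == ScalarType::Float);
+ //   TORCH_INTERNAL_ASSERT(meta == ScalarType::Float);  // mixed comparison
+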
57
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/Storage.h ADDED
@@ -0,0 +1,272 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/core/DeviceType.h>
6
+ #include <c10/core/StorageImpl.h>
7
+ #include <c10/core/SymInt.h>
8
+ #include <c10/macros/Export.h>
9
+ #include <c10/util/Exception.h>
10
+ #include <c10/util/ExclusivelyOwned.h>
11
+ #include <c10/util/MaybeOwned.h>
12
+ #include <c10/util/UniqueVoidPtr.h>
13
+ #include <c10/util/intrusive_ptr.h>
14
+ #include <cstddef>
15
+ #include <utility>
16
+
17
+ namespace c10 {
18
+
19
+ struct Storage;
20
+
21
+ C10_API bool isSharedStorageAlias(
22
+ const Storage& storage0,
23
+ const Storage& storage1);
24
+
25
+ struct C10_API Storage {
26
+ public:
27
+ struct use_byte_size_t {};
28
+ struct unsafe_borrow_t {
29
+ explicit unsafe_borrow_t() = default;
30
+ };
31
+
32
+ Storage() = default;
33
+ Storage(c10::intrusive_ptr<StorageImpl> ptr)
34
+ : storage_impl_(std::move(ptr)) {}
35
+
36
+ // Allocates memory buffer using given allocator and creates a storage with it
37
+ Storage(
38
+ use_byte_size_t /*use_byte_size*/,
39
+ const SymInt& size_bytes,
40
+ Allocator* allocator = nullptr,
41
+ bool resizable = false)
42
+ : storage_impl_(c10::make_intrusive<StorageImpl>(
43
+ StorageImpl::use_byte_size_t(),
44
+ size_bytes,
45
+ allocator,
46
+ resizable)) {}
47
+
48
+ // Creates storage with pre-allocated memory buffer. Allocator is given for
49
+ // potential future reallocations, however it can be nullptr if the storage
50
+ // is non-resizable
51
+ Storage(
52
+ use_byte_size_t /*use_byte_size*/,
53
+ size_t size_bytes,
54
+ at::DataPtr data_ptr,
55
+ at::Allocator* allocator = nullptr,
56
+ bool resizable = false)
57
+ : storage_impl_(c10::make_intrusive<StorageImpl>(
58
+ StorageImpl::use_byte_size_t(),
59
+ size_bytes,
60
+ std::move(data_ptr),
61
+ allocator,
62
+ resizable)) {}
63
+
64
+ protected:
65
+ explicit Storage(unsafe_borrow_t, const Storage& rhs)
66
+ : storage_impl_(c10::intrusive_ptr<c10::StorageImpl>::reclaim(
67
+ rhs.storage_impl_.get())) {}
68
+
69
+ friend MaybeOwnedTraits<Storage>;
70
+
71
+ public:
72
+ // Legacy constructor for partially initialized (dtype or memory) storages
73
+ // that can be temporarily created with Caffe2 APIs. See the note on top of
74
+ // TensorImpl.h for details.
75
+ static Storage create_legacy(at::Device device) {
76
+ auto allocator = GetAllocator(device.type());
77
+ return Storage(c10::make_intrusive<StorageImpl>(
78
+ StorageImpl::use_byte_size_t(),
79
+ 0,
80
+ allocator->allocate(0), // materialize a non-default Device.
81
+ allocator,
82
+ true));
83
+ }
84
+
85
+ // Mimic create_legacy, but without requiring a newly-created StorageImpl.
86
+ void reset_legacy() {
87
+ TORCH_CHECK(resizable() && allocator());
88
+ set_nbytes(0);
89
+ set_data_ptr_noswap(allocator()->allocate(0));
90
+ }
91
+
92
+ // TODO: remove later
93
+ void set_nbytes(size_t size_bytes) const {
94
+ storage_impl_->set_nbytes(size_bytes);
95
+ }
96
+
97
+ void set_nbytes(c10::SymInt size_bytes) const {
98
+ storage_impl_->set_nbytes(std::move(size_bytes));
99
+ }
100
+
101
+ bool resizable() const {
102
+ return storage_impl_->resizable();
103
+ }
104
+
105
+ size_t nbytes() const {
106
+ return storage_impl_->nbytes();
107
+ }
108
+
109
+ SymInt sym_nbytes() const {
110
+ return storage_impl_->sym_nbytes();
111
+ }
112
+ // get() use here is to get const-correctness
113
+
114
+ const void* data() const {
115
+ return storage_impl_->data();
116
+ }
117
+
118
+ void* mutable_data() const {
119
+ return storage_impl_->mutable_data();
120
+ }
121
+
122
+ at::DataPtr& mutable_data_ptr() const {
123
+ return storage_impl_->mutable_data_ptr();
124
+ }
125
+
126
+ const at::DataPtr& data_ptr() const {
127
+ return storage_impl_->data_ptr();
128
+ }
129
+
130
+ // Returns the previous data_ptr
131
+ at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const {
132
+ return storage_impl_->set_data_ptr(std::move(data_ptr));
133
+ }
134
+
135
+ void set_data_ptr_noswap(at::DataPtr&& data_ptr) const {
136
+ return storage_impl_->set_data_ptr_noswap(std::move(data_ptr));
137
+ }
138
+
139
+ DeviceType device_type() const {
140
+ return storage_impl_->device_type();
141
+ }
142
+
143
+ at::Allocator* allocator() const {
144
+ return storage_impl_->allocator();
145
+ }
146
+
147
+ at::Device device() const {
148
+ return storage_impl_->device();
149
+ }
150
+
151
+ StorageImpl* unsafeReleaseStorageImpl() {
152
+ return storage_impl_.release();
153
+ }
154
+
155
+ StorageImpl* unsafeGetStorageImpl() const noexcept {
156
+ return storage_impl_.get();
157
+ }
158
+
159
+ c10::weak_intrusive_ptr<StorageImpl> getWeakStorageImpl() const {
160
+ return c10::weak_intrusive_ptr<StorageImpl>(storage_impl_);
161
+ }
162
+
163
+ operator bool() const {
164
+ return storage_impl_;
165
+ }
166
+
167
+ size_t use_count() const {
168
+ return storage_impl_.use_count();
169
+ }
170
+
171
+ inline bool unique() const {
172
+ return storage_impl_.unique();
173
+ }
174
+
175
+ bool is_alias_of(const Storage& other) const {
176
+ return (
177
+ storage_impl_ == other.storage_impl_ ||
178
+ isSharedStorageAlias(*this, other));
179
+ }
180
+
181
+ void UniqueStorageShareExternalPointer(
182
+ void* src,
183
+ size_t capacity,
184
+ DeleterFnPtr d = nullptr) {
185
+ if (!storage_impl_.unique()) {
186
+ TORCH_CHECK(
187
+ false,
188
+ "UniqueStorageShareExternalPointer can only be called when use_count == 1");
189
+ }
190
+ storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d);
191
+ }
192
+
193
+ void UniqueStorageShareExternalPointer(
194
+ at::DataPtr&& data_ptr,
195
+ size_t capacity) {
196
+ if (!storage_impl_.unique()) {
197
+ TORCH_CHECK(
198
+ false,
199
+ "UniqueStorageShareExternalPointer can only be called when use_count == 1");
200
+ }
201
+ storage_impl_->UniqueStorageShareExternalPointer(
202
+ std::move(data_ptr), capacity);
203
+ }
204
+
205
+ protected:
206
+ c10::intrusive_ptr<StorageImpl> storage_impl_;
207
+ };
208
+
209
+ template <>
210
+ struct MaybeOwnedTraits<c10::Storage> {
211
+ using owned_type = c10::Storage;
212
+ using borrow_type = c10::Storage;
213
+
214
+ static borrow_type createBorrow(const owned_type& from) {
215
+ return borrow_type(borrow_type::unsafe_borrow_t{}, from);
216
+ }
217
+
218
+ static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
219
+ lhs.unsafeReleaseStorageImpl();
220
+ lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
221
+ }
222
+
223
+ static void destroyBorrow(borrow_type& toDestroy) {
224
+ toDestroy.unsafeReleaseStorageImpl(); // "leak" it, but it was already +0.
225
+ }
226
+
227
+ static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
228
+ return borrow;
229
+ }
230
+
231
+ static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
232
+ return &borrow;
233
+ }
234
+
235
+ static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
236
+ return true;
237
+ }
238
+ };
239
+
240
+ template <>
241
+ struct ExclusivelyOwnedTraits<c10::Storage> {
242
+ using repr_type = c10::Storage;
243
+ using pointer_type = c10::Storage*;
244
+ using const_pointer_type = const c10::Storage*;
245
+
246
+ static repr_type nullRepr() {
247
+ return c10::Storage();
248
+ }
249
+
250
+ template <class... Args>
251
+ static repr_type createInPlace(Args&&... args) {
252
+ return c10::Storage(std::forward<Args>(args)...);
253
+ }
254
+
255
+ static repr_type moveToRepr(c10::Storage&& x) {
256
+ return std::move(x);
257
+ }
258
+
259
+ static c10::Storage take(c10::Storage& x) {
260
+ return std::move(x);
261
+ }
262
+
263
+ static pointer_type getImpl(repr_type& x) {
264
+ return &x;
265
+ }
266
+
267
+ static const_pointer_type getImpl(const repr_type& x) {
268
+ return &x;
269
+ }
270
+ };
271
+
272
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h ADDED
@@ -0,0 +1,276 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/core/DeviceType.h>
6
+ #include <c10/core/SymInt.h>
7
+ #include <c10/core/impl/COW.h>
8
+ #include <c10/core/impl/COWDeleter.h>
9
+ #include <c10/core/impl/PyObjectSlot.h>
10
+ #include <c10/macros/Export.h>
11
+ #include <c10/util/Exception.h>
12
+ #include <c10/util/UniqueVoidPtr.h>
13
+ #include <c10/util/intrusive_ptr.h>
14
+ #include <cstddef>
15
+ #include <utility>
16
+
17
+ namespace c10 {
18
+
19
+ // A storage represents the underlying backing data buffer for a
20
+ // tensor. This concept was inherited from the original Torch7
21
+ // codebase; we'd kind of like to get rid of the concept
22
+ // (see https://github.com/pytorch/pytorch/issues/14797) but
23
+ // it's hard work and no one has gotten around to doing it.
24
+ //
25
+ // NB: storage is supposed to uniquely own a data pointer; e.g.,
26
+ // two non-null data pointers alias if and only if they are from
27
+ // the same storage. Technically you can violate this invariant
28
+ // (e.g., you can create a non-owning StorageImpl with at::from_blob)
29
+ // but a lot of things won't work correctly, including:
30
+ //
31
+ // - An ordinary deleter on such a storage is wrong, because normal deleters
32
+ // assume unique ownership, but if you have two storages at the same data,
33
+ // that implies there is some sort of shared ownership. So your deleter would
34
+ // have to actually be internally doing some sort of refcount thing
35
+ // - Deepcopy in Python side relies on storage equality and not data pointer
36
+ // equality; so if there are two separate storages pointing to the same data,
37
+ // the data will actually get duplicated in that case (one data ptr before,
38
+ // two data ptrs after)
39
+ // - Version counts won't work correctly, because we do all VC tracking at the
40
+ // level of storages (unless you explicitly disconnect the VC with detach);
41
+ // mutation because data pointers are the same are totally untracked
42
+ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
43
+ public:
44
+ struct use_byte_size_t {};
45
+
46
+ StorageImpl(
47
+ use_byte_size_t /*use_byte_size*/,
48
+ SymInt size_bytes,
49
+ at::DataPtr data_ptr,
50
+ at::Allocator* allocator,
51
+ bool resizable)
52
+ : data_ptr_(std::move(data_ptr)),
53
+ size_bytes_(std::move(size_bytes)),
54
+ size_bytes_is_heap_allocated_(size_bytes_.is_heap_allocated()),
55
+ resizable_(resizable),
56
+ received_cuda_(false),
57
+ allocator_(allocator) {
58
+ if (resizable) {
59
+ TORCH_INTERNAL_ASSERT(
60
+ allocator_, "For resizable storage, allocator must be provided");
61
+ }
62
+ }
63
+
64
+ StorageImpl(
65
+ use_byte_size_t /*use_byte_size*/,
66
+ const SymInt& size_bytes,
67
+ at::Allocator* allocator,
68
+ bool resizable)
69
+ : StorageImpl(
70
+ use_byte_size_t(),
71
+ size_bytes,
72
+ size_bytes.is_heap_allocated()
73
+ ? allocator->allocate(0)
74
+ : allocator->allocate(size_bytes.as_int_unchecked()),
75
+ allocator,
76
+ resizable) {}
77
+
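+ // Illustrative construction (a sketch, not part of the upstream header):
+ //
+ //   auto impl = c10::make_intrusive<StorageImpl>(
+ //       StorageImpl::use_byte_size_t(),
+ //       /*size_bytes=*/SymInt(64),
+ //       GetAllocator(DeviceType::CPU),
+ //       /*resizable=*/true);
+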
78
+ StorageImpl& operator=(StorageImpl&& other) = delete;
79
+ StorageImpl& operator=(const StorageImpl&) = delete;
80
+ StorageImpl() = delete;
81
+ StorageImpl(StorageImpl&& other) = delete;
82
+ StorageImpl(const StorageImpl&) = delete;
83
+ ~StorageImpl() override = default;
84
+
85
+ void reset() {
86
+ data_ptr_.clear();
87
+ size_bytes_ = 0;
88
+ size_bytes_is_heap_allocated_ = false;
89
+ }
90
+
91
+ // Destructor doesn't call release_resources because it's
92
+ // unnecessary; don't forget to change that if needed!
93
+ void release_resources() override {
94
+ data_ptr_.clear();
95
+ }
96
+
97
+ size_t nbytes() const {
98
+ // OK to do this instead of maybe_as_int as nbytes is guaranteed positive
99
+ TORCH_CHECK(!size_bytes_is_heap_allocated_);
100
+ return size_bytes_.as_int_unchecked();
101
+ }
102
+
103
+ SymInt sym_nbytes() const {
104
+ return size_bytes_;
105
+ }
106
+
107
+ // TODO: remove later
108
+ void set_nbytes(size_t size_bytes) {
109
+ size_bytes_ = static_cast<int64_t>(size_bytes);
110
+ size_bytes_is_heap_allocated_ = false;
111
+ }
112
+
113
+ void set_nbytes(c10::SymInt size_bytes) {
114
+ size_bytes_ = std::move(size_bytes);
115
+ }
116
+
117
+ bool resizable() const {
118
+ return resizable_;
119
+ }
120
+
121
+ at::DataPtr& mutable_data_ptr() {
122
+ maybe_materialize_cow();
123
+ return data_ptr_;
124
+ }
125
+
126
+ const at::DataPtr& data_ptr() const {
127
+ return data_ptr_;
128
+ }
129
+
130
+ // Returns the previous data_ptr
131
+ at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) {
132
+ // We need to materialize the old COW DataPtr because it is
133
+ // being returned as mutable.
134
+ maybe_materialize_cow();
135
+ return set_data_ptr_no_materialize_cow(std::move(data_ptr));
136
+ }
137
+
138
+ void set_data_ptr_noswap(at::DataPtr&& data_ptr) {
139
+ data_ptr_ = std::move(data_ptr);
140
+ }
141
+
142
+ const void* data() const {
143
+ return data_ptr_.get();
144
+ }
145
+
146
+ void* mutable_data() {
147
+ maybe_materialize_cow();
148
+ return data_ptr_.mutable_get();
149
+ }
150
+
151
+ at::DeviceType device_type() const {
152
+ return data_ptr_.device().type();
153
+ }
154
+
155
+ at::Allocator* allocator() {
156
+ return allocator_;
157
+ }
158
+
159
+ const at::Allocator* allocator() const {
160
+ return allocator_;
161
+ }
162
+
163
+ // You generally shouldn't use this method, but it is occasionally
164
+ // useful if you want to override how a tensor will be reallocated,
165
+ // after it was already allocated (and its initial allocator was
166
+ // set)
167
+ void set_allocator(at::Allocator* allocator) {
168
+ allocator_ = allocator;
169
+ }
170
+
171
+ Device device() const {
172
+ return data_ptr_.device();
173
+ }
174
+
175
+ void set_resizable(bool resizable) {
176
+ if (resizable) {
177
+ // We need an allocator to be resizable
178
+ AT_ASSERT(allocator_);
179
+ }
180
+ resizable_ = resizable;
181
+ }
182
+
183
+ /**
184
+ * Can only be called when use_count is 1
185
+ */
186
+ void UniqueStorageShareExternalPointer(
187
+ void* src,
188
+ size_t size_bytes,
189
+ DeleterFnPtr d = nullptr) {
190
+ UniqueStorageShareExternalPointer(
191
+ at::DataPtr(src, src, d, data_ptr_.device()), size_bytes);
192
+ }
193
+
194
+ /**
195
+ * Can only be called when use_count is 1
196
+ */
197
+ void UniqueStorageShareExternalPointer(
198
+ at::DataPtr&& data_ptr,
199
+ size_t size_bytes) {
200
+ data_ptr_ = std::move(data_ptr);
201
+ size_bytes_ = static_cast<int64_t>(size_bytes);
202
+ size_bytes_is_heap_allocated_ = false;
203
+ allocator_ = nullptr;
204
+ resizable_ = false;
205
+ }
206
+
207
+ // This method can be used only after storage construction and cannot be used
208
+ // to modify storage status
209
+ void set_received_cuda(bool received_cuda) {
210
+ received_cuda_ = received_cuda;
211
+ }
212
+
213
+ bool received_cuda() {
214
+ return received_cuda_;
215
+ }
216
+
217
+ impl::PyObjectSlot* pyobj_slot() {
218
+ return &pyobj_slot_;
219
+ }
220
+
221
+ const impl::PyObjectSlot* pyobj_slot() const {
222
+ return &pyobj_slot_;
223
+ }
224
+
225
+ protected:
226
+ // materialize_cow_storage needs to call set_data_ptr_no_materialize_cow
227
+ friend void c10::impl::cow::materialize_cow_storage(StorageImpl& storage);
228
+
229
+ // Returns the previous data_ptr. If the old data_ptr was COW,
230
+ // this avoids materializing it
231
+ at::DataPtr set_data_ptr_no_materialize_cow(at::DataPtr&& data_ptr) {
232
+ at::DataPtr old_data_ptr(std::move(data_ptr_));
233
+ data_ptr_ = std::move(data_ptr);
234
+ return old_data_ptr;
235
+ }
236
+
237
+ private:
238
+ // Triggers a copy if this is a copy-on-write tensor.
239
+ void maybe_materialize_cow() {
240
+ if (data_ptr_.get_deleter() == impl::cow::cow_deleter) {
241
+ impl::cow::materialize_cow_storage(*this);
242
+ }
243
+ }
244
+
245
+ DataPtr data_ptr_;
246
+ SymInt size_bytes_;
247
+ bool size_bytes_is_heap_allocated_;
248
+ bool resizable_;
249
+ // Identifies that Storage was received from another process and doesn't have
250
+ // local to process cuda memory allocation
251
+ bool received_cuda_;
252
+ Allocator* allocator_;
253
+ impl::PyObjectSlot pyobj_slot_;
254
+ };
255
+
256
+ // Declare StorageImpl create function pointer types.
257
+ using StorageImplCreateHelper = intrusive_ptr<StorageImpl> (*)(
258
+ StorageImpl::use_byte_size_t,
259
+ SymInt size_bytes,
260
+ DataPtr data_ptr,
261
+ Allocator* allocator,
262
+ bool resizable);
263
+
264
+ C10_API void SetStorageImplCreate(DeviceType t, StorageImplCreateHelper fptr);
265
+
266
+ C10_API StorageImplCreateHelper GetStorageImplCreate(DeviceType t);
267
+
268
+ C10_API c10::intrusive_ptr<c10::StorageImpl> make_storage_impl(
269
+ c10::StorageImpl::use_byte_size_t use_byte_size,
270
+ c10::SymInt size_bytes,
271
+ c10::DataPtr data_ptr,
272
+ c10::Allocator* allocator,
273
+ bool resizable,
274
+ c10::optional<at::Device> device_opt);
275
+
276
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/c10/core/Stream.h ADDED
@@ -0,0 +1,176 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <cstddef>
8
+ #include <cstdint>
9
+ #include <functional>
10
+ #include <ostream>
11
+
12
+ namespace c10 {
13
+
14
+ /// An index representing a specific stream. A StreamId is not independently
15
+ /// meaningful without knowing the Device it is associated with; try to
16
+ /// use Stream rather than StreamId directly.
17
+ ///
18
+ /// StreamIds are opaque; they are assigned by some DeviceType-specific
19
+ /// numbering system which is not visible to the user. HOWEVER, we
20
+ /// guarantee that StreamId 0 is always a valid stream, and corresponds
21
+ /// to some sort of "default" stream.
22
+ using StreamId = int64_t;
23
+
24
+ struct C10_API StreamData3 {
25
+ StreamId stream_id;
26
+ DeviceIndex device_index;
27
+ DeviceType device_type;
28
+ };
29
+
30
+ // NB: I decided not to call the above StreamIndex to avoid confusion with
31
+ // DeviceIndex. This way, you access device index with index(), and stream id
32
+ // with id()
33
+
34
+ /**
35
+ * A stream is a software mechanism used to synchronize launched kernels
36
+ * without requiring explicit synchronizations between kernels. The basic
37
+ * model is that every kernel launch is associated with a stream: every
38
+ * kernel on the same stream is implicitly synchronized so that if I launch
39
+ * kernels A and B on the same stream, A is guaranteed to finish before B
40
+ * launches. If I want B to run concurrently with A, I must schedule
41
+ * it on a different stream.
42
+ *
43
+ * The Stream class is a backend agnostic value class representing a stream
44
+ * which I may schedule a kernel on. Every stream is associated with a device,
45
+ * which is recorded in stream, which is used to avoid confusion about which
46
+ * device a stream refers to.
47
+ *
48
+ * Streams are explicitly thread-safe, in the sense that it is OK to pass
49
+ * a Stream from one thread to another, and kernels queued from two different
50
+ * threads will still get serialized appropriately. (Of course, the
51
+ * time when the kernels get queued is undetermined unless you synchronize
52
+ * host side ;)
53
+ *
54
+ * Stream does NOT have a default constructor. Streams are for expert
55
+ * users; if you want to use Streams, we're going to assume you know
56
+ * how to deal with C++ template error messages if you try to
57
+ * resize() a vector of Streams.
58
+ *
59
+ * Known instances of streams in backends:
60
+ *
61
+ * - cudaStream_t (CUDA)
62
+ * - hipStream_t (HIP)
63
+ * - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration
64
+ * does NOT support command queues.)
65
+ *
66
+ * Because this class is device agnostic, it cannot provide backend-specific
67
+ * functionality (e.g., get the cudaStream_t of a CUDA stream.) There are
68
+ * wrapper classes which provide this functionality, e.g., CUDAStream.
69
+ */
70
+ class C10_API Stream final {
71
+ private:
72
+ Device device_;
73
+ StreamId id_;
74
+
75
+ public:
76
+ enum Unsafe { UNSAFE };
77
+ enum Default { DEFAULT };
78
+
79
+ /// Unsafely construct a stream from a Device and a StreamId. In
80
+ /// general, only specific implementations of streams for a
81
+ /// backend should manufacture Stream directly in this way; other users
82
+ /// should use the provided APIs to get a stream. In particular,
83
+ /// we don't require backends to give any guarantees about non-zero
84
+ /// StreamIds; they are welcome to allocate in whatever way they like.
85
+ explicit Stream(Unsafe, Device device, StreamId id)
86
+ : device_(device), id_(id) {}
87
+
88
+ /// Construct the default stream of a Device. The default stream is
89
+ /// NOT the same as the current stream; default stream is a fixed stream
90
+ /// that never changes, whereas the current stream may be changed by
91
+ /// StreamGuard.
92
+ explicit Stream(Default, Device device) : device_(device), id_(0) {}
93
+
94
+ bool operator==(const Stream& other) const noexcept {
95
+ return this->device_ == other.device_ && this->id_ == other.id_;
96
+ }
97
+ bool operator!=(const Stream& other) const noexcept {
98
+ return !(*this == other);
99
+ }
100
+
101
+ Device device() const noexcept {
102
+ return device_;
103
+ }
104
+ DeviceType device_type() const noexcept {
105
+ return device_.type();
106
+ }
107
+ DeviceIndex device_index() const noexcept {
108
+ return device_.index();
109
+ }
110
+ StreamId id() const noexcept {
111
+ return id_;
112
+ }
113
+
114
+ // Enqueues a wait instruction in the stream's work queue.
115
+ // This instruction is a no-op unless the event is marked
116
+ // for recording. In that case the stream stops processing
117
+ // until the event is recorded.
118
+ template <typename T>
119
+ void wait(const T& event) const {
120
+ event.block(*this);
121
+ }
122
+
123
+ // Return whether all asynchronous work previously enqueued on this stream
124
+ // has completed running on the device.
125
+ bool query() const;
126
+
127
+ // Wait (by blocking the calling thread) until all asynchronous work enqueued
128
+ // on this stream has completed running on the device.
129
+ void synchronize() const;
130
+
131
+ // The purpose of this function is to more conveniently permit binding
132
+ // of Stream to and from Python. Without packing, I have to setup a whole
133
+ // class with two fields (device and stream id); with packing I can just
134
+ // store a single uint64_t.
135
+ //
136
+ // The particular way we pack streams into a uint64_t is considered an
137
+ // implementation detail and should not be relied upon.
138
+ uint64_t hash() const noexcept {
139
+ // Concat these together into a 64-bit integer
140
+ uint64_t bits = static_cast<uint64_t>(device_type()) << 56 |
141
+ static_cast<uint64_t>(device_index()) << 48 |
142
+ // Remove the sign extension part of the 64-bit address because
143
+ // the id might be used to hold a pointer.
144
+ (static_cast<uint64_t>(id()) & ((1ull << 48) - 1));
145
+ return bits;
146
+ }
147
+
148
+ struct StreamData3 pack3() const {
149
+ return {id(), device_index(), device_type()};
150
+ }
151
+
152
+ static Stream unpack3(
153
+ StreamId stream_id,
154
+ DeviceIndex device_index,
155
+ DeviceType device_type) {
156
+ TORCH_CHECK(isValidDeviceType(device_type));
157
+ return Stream(UNSAFE, Device(device_type, device_index), stream_id);
158
+ }
159
+
160
+ // I decided NOT to provide setters on this class, because really,
161
+ // why would you change the device of a stream? Just construct
162
+ // it correctly from the beginning dude.
163
+ };
164
+
165
+ C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s);
166
+
167
+ } // namespace c10
168
+
169
+ namespace std {
170
+ template <>
171
+ struct hash<c10::Stream> {
172
+ size_t operator()(c10::Stream s) const noexcept {
173
+ return std::hash<uint64_t>{}(s.hash());
174
+ }
175
+ };
176
+ } // namespace std
venv/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h ADDED
@@ -0,0 +1,170 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/Stream.h>
5
+ #include <c10/core/impl/InlineStreamGuard.h>
6
+ #include <c10/core/impl/VirtualGuardImpl.h>
7
+ #include <c10/util/ArrayRef.h>
8
+ #include <c10/util/Optional.h>
9
+
10
+ namespace c10 {
11
+
12
+ /**
13
+ * A StreamGuard is an RAII class that changes the current device
14
+ * to the device corresponding to some stream, and changes the
15
+ * default stream on that device to be this stream.
16
+ *
17
+ * Use of StreamGuard is HIGHLY discouraged in operator definitions. In
18
+ * a single operator, you probably don't know enough about the global
19
+ * state of the world to profitably decide how to set streams. Let
20
+ * the caller handle this appropriately, and just use the current stream
21
+ * in your operator code.
22
+ *
23
+ * This StreamGuard does NOT have an uninitialized state; it is guaranteed
24
+ * to reset the stream and device on exit. If you are in a situation
25
+ * where you *might* want to setup a stream guard, see OptionalStreamGuard.
26
+ */
27
+ struct StreamGuard {
28
+ /// No default constructor, see Note [Omitted default constructor from RAII]
29
+ explicit StreamGuard() = delete;
30
+
31
+ /// Set the current device to the device associated with the passed stream,
32
+ /// and set the current stream on that device to the passed stream.
33
+ explicit StreamGuard(Stream stream) : guard_(stream) {}
34
+
35
+ /// Copy is disallowed
36
+ StreamGuard(const StreamGuard&) = delete;
37
+ StreamGuard& operator=(const StreamGuard&) = delete;
38
+
39
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
40
+ /// which is required for moves on types with nontrivial destructors.
41
+ StreamGuard(StreamGuard&& other) = delete;
42
+ StreamGuard& operator=(StreamGuard&& other) = delete;
43
+
44
+ /// Resets the currently set stream to the original stream and
45
+ /// the currently set device to the original device. Then,
46
+ /// set the current device to the device associated with the passed stream,
47
+ /// and set the current stream on that device to the passed stream.
48
+ ///
49
+ /// NOTE: this implementation may skip some stream/device setting if
50
+ /// it can prove that it is unnecessary.
51
+ ///
52
+ /// WARNING: reset_stream does NOT preserve previously set streams on
53
+ /// different devices. If you need to set streams on multiple devices,
54
+ /// use MultiStreamGuard instead.
55
+ void reset_stream(Stream stream) {
56
+ guard_.reset_stream(stream);
57
+ }
58
+
59
+ /// Returns the stream that was set at the time the guard was constructed.
60
+ Stream original_stream() const {
61
+ return guard_.original_stream();
62
+ }
63
+
64
+ /// Returns the most recent stream that was set using this device guard,
65
+ /// either from construction, or via set_stream.
66
+ Stream current_stream() const {
67
+ return guard_.current_stream();
68
+ }
69
+
70
+ /// Returns the most recent device that was set using this device guard,
71
+ /// either from construction, or via set_device/reset_device/set_index.
72
+ Device current_device() const {
73
+ return guard_.current_device();
74
+ }
75
+
76
+ /// Returns the device that was set at the most recent reset_stream(),
77
+ /// or otherwise the device at construction time.
78
+ Device original_device() const {
79
+ return guard_.original_device();
80
+ }
81
+
82
+ private:
83
+ c10::impl::InlineStreamGuard<impl::VirtualGuardImpl> guard_;
84
+ };
85
+
86
+ /**
87
+ * An OptionalStreamGuard is an RAII class that sets a stream (and its device) on
88
+ * initialization, and resets both to their original values on destruction.
89
+ * See OptionalDeviceGuard for more guidance on how to use this class.
90
+ */
91
+ struct OptionalStreamGuard {
92
+ /// Create an uninitialized guard.
93
+ explicit OptionalStreamGuard() = default;
94
+
95
+ /// Set the current device to the device associated with the passed stream,
96
+ /// and set the current stream on that device to the passed stream.
97
+ explicit OptionalStreamGuard(Stream stream) : guard_(stream) {}
98
+
99
+ /// Set the current device to the device associated with the passed stream,
100
+ /// and set the current stream on that device to the passed stream,
101
+ /// if the passed stream is not nullopt.
102
+ explicit OptionalStreamGuard(optional<Stream> stream_opt)
103
+ : guard_(stream_opt) {}
104
+
105
+ /// Copy is disallowed
106
+ OptionalStreamGuard(const OptionalStreamGuard&) = delete;
107
+ OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete;
108
+
109
+ // See Note [Move construction for RAII guards is tricky]
110
+ OptionalStreamGuard(OptionalStreamGuard&& other) = delete;
111
+
112
+ // See Note [Move assignment for RAII guards is tricky]
113
+ OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete;
114
+
115
+ /// Resets the currently set stream to the original stream and
116
+ /// the currently set device to the original device. Then,
117
+ /// set the current device to the device associated with the passed stream,
118
+ /// and set the current stream on that device to the passed stream.
119
+ /// Initializes the guard if it was not previously initialized.
120
+ void reset_stream(Stream stream) {
121
+ guard_.reset_stream(stream);
122
+ }
123
+
124
+ /// Returns the stream that was set at the time the guard was most recently
125
+ /// initialized, or nullopt if the guard is uninitialized.
126
+ optional<Stream> original_stream() const {
127
+ return guard_.original_stream();
128
+ }
129
+
130
+ /// Returns the most recent stream that was set using this stream guard,
131
+ /// either from construction, or via reset_stream, if the guard is
132
+ /// initialized, or nullopt if the guard is uninitialized.
133
+ optional<Stream> current_stream() const {
134
+ return guard_.current_stream();
135
+ }
136
+
137
+ /// Restore the original device and stream, resetting this guard to
138
+ /// uninitialized state.
139
+ void reset() {
140
+ guard_.reset();
141
+ }
142
+
143
+ private:
144
+ c10::impl::InlineOptionalStreamGuard<impl::VirtualGuardImpl> guard_{};
145
+ };
146
+
147
+ /**
148
+ * A MultiStreamGuard is an RAII class that sets the current streams of a set of
149
+ * devices all at once, and resets them to their original values on destruction.
150
+ */
151
+ struct MultiStreamGuard {
152
+ /// Set the current streams to the passed streams on each of their respective
153
+ /// devices.
154
+ explicit MultiStreamGuard(ArrayRef<Stream> streams) : guard_(streams) {}
155
+
156
+ /// Copy is disallowed
157
+ MultiStreamGuard(const MultiStreamGuard&) = delete;
158
+ MultiStreamGuard& operator=(const MultiStreamGuard&) = delete;
159
+
160
+ // See Note [Move construction for RAII guards is tricky]
161
+ MultiStreamGuard(MultiStreamGuard&& other) = delete;
162
+
163
+ // See Note [Move assignment for RAII guards is tricky]
164
+ MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete;
165
+
166
+ private:
167
+ c10::impl::InlineMultiStreamGuard<impl::VirtualGuardImpl> guard_;
168
+ };
169
+
170
+ } // namespace c10
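A minimal usage sketch of the two guards above. The stream argument is assumed to come from a backend-specific API (for example a CUDA stream pool), which is outside this header; the function names are illustrative:

    #include <c10/core/StreamGuard.h>
    #include <c10/util/Optional.h>

    void run_on(c10::Stream work_stream) {
      // Switches the current device and the current stream on that device
      // for the lifetime of the guard.
      c10::StreamGuard guard(work_stream);
      // ... enqueue work on guard.current_stream() ...
    }  // destructor restores the original device and stream

    void maybe_run_on(c10::optional<c10::Stream> maybe_stream) {
      // Only takes effect if a stream was actually provided.
      c10::OptionalStreamGuard guard(maybe_stream);
      // ... enqueue work ...
    }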
venv/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h ADDED
@@ -0,0 +1,107 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymNodeImpl.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+ #include <cstdint>
9
+ #include <ostream>
10
+ #include <utility>
11
+
12
+ namespace c10 {
13
+
14
+ class C10_API SymBool {
15
+ public:
16
+ /*implicit*/ SymBool(bool b) : data_(b){};
17
+ SymBool(SymNode ptr) : data_(false), ptr_(std::move(ptr)) {
18
+ TORCH_CHECK(ptr_->is_bool());
19
+ };
20
+ SymBool() : data_(false) {}
21
+
22
+ SymNodeImpl* toSymNodeImplUnowned() const {
23
+ return ptr_.get();
24
+ }
25
+
26
+ SymNodeImpl* release() && {
27
+ return std::move(ptr_).release();
28
+ }
29
+
30
+ // Only valid if is_heap_allocated()
31
+ SymNode toSymNodeImpl() const;
32
+
33
+ // Guaranteed to return a SymNode, wrapping using base if necessary
34
+ SymNode wrap_node(const SymNode& base) const;
35
+
36
+ bool expect_bool() const {
37
+ c10::optional<bool> c = maybe_as_bool();
38
+ TORCH_CHECK(c.has_value());
39
+ return *c;
40
+ }
41
+
42
+ SymBool sym_and(const SymBool&) const;
43
+ SymBool sym_or(const SymBool&) const;
44
+ SymBool sym_not() const;
45
+
46
+ SymBool operator&(const SymBool& other) const {
47
+ return sym_and(other);
48
+ }
49
+ SymBool operator|(const SymBool& other) const {
50
+ return sym_or(other);
51
+ }
52
+ SymBool operator~() const {
53
+ return sym_not();
54
+ }
55
+
56
+ // Insert a guard for the bool to be its concrete value, and then return
57
+ // that value. Note that C++ comparison operations default to returning
58
+ // bool, so it's not so common to have to call this
59
+ bool guard_bool(const char* file, int64_t line) const;
60
+ bool expect_true(const char* file, int64_t line) const;
61
+ bool guard_size_oblivious(const char* file, int64_t line) const;
62
+
63
+ bool has_hint() const;
64
+
65
+ bool as_bool_unchecked() const {
66
+ return data_;
67
+ }
68
+
69
+ c10::optional<bool> maybe_as_bool() const {
70
+ if (!is_heap_allocated()) {
71
+ return c10::make_optional(data_);
72
+ }
73
+ return toSymNodeImplUnowned()->constant_bool();
74
+ }
75
+
76
+ bool is_heap_allocated() const {
77
+ return ptr_;
78
+ }
79
+
80
+ private:
81
+ // TODO: optimize to union
82
+ bool data_;
83
+ SymNode ptr_;
84
+ };
85
+
86
+ C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s);
87
+
88
+ #define TORCH_SYM_CHECK(cond, ...) \
89
+ TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
90
+ #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) \
91
+ TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
92
+
93
+ inline bool guard_size_oblivious(bool b, const char* file, int64_t line) {
94
+ return b;
95
+ }
96
+
97
+ inline bool guard_size_oblivious(
98
+ const c10::SymBool& b,
99
+ const char* file,
100
+ int64_t line) {
101
+ return b.guard_size_oblivious(file, line);
102
+ }
103
+
104
+ #define TORCH_GUARD_SIZE_OBLIVIOUS(cond) \
105
+ c10::guard_size_oblivious((cond), __FILE__, __LINE__)
106
+
107
+ } // namespace c10
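A sketch of the intended consumption pattern: symbolic comparisons (here SymInt::sym_eq, declared in SymInt.h) yield a SymBool, and a guard is only inserted when a concrete answer is actually required. The helper name is illustrative:

    #include <c10/core/SymBool.h>
    #include <c10/core/SymInt.h>

    inline bool same_size(const c10::SymInt& a, const c10::SymInt& b) {
      c10::SymBool eq = a.sym_eq(b);      // purely symbolic, no guard yet
      if (auto c = eq.maybe_as_bool()) {  // constant/non-symbolic: answer is free
        return *c;
      }
      // Otherwise record a guard and specialize to the concrete value.
      return eq.guard_bool(__FILE__, __LINE__);
    }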
venv/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h ADDED
@@ -0,0 +1,113 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymBool.h>
4
+ #include <c10/core/SymNodeImpl.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/macros/Macros.h>
7
+ #include <c10/util/Exception.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+
10
+ #include <cstdint>
11
+ #include <limits>
12
+ #include <ostream>
13
+ #include <utility>
14
+
15
+ namespace c10 {
16
+
17
+ // NB: this is actually double precision; we're using the Python naming here
18
+ class C10_API SymFloat {
19
+ public:
20
+ /*implicit*/ SymFloat(double d) : data_(d){};
21
+ SymFloat(SymNode ptr)
22
+ : data_(std::numeric_limits<double>::quiet_NaN()), ptr_(std::move(ptr)) {
23
+ TORCH_CHECK(ptr_->is_float());
24
+ };
25
+ SymFloat() : data_(0.0) {}
26
+
27
+ SymNodeImpl* toSymNodeImplUnowned() const {
28
+ return ptr_.get();
29
+ }
30
+
31
+ SymNodeImpl* release() && {
32
+ return std::move(ptr_).release();
33
+ }
34
+
35
+ // Only valid if is_symbolic()
36
+ SymNode toSymNodeImpl() const;
37
+
38
+ // Guaranteed to return a SymNode, wrapping using base if necessary
39
+ SymNode wrap_node(const SymNode& base) const;
40
+
41
+ double expect_float() const {
42
+ TORCH_CHECK(!is_symbolic());
43
+ return data_;
44
+ }
45
+
46
+ SymFloat operator+(const SymFloat&) const;
47
+ SymFloat operator-(const SymFloat&) const;
48
+ SymFloat operator*(const SymFloat&) const;
49
+ SymFloat operator/(const SymFloat&) const;
50
+
51
+ SymBool sym_eq(const SymFloat&) const;
52
+ SymBool sym_ne(const SymFloat&) const;
53
+ SymBool sym_lt(const SymFloat&) const;
54
+ SymBool sym_le(const SymFloat&) const;
55
+ SymBool sym_gt(const SymFloat&) const;
56
+ SymBool sym_ge(const SymFloat&) const;
57
+
58
+ bool operator==(const SymFloat& o) const {
59
+ return sym_eq(o).guard_bool(__FILE__, __LINE__);
60
+ }
61
+ bool operator!=(const SymFloat& o) const {
62
+ return sym_ne(o).guard_bool(__FILE__, __LINE__);
63
+ }
64
+ bool operator<(const SymFloat& o) const {
65
+ return sym_lt(o).guard_bool(__FILE__, __LINE__);
66
+ }
67
+ bool operator<=(const SymFloat& o) const {
68
+ return sym_le(o).guard_bool(__FILE__, __LINE__);
69
+ }
70
+ bool operator>(const SymFloat& o) const {
71
+ return sym_gt(o).guard_bool(__FILE__, __LINE__);
72
+ }
73
+ bool operator>=(const SymFloat& o) const {
74
+ return sym_ge(o).guard_bool(__FILE__, __LINE__);
75
+ }
76
+
77
+ SymFloat min(const SymFloat& sci) const;
78
+ SymFloat max(const SymFloat& sci) const;
79
+
80
+ // Need guidance on where to put this code
81
+ SymFloat sqrt() const;
82
+
83
+ // Insert a guard for the float to be its concrete value, and then return
84
+ // that value. This operation always works, even if the float is symbolic,
85
+ // so long as we know what the underlying value is. Don't blindly put this
86
+ // everywhere; you can cause overspecialization of PyTorch programs with
87
+ // this method.
88
+ //
89
+ // It should be called as guard_float(__FILE__, __LINE__). The file and line
90
+ // number can be used to diagnose overspecialization.
91
+ double guard_float(const char* file, int64_t line) const;
92
+
93
+ bool has_hint() const;
94
+
95
+ // N.B. It's important to keep this definition in the header
96
+ // as we expect if checks to be folded for mobile builds
97
+ // where `is_symbolic` is always false
98
+ C10_ALWAYS_INLINE bool is_symbolic() const {
99
+ return ptr_;
100
+ }
101
+
102
+ double as_float_unchecked() const {
103
+ return data_;
104
+ }
105
+
106
+ private:
107
+ // TODO: optimize to union
108
+ double data_;
109
+ SymNode ptr_;
110
+ };
111
+
112
+ C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s);
113
+ } // namespace c10
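A sketch of SymFloat arithmetic followed by a guard; the helper name and the computation are illustrative:

    #include <c10/core/SymFloat.h>

    inline double scale_and_read(const c10::SymFloat& x) {
      c10::SymFloat y = x * c10::SymFloat(2.0) + c10::SymFloat(1.0);
      if (!y.is_symbolic()) {
        return y.as_float_unchecked();  // plain double, no guard required
      }
      // Symbolic case: guard_float records a guard and returns the hinted value.
      return y.guard_float(__FILE__, __LINE__);
    }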
venv/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h ADDED
@@ -0,0 +1,423 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymBool.h>
4
+ #include <c10/core/SymNodeImpl.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/macros/Macros.h>
7
+ #include <c10/util/Exception.h>
8
+ #include <c10/util/Optional.h>
9
+
10
+ #include <cstdint>
11
+ #include <iterator>
12
+ #include <numeric>
13
+ #include <ostream>
14
+ #include <type_traits>
15
+
16
+ namespace c10 {
17
+
18
+ class SymFloat;
19
+
20
+ // SymInt represents either a regular int64_t, or a symbolic integer
21
+ // (represented in a type erased way as SymNode). The intention is for SymInt
22
+ // to represent symbolic sizes that arise when doing shape computation in
23
+ // operator kernels. This allows for tracing through programs without baking in
24
+ // concrete sizes into kernel calls.
25
+ //
26
+ // SymInt has an API equivalent to int64_t. In particular, it is a value type.
27
+ // Internally, SymInt is represented in a clever packed way, so that it only
28
+ // occupies one word of space; but morally, it is a union between an int64_t
29
+ // and an intrusive pointer to SymNodeImpl.
30
+ //
31
+ // Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where
32
+ // is_int() returns true
33
+
34
+ class C10_API SymInt {
35
+ public:
36
+ enum Unchecked {
37
+ UNCHECKED,
38
+ };
39
+
40
+ /*implicit*/ SymInt(int64_t d) : data_(d) {
41
+ if (is_heap_allocated()) {
42
+ // Large negative number, heap allocate it
43
+ promote_to_negative();
44
+ }
45
+ };
46
+ SymInt() : data_(0) {}
47
+ SymInt(SymNode n);
48
+
49
+ // unchecked c-tor accepting raw `data_`
50
+ // One appropriate use for this is when you are constructing a symint
51
+ // in a situation where you know it is non-negative (or, if it is negative,
52
+ // the negative value is -1; i.e., not user controlled)
53
+ SymInt(Unchecked, int64_t d) : data_(d) {}
54
+
55
+ // TODO: these implementations are not optimal because they allocate a
56
+ // temporary and then use the move constructor/assignment
57
+ SymInt(const SymInt& s) : data_(0) {
58
+ if (s.is_heap_allocated()) {
59
+ *this = SymInt(s.toSymNode());
60
+ } else {
61
+ data_ = s.data_;
62
+ }
63
+ }
64
+ SymInt(SymInt&& s) noexcept : data_(s.data_) {
65
+ s.data_ = 0;
66
+ }
67
+
68
+ SymInt& operator=(const SymInt& s) {
69
+ if (this != &s) {
70
+ if (s.is_heap_allocated()) {
71
+ *this = SymInt(s.toSymNode());
72
+ } else {
73
+ data_ = s.data_;
74
+ }
75
+ }
76
+ return *this;
77
+ }
78
+ SymInt& operator=(SymInt&& s) noexcept {
79
+ if (this != &s) {
80
+ release_(); // release the current SymNode if any
81
+ data_ = s.data_;
82
+ if (s.is_heap_allocated())
83
+ s.data_ = 0;
84
+ };
85
+ return *this;
86
+ }
87
+
88
+ SymNodeImpl* toSymNodeImplUnowned() const {
89
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated());
90
+ uint64_t unextended_bits = static_cast<uint64_t>(data_) & ~MASK;
91
+ uint64_t sign_bit_mask = 1ULL << (62 - 1);
92
+ // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c
93
+ uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask;
94
+ return static_cast<SymNodeImpl*>(
95
+ // NOLINTNEXTLINE(performance-no-int-to-ptr)
96
+ reinterpret_cast<void*>(static_cast<uintptr_t>(extended_bits)));
97
+ }
98
+
99
+ void release_() {
100
+ if (is_heap_allocated()) {
101
+ SymNode::reclaim(toSymNodeImplUnowned()); // steal
102
+ }
103
+ }
104
+
105
+ SymNodeImpl* release() && {
106
+ #ifndef C10_MOBILE
107
+ TORCH_INTERNAL_ASSERT(is_heap_allocated());
108
+ auto* r = toSymNodeImplUnowned();
109
+ data_ = 0; // transfer ownership
110
+ return r;
111
+ #else
112
+ TORCH_INTERNAL_ASSERT(false);
113
+ #endif
114
+ }
115
+
116
+ // Only valid if is_heap_allocated()
117
+ SymNode toSymNode() const;
118
+
119
+ // Guaranteed to return a SymNode, wrapping using base if necessary
120
+ SymNode wrap_node(const SymNode& base) const;
121
+
122
+ ~SymInt() {
123
+ release_();
124
+ }
125
+
126
+ // Require the int to be non-symbolic, and if it is symbolic raise an
127
+ // error. This is safe to use for C++ code that doesn't work for symbolic
128
+ // shapes, and you don't have time to fix it immediately, as if we
129
+ // try to trigger the path in C++ you'll appropriately get an error
130
+ int64_t expect_int() const {
131
+ if (auto r = maybe_as_int()) {
132
+ return *r;
133
+ }
134
+ TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(
135
+ false, "when unpacking SymInt, expected int but got ", *this);
136
+ }
137
+
138
+ // Test if we have a hint for this int (e.g., guard_int would work).
139
+ // Most of the time this is true; it is only false when you have
140
+ // an unbacked SymInt.
141
+ bool has_hint() const;
142
+
143
+ // Insert a guard for the int to be its concrete value, and then return
144
+ // that value. This operation always works, even if the int is symbolic,
145
+ // so long as we know what the underlying value is (e.g., this won't work
146
+ // if you call it on the size of nonzero output). Don't blindly put this
147
+ // everywhere; you can cause overspecialization of PyTorch programs with
148
+ // this method.
149
+ //
150
+ // It should be called as guard_int(__FILE__, __LINE__). The file and line
151
+ // number can be used to diagnose overspecialization.
152
+ int64_t guard_int(const char* file, int64_t line) const;
153
+
154
+ // Insert a guard that this SymInt must be size-like, returning true if
155
+ // the integer actually is >= 0. Unlike manually performing a >= 0 test,
156
+ // if the SymInt in question is an unbacked SymInt (or, potentially in the
157
+ // future, if it contains unbacked SymInts), we will also treat the
158
+ // unbacked SymInt as statically testing >= 2 (which will prevent us from
159
+ // choking on, e.g., contiguity checks.)
160
+ bool expect_size(const char* file, int64_t line) const;
161
+
162
+ // Distinguish actual symbolic values from constants stored on the heap
163
+ bool is_symbolic() const {
164
+ return is_heap_allocated() &&
165
+ !toSymNodeImplUnowned()->constant_int().has_value();
166
+ }
167
+
168
+ // N.B. It's important to keep this definition in the header
169
+ // as we expect if checks to be folded for mobile builds
170
+ // where `is_heap_allocated` is always false and optimize dead code paths
171
+ C10_ALWAYS_INLINE bool is_heap_allocated() const {
172
+ #ifdef C10_MOBILE
173
+ return false;
174
+ #else
175
+ return !check_range(data_);
176
+ #endif
177
+ }
178
+
179
+ SymInt operator+(const SymInt& sci) const;
180
+ SymInt operator-(const SymInt& sci) const;
181
+ SymInt operator*(const SymInt& sci) const;
182
+ SymInt operator/(const SymInt& sci) const;
183
+ SymInt operator%(const SymInt& sci) const;
184
+ void operator*=(const SymInt& sci);
185
+ void operator+=(const SymInt& sci);
186
+ void operator/=(const SymInt& sci);
187
+
188
+ SymInt clone() const;
189
+
190
+ SymBool sym_eq(const SymInt&) const;
191
+ SymBool sym_ne(const SymInt&) const;
192
+ SymBool sym_lt(const SymInt&) const;
193
+ SymBool sym_le(const SymInt&) const;
194
+ SymBool sym_gt(const SymInt&) const;
195
+ SymBool sym_ge(const SymInt&) const;
196
+
197
+ bool operator==(const SymInt& o) const {
198
+ return sym_eq(o).guard_bool(__FILE__, __LINE__);
199
+ }
200
+ bool operator!=(const SymInt& o) const {
201
+ return sym_ne(o).guard_bool(__FILE__, __LINE__);
202
+ }
203
+ bool operator<(const SymInt& o) const {
204
+ return sym_lt(o).guard_bool(__FILE__, __LINE__);
205
+ }
206
+ bool operator<=(const SymInt& o) const {
207
+ return sym_le(o).guard_bool(__FILE__, __LINE__);
208
+ }
209
+ bool operator>(const SymInt& o) const {
210
+ return sym_gt(o).guard_bool(__FILE__, __LINE__);
211
+ }
212
+ bool operator>=(const SymInt& o) const {
213
+ return sym_ge(o).guard_bool(__FILE__, __LINE__);
214
+ }
215
+
216
+ SymInt min(const SymInt& sci) const;
217
+ SymInt max(const SymInt& sci) const;
218
+
219
+ // If both are symbolic, this checks if
220
+ // they share the same node.
221
+ // If both are not symbolic this just checks normal equality.
222
+ bool is_same(const SymInt& other) const;
223
+
224
+ operator SymFloat() const;
225
+
226
+ // Don't use this. Prefer maybe_as_int instead
227
+ int64_t as_int_unchecked() const {
228
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
229
+ return data_;
230
+ }
231
+
232
+ c10::optional<int64_t> maybe_as_int() const {
233
+ if (!is_heap_allocated()) {
234
+ return c10::make_optional(data_);
235
+ }
236
+ auto* node = toSymNodeImplUnowned();
237
+ if (auto c = node->constant_int()) {
238
+ return c;
239
+ }
240
+ return node->maybe_as_int();
241
+ }
242
+
243
+ // Return whether the integer is directly coercible to a SymInt
244
+ // without requiring heap allocation. You don't need to use this
245
+ // to check if you can pass an integer to SymInt; this is guaranteed
246
+ // to work (it just might heap allocate!)
247
+ static bool check_range(int64_t i) {
248
+ return i > MAX_UNREPRESENTABLE_INT;
249
+ }
250
+
251
+ // Return the min representable integer as a SymInt without
252
+ // heap allocation. For quantities that count bytes (or larger),
253
+ // this is still much larger than you need, so you may consider
254
+ // using this as a more efficient version of MIN_INT
255
+ static constexpr int64_t min_representable_int() {
256
+ return MAX_UNREPRESENTABLE_INT + 1;
257
+ }
258
+
259
+ private:
260
+ void promote_to_negative();
261
+
262
+ // Constraints on the internal representation:
263
+ //
264
+ // - Should represent positive and small negative ints
265
+ // - No conversion necessary for operations on ints
266
+ // - Must represent valid 64-bit pointers
267
+ // - Is symbolic test should be FAST (two arithmetic instructions is too
268
+ // much).
269
+ // This code being a hotpath is based on Strobelight profiles of
270
+ // is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd
271
+ // (you will need to change the time window).
272
+ //
273
+ // So, the scheme is to reserve large negative numbers (assuming
274
+ // two's complement):
275
+ //
276
+ // - 0b0.... means we are a positive int
277
+ // - 0b11... means we are a small negative int
278
+ // - 0b10... means we are are a pointer. This means that
279
+ // [-2^63, -2^62-1] are not representable as ints.
280
+ // We don't actually need all of this space as on x86_64
281
+ // as the top 16bits aren't used for anything
282
+ static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61;
283
+ static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61;
284
+ // We must manually translate the bit pattern test into a greater
285
+ // than test because compiler doesn't figure it out:
286
+ // https://godbolt.org/z/356aferaW
287
+ static constexpr int64_t MAX_UNREPRESENTABLE_INT =
288
+ -1LL & static_cast<int64_t>(~(1ULL << 62));
289
+ int64_t data_;
290
+ };
291
+
292
+ /// Sum of a list of SymInt; accumulates into the c10::SymInt expression
293
+ template <
294
+ typename C,
295
+ typename std::enable_if_t<
296
+ std::is_same_v<typename C::value_type, c10::SymInt>,
297
+ int> = 0>
298
+ inline c10::SymInt multiply_integers(const C& container) {
299
+ return std::accumulate(
300
+ container.begin(),
301
+ container.end(),
302
+ c10::SymInt(1),
303
+ [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
304
+ }
305
+
306
+ template <
307
+ typename Iter,
308
+ typename = std::enable_if_t<std::is_same_v<
309
+ typename std::iterator_traits<Iter>::value_type,
310
+ c10::SymInt>>>
311
+ inline c10::SymInt multiply_integers(Iter begin, Iter end) {
312
+ return std::accumulate(
313
+ begin,
314
+ end,
315
+ c10::SymInt(1),
316
+ [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
317
+ }
318
+
319
+ #define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy) \
320
+ C10_API RetTy operator%(const SymInt& a, scalar_t b); \
321
+ C10_API RetTy operator%(scalar_t a, const SymInt& b);
322
+
323
+ #define DECLARE_SYMINT_OP(scalar_t, RetTy) \
324
+ C10_API RetTy operator+(const SymInt& a, scalar_t b); \
325
+ C10_API RetTy operator-(const SymInt& a, scalar_t b); \
326
+ C10_API RetTy operator*(const SymInt& a, scalar_t b); \
327
+ C10_API RetTy operator/(const SymInt& a, scalar_t b); \
328
+ C10_API RetTy operator+(scalar_t a, const SymInt& b); \
329
+ C10_API RetTy operator-(scalar_t a, const SymInt& b); \
330
+ C10_API RetTy operator*(scalar_t a, const SymInt& b); \
331
+ C10_API RetTy operator/(scalar_t a, const SymInt& b); \
332
+ C10_API bool operator==(const SymInt& a, scalar_t b); \
333
+ C10_API bool operator!=(const SymInt& a, scalar_t b); \
334
+ C10_API bool operator<(const SymInt& a, scalar_t b); \
335
+ C10_API bool operator<=(const SymInt& a, scalar_t b); \
336
+ C10_API bool operator>(const SymInt& a, scalar_t b); \
337
+ C10_API bool operator>=(const SymInt& a, scalar_t b); \
338
+ C10_API bool operator==(scalar_t a, const SymInt& b); \
339
+ C10_API bool operator!=(scalar_t a, const SymInt& b); \
340
+ C10_API bool operator<(scalar_t a, const SymInt& b); \
341
+ C10_API bool operator<=(scalar_t a, const SymInt& b); \
342
+ C10_API bool operator>(scalar_t a, const SymInt& b); \
343
+ C10_API bool operator>=(scalar_t a, const SymInt& b);
344
+
345
+ DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt)
346
+ DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt)
347
+ DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt)
348
+ DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt)
349
+ DECLARE_SYMINT_OP(int64_t, SymInt)
350
+ DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work
351
+ DECLARE_SYMINT_OP(uint64_t, SymInt)
352
+ DECLARE_SYMINT_OP(uint32_t, SymInt)
353
+ DECLARE_SYMINT_OP(double, SymFloat)
354
+ DECLARE_SYMINT_OP(float, SymFloat) // just for completeness
355
+
356
+ // On OSX size_t is different than uint64_t so we have to
357
+ // define it separately
358
+ #if defined(__APPLE__)
359
+ DECLARE_SYMINT_OP_INTONLY(size_t, SymInt)
360
+ DECLARE_SYMINT_OP(size_t, SymInt)
361
+ #endif
362
+
363
+ #undef DECLARE_SYMINT_OP
364
+
365
+ C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s);
366
+ C10_API SymInt operator-(const SymInt& s);
367
+
368
+ inline bool sym_eq(int64_t a, int64_t b) {
369
+ return a == b;
370
+ }
371
+
372
+ inline SymBool sym_eq(const SymInt& a, const SymInt& b) {
373
+ return a.sym_eq(b);
374
+ }
375
+
376
+ inline bool sym_ne(int64_t a, int64_t b) {
377
+ return a != b;
378
+ }
379
+
380
+ inline SymBool sym_ne(const SymInt& a, const SymInt& b) {
381
+ return a.sym_ne(b);
382
+ }
383
+
384
+ inline bool sym_lt(int64_t a, int64_t b) {
385
+ return a < b;
386
+ }
387
+
388
+ inline SymBool sym_lt(const SymInt& a, const SymInt& b) {
389
+ return a.sym_lt(b);
390
+ }
391
+
392
+ inline bool sym_le(int64_t a, int64_t b) {
393
+ return a <= b;
394
+ }
395
+
396
+ inline SymBool sym_le(const SymInt& a, const SymInt& b) {
397
+ return a.sym_le(b);
398
+ }
399
+
400
+ inline bool sym_gt(int64_t a, int64_t b) {
401
+ return a > b;
402
+ }
403
+
404
+ inline SymBool sym_gt(const SymInt& a, const SymInt& b) {
405
+ return a.sym_gt(b);
406
+ }
407
+
408
+ inline bool sym_ge(int64_t a, int64_t b) {
409
+ return a >= b;
410
+ }
411
+
412
+ inline SymBool sym_ge(const SymInt& a, const SymInt& b) {
413
+ return a.sym_ge(b);
414
+ }
415
+
416
+ inline bool definitely_true(
417
+ const c10::SymBool& b,
418
+ const char* file,
419
+ int64_t line) {
420
+ return b.has_hint() && b.guard_bool(file, line);
421
+ }
422
+
423
+ } // namespace c10
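A sketch of how kernel-style code is expected to use SymInt: keep the value symbolic where possible, and only fall back to guard_int when a concrete integer is unavoidable. The helper name is illustrative:

    #include <c10/core/SymInt.h>
    #include <cstdint>

    inline int64_t numel_2d(const c10::SymInt& rows, const c10::SymInt& cols) {
      c10::SymInt n = rows * cols;      // stays symbolic if either input is symbolic
      if (auto c = n.maybe_as_int()) {  // hint/constant available: no guard needed
        return *c;
      }
      // Specializing here inserts a guard and can cause overspecialization.
      return n.guard_int(__FILE__, __LINE__);
    }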
venv/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h ADDED
@@ -0,0 +1,72 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <cstdint>
8
+
9
+ namespace c10 {
10
+ using SymIntArrayRef = ArrayRef<SymInt>;
11
+
12
+ inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) {
13
+ return IntArrayRef(reinterpret_cast<const int64_t*>(ar.data()), ar.size());
14
+ }
15
+
16
+ // TODO: a SymIntArrayRef containing a heap allocated large negative integer
17
+ // can actually technically be converted to an IntArrayRef... but not with
18
+ // the non-owning API we have here. We can't reinterpret cast; we have to
19
+ // allocate another buffer and write the integers into it. If you need it,
20
+ // we can do it. But I don't think you need it.
21
+
22
+ inline c10::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
23
+ c10::SymIntArrayRef ar) {
24
+ for (const c10::SymInt& sci : ar) {
25
+ if (sci.is_heap_allocated()) {
26
+ return c10::nullopt;
27
+ }
28
+ }
29
+
30
+ return {asIntArrayRefUnchecked(ar)};
31
+ }
32
+
33
+ inline at::IntArrayRef asIntArrayRefSlow(
34
+ c10::SymIntArrayRef ar,
35
+ const char* file,
36
+ int64_t line) {
37
+ for (const c10::SymInt& sci : ar) {
38
+ TORCH_CHECK(
39
+ !sci.is_heap_allocated(),
40
+ file,
41
+ ":",
42
+ line,
43
+ ": SymIntArrayRef expected to contain only concrete integers");
44
+ }
45
+ return asIntArrayRefUnchecked(ar);
46
+ }
47
+
48
+ #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__)
49
+
50
+ // Prefer using a more semantic constructor, like
51
+ // fromIntArrayRefKnownNonNegative
52
+ inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) {
53
+ return SymIntArrayRef(
54
+ reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
55
+ }
56
+
57
+ inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) {
58
+ return fromIntArrayRefUnchecked(array_ref);
59
+ }
60
+
61
+ inline SymIntArrayRef fromIntArrayRefSlow(IntArrayRef array_ref) {
62
+ for (long i : array_ref) {
63
+ TORCH_CHECK(
64
+ SymInt::check_range(i),
65
+ "IntArrayRef contains an int that cannot be represented as a SymInt: ",
66
+ i);
67
+ }
68
+ return SymIntArrayRef(
69
+ reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
70
+ }
71
+
72
+ } // namespace c10
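A sketch of the conversion helpers above; `sym_sizes` stands in for a symbolic size list obtained elsewhere (e.g. from a TensorImpl), and the helper name is illustrative:

    #include <c10/core/SymIntArrayRef.h>
    #include <cstdint>

    inline int64_t first_dim_or_zero(c10::SymIntArrayRef sym_sizes) {
      // Succeeds only when every element is a plain integer; otherwise nullopt.
      if (auto concrete = c10::asIntArrayRefSlowOpt(sym_sizes)) {
        return concrete->empty() ? 0 : (*concrete)[0];
      }
      // C10_AS_INTARRAYREF_SLOW(sym_sizes) would instead TORCH_CHECK on failure.
      return 0;
    }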
venv/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h ADDED
@@ -0,0 +1,214 @@
1
+ #pragma once
2
+ #include <c10/core/SymBool.h>
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/DimVector.h>
7
+
8
+ #include <atomic>
9
+ #include <cstdint>
10
+ #include <mutex>
11
+ #include <utility>
12
+
13
+ namespace c10 {
14
+
15
+ class C10_API SymbolicShapeMeta {
16
+ public:
17
+ // Basic metadata from which other quantities are derived
18
+ SymDimVector sizes_ = {0};
19
+ SymDimVector strides_ = {1};
20
+ SymInt storage_offset_ = 0;
21
+
22
+ bool strides_valid_ = true; // e.g. for sparse where there are no strides
23
+
24
+ SymbolicShapeMeta() = default;
25
+ SymbolicShapeMeta(const SymbolicShapeMeta& other);
26
+
27
+ void refresh_numel() {
28
+ // Non-const, don't need to hold mutables_ lock
29
+ available_.fetch_and(~numel_avail);
30
+ numel_ = 1;
31
+ }
32
+
33
+ void refresh_contiguous() {
34
+ // Non-const, don't need to hold mutables_ lock
35
+ available_.fetch_and(numel_avail);
36
+ is_contiguous_ = false;
37
+ is_channels_last_contiguous_ = false;
38
+ is_channels_last_3d_contiguous_ = false;
39
+ is_channels_last_ = false;
40
+ is_channels_last_3d_ = false;
41
+ is_non_overlapping_and_dense_ = false;
42
+ }
43
+
44
+ int64_t dim() const {
45
+ return static_cast<int64_t>(sizes_.size());
46
+ }
47
+
48
+ // Accessors for derived quantities, computed lazily on first access
49
+
50
+ bool has_numel() const {
51
+ return available_.load() & numel_avail;
52
+ }
53
+ bool has_is_contiguous() const {
54
+ return available_.load() & is_contiguous_avail;
55
+ }
56
+ bool has_is_channels_last_contiguous() const {
57
+ return available_.load() & is_channels_last_contiguous_avail;
58
+ }
59
+ bool has_is_channels_last_3d_contiguous() const {
60
+ return available_.load() & is_channels_last_3d_contiguous_avail;
61
+ }
62
+ bool has_is_channels_last() const {
63
+ return available_.load() & is_channels_last_avail;
64
+ }
65
+ bool has_is_channels_last_3d() const {
66
+ return available_.load() & is_channels_last_3d_avail;
67
+ }
68
+ bool has_is_non_overlapping_and_dense() const {
69
+ return available_.load() & is_non_overlapping_and_dense_avail;
70
+ }
71
+
72
+ // Accessors to cached derived properties
73
+ // DO NOT call with mutables_ lock held
74
+ const SymInt& numel() const {
75
+ if (C10_UNLIKELY(!has_numel())) {
76
+ init_numel();
77
+ }
78
+ return numel_;
79
+ }
80
+
81
+ const SymBool& is_contiguous() const {
82
+ if (C10_UNLIKELY(!has_is_contiguous())) {
83
+ init_is_contiguous();
84
+ }
85
+ return is_contiguous_;
86
+ }
87
+
88
+ const SymBool& is_channels_last_contiguous() const {
89
+ if (C10_UNLIKELY(!has_is_channels_last_contiguous())) {
90
+ init_is_channels_last_contiguous();
91
+ }
92
+ return is_channels_last_contiguous_;
93
+ }
94
+
95
+ const SymBool& is_channels_last_3d_contiguous() const {
96
+ if (C10_UNLIKELY(!has_is_channels_last_3d_contiguous())) {
97
+ init_is_channels_last_3d_contiguous();
98
+ }
99
+ return is_channels_last_3d_contiguous_;
100
+ }
101
+
102
+ const SymBool& is_channels_last() const {
103
+ if (C10_UNLIKELY(!has_is_channels_last())) {
104
+ init_is_channels_last();
105
+ }
106
+ return is_channels_last_;
107
+ }
108
+
109
+ const SymBool& is_channels_last_3d() const {
110
+ if (C10_UNLIKELY(!has_is_channels_last_3d())) {
111
+ init_is_channels_last_3d();
112
+ }
113
+ return is_channels_last_3d_;
114
+ }
115
+
116
+ const SymBool& is_non_overlapping_and_dense() const {
117
+ if (C10_UNLIKELY(!has_is_non_overlapping_and_dense())) {
118
+ init_is_non_overlapping_and_dense();
119
+ }
120
+ return is_non_overlapping_and_dense_;
121
+ }
122
+
123
+ // Assumptions so we can short-circuit computation
124
+ // NOTE: Don't need to lock mutables_ since these aren't const
125
+ void assume_contiguous(SymBool val = true) {
126
+ is_contiguous_ = std::move(val);
127
+ available_.fetch_or(is_contiguous_avail);
128
+ }
129
+ void assume_channels_last_contiguous(SymBool val = true) {
130
+ is_contiguous_ = std::move(val);
131
+ available_.fetch_or(is_channels_last_contiguous_avail);
132
+ }
133
+ void assume_channels_last_3d_contiguous(SymBool val = true) {
134
+ is_channels_last_3d_contiguous_ = std::move(val);
135
+ available_.fetch_or(is_channels_last_3d_contiguous_avail);
136
+ }
137
+ void assume_channels_last(SymBool val = true) {
138
+ is_channels_last_ = std::move(val);
139
+ available_.fetch_or(is_channels_last_avail);
140
+ }
141
+ void assume_channels_last_3d(SymBool val = true) {
142
+ is_channels_last_3d_ = std::move(val);
143
+ available_.fetch_or(is_channels_last_3d_avail);
144
+ }
145
+ void assume_non_overlapping_and_dense(SymBool val = true) {
146
+ is_non_overlapping_and_dense_ = std::move(val);
147
+ available_.fetch_or(is_non_overlapping_and_dense_avail);
148
+ }
149
+
150
+ private:
151
+ SymBool compute_contiguous() const;
152
+ SymBool compute_channels_last_contiguous_2d() const;
153
+ SymBool compute_channels_last_contiguous_3d() const;
154
+ SymBool compute_strides_like_channels_last_2d() const;
155
+ SymBool compute_strides_like_channels_last_3d() const;
156
+ SymBool compute_non_overlapping_and_dense() const;
157
+
158
+ // These are little wrappers over the real compute_ functions that
159
+ // can make use of other contiguity fields to short circuit.
160
+ // They need to be implemented separately for SymBool, as SymBool does
161
+ // not short circuit.
162
+ // TODO: should the SymBool cases avoid the short circuit? Need to reason
163
+ // if its correct, and reason if the simpler expressions are better for
164
+ // analysis (maybe not!)
165
+
166
+ SymBool compute_channels_last_contiguous_3d_dim5() const;
167
+ SymBool compute_channels_last_2d_dim5() const;
168
+ SymBool compute_channels_last_3d_dim5() const;
169
+ SymBool compute_is_non_overlapping_and_dense_dim4() const;
170
+ SymBool compute_is_non_overlapping_and_dense_dim5() const;
171
+ SymBool compute_is_non_overlapping_and_dense_anydim() const;
172
+
173
+ void init_numel() const;
174
+ void init_is_contiguous() const;
175
+ void init_is_channels_last_contiguous() const;
176
+ void init_is_channels_last_3d_contiguous() const;
177
+ void init_is_channels_last() const;
178
+ void init_is_channels_last_3d() const;
179
+ void init_is_non_overlapping_and_dense() const;
180
+
181
+ // NOTE: These only set if !has_foo()
182
+ void set_numel(SymInt val) const;
183
+ void set_is_contiguous(SymBool val) const;
184
+ void set_is_channels_last_contiguous(SymBool val) const;
185
+ void set_is_channels_last_3d_contiguous(SymBool val) const;
186
+ void set_is_channels_last(SymBool val) const;
187
+ void set_is_channels_last_3d(SymBool val) const;
188
+ void set_is_non_overlapping_and_dense(SymBool val) const;
189
+
190
+ // Lazily initialized variables, with the corresponding available_ flag
191
+ // indicating whether the value has been initialized
192
+ mutable std::atomic<int> available_{0};
193
+ enum avail {
194
+ numel_avail = 1 << 0,
195
+ is_contiguous_avail = 1 << 1,
196
+ is_channels_last_contiguous_avail = 1 << 2,
197
+ is_channels_last_3d_contiguous_avail = 1 << 3,
198
+ is_channels_last_avail = 1 << 4,
199
+ is_channels_last_3d_avail = 1 << 5,
200
+ is_non_overlapping_and_dense_avail = 1 << 6,
201
+ };
202
+
203
+ // Mutex to prevent races when initializing the variable from const accessors
204
+ mutable std::mutex mutables_;
205
+ mutable SymInt numel_ = 1;
206
+ mutable SymBool is_contiguous_{true};
207
+ mutable SymBool is_channels_last_contiguous_{false};
208
+ mutable SymBool is_channels_last_3d_contiguous_{false};
209
+ mutable SymBool is_channels_last_{false};
210
+ mutable SymBool is_channels_last_3d_{false};
211
+ mutable SymBool is_non_overlapping_and_dense_{true};
212
+ };
213
+
214
+ } // namespace c10
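The accessors above follow a lazy-caching pattern: an atomic bitmask (available_) records which derived fields have been computed, and the const accessors call an init_*() routine under the mutables_ mutex on first use. A standalone sketch of that pattern, with illustrative names and a stand-in computation (this is not c10 code):

    #include <atomic>
    #include <mutex>

    class LazyCache {
     public:
      long numel(long computed_value) const {
        if (!(available_.load() & kNumelAvail)) {
          std::lock_guard<std::mutex> lock(mutables_);
          if (!(available_.load() & kNumelAvail)) {  // double-checked initialization
            numel_ = computed_value;                 // stand-in for the real compute
            available_.fetch_or(kNumelAvail);
          }
        }
        return numel_;
      }

     private:
      static constexpr int kNumelAvail = 1 << 0;
      mutable std::atomic<int> available_{0};
      mutable std::mutex mutables_;
      mutable long numel_ = 1;
    };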
venv/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h ADDED
@@ -0,0 +1,787 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Backend.h>
4
+ #include <c10/core/DefaultDtype.h>
5
+ #include <c10/core/Device.h>
6
+ #include <c10/core/DeviceType.h>
7
+ #include <c10/core/DispatchKey.h>
8
+ #include <c10/core/Layout.h>
9
+ #include <c10/core/MemoryFormat.h>
10
+ #include <c10/core/ScalarType.h>
11
+ #include <c10/core/ScalarTypeToTypeMeta.h>
12
+
13
+ #include <c10/macros/Export.h>
14
+ #include <c10/macros/Macros.h>
15
+ #include <c10/util/Exception.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+ #include <cstdint>
19
+ #include <iosfwd>
20
+ #include <string>
21
+ #include <type_traits>
22
+ #include <utility>
23
+
24
+ namespace c10 {
25
+
26
+ DispatchKey computeDispatchKey(
27
+ c10::optional<ScalarType> dtype,
28
+ c10::optional<Layout> layout,
29
+ c10::optional<Device> device);
30
+
31
+ inline ScalarType dtype_or_default(c10::optional<ScalarType> dtype) {
32
+ return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); });
33
+ }
34
+
35
+ inline caffe2::TypeMeta dtype_or_default(
36
+ c10::optional<caffe2::TypeMeta> dtype) {
37
+ return value_or_else(dtype, [] { return get_default_dtype(); });
38
+ }
39
+
40
+ inline Layout layout_or_default(c10::optional<Layout> layout) {
41
+ return layout.value_or(kStrided);
42
+ }
43
+
44
+ inline Device device_or_default(c10::optional<Device> device) {
45
+ return value_or_else(device, [] { return Device(kCPU); });
46
+ }
47
+
48
+ inline bool pinned_memory_or_default(c10::optional<bool> pinned_memory) {
49
+ return pinned_memory.value_or(false);
50
+ }
51
+
52
+ /// A class to encapsulate construction axes of a Tensor. TensorOptions was
53
+ /// designed to support the Python style API for specifying construction options
54
+ /// on factory functions, e.g.,
55
+ ///
56
+ /// torch.zeros(2, 3, dtype=torch.int32)
57
+ ///
58
+ /// Because C++ doesn't natively support keyword arguments, there must be
59
+ /// another way of specifying keyword-like arguments. TensorOptions is a
60
+ /// builder class which can be used to construct this "dictionary" of keyword
61
+ /// arguments: functions which support TensorOptions conventionally take this
62
+ /// argument optionally as their last argument.
63
+ ///
64
+ /// WARNING: In PyTorch, there are `torch::` variants of factory functions,
65
+ /// e.g., torch::zeros for at::zeros. These return Variables (while the
66
+ /// stock ATen functions return plain Tensors). If you mix these functions
67
+ /// up, you WILL BE SAD.
68
+ ///
69
+ /// Rather than use the constructor of this class directly, you should prefer to
70
+ /// use the constructor functions, and then chain setter methods on top of them.
71
+ ///
72
+ /// at::device(at::kCUDA).dtype(kInt)
73
+ /// at::dtype(at::kInt)
74
+ ///
75
+ /// Additionally, anywhere a TensorOptions is expected, you can directly
76
+ /// pass at::kCUDA / at::kInt, and it will implicitly convert to a
77
+ /// TensorOptions.
78
+ ///
79
+ /// Here are some recommended ways to create a 2x2 tensor of zeros
80
+ /// with certain properties. These all *implicitly* make use of
81
+ /// TensorOptions, even if they don't mention the class explicitly:
82
+ ///
83
+ /// at::zeros({2,2}, at::kCUDA);
84
+ /// at::zeros({2,2}, at::kLong);
85
+ /// at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong()));
86
+ /// at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1
87
+ /// at::zeros({2,2}, at::requires_grad());
88
+ ///
89
+
90
+ /// NOTE [ TensorOptions Constructors ]
91
+ ///
92
+ /// TensorOptions is like a dictionary with entries from the set:
93
+ /// {requires_grad, device, dtype, layout}, where each entry may be
94
+ /// unspecified (i.e., is optional). It is used to specify the properties of
95
+ /// tensors in many places both in C++ internal and API, e.g., tensor factory
96
+ /// methods like `at::empty({10}, options)`, tensor conversions like
97
+ /// `tensor.to(...)`, etc.
98
+ ///
99
+ /// To provide a simple API that is consistent with Python, where one can do
100
+ /// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or a
101
+ /// `torch.layout`, we want TensorOptions to be implicitly convertible from
102
+ /// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we have
103
+ /// three implicit constructors from each of these three types.
104
+ ///
105
+ /// This is sufficient for `ScalarType` and `Layout` as they are simple Enum
106
+ /// classes. However, `Device` is an ordinary class with implicit constructors
107
+ /// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be
108
+ /// consistent with Python API, where strings are treated as equivalent with a
109
+ /// `torch.device` object (e.g., "cuda:1" can be passed to everywhere a
110
+ /// `torch.device("cuda:1")` is accepted). To support the syntax
111
+ /// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure
112
+ /// that `TensorOptions` is implicitly constructible with any arguments that a
113
+ /// `Device` can constructed from. So we have,
114
+ ///
115
+ /// /* implicit */ TensorOptions(T&& device) : TensorOptions() {
116
+ /// this->set_device(device);
117
+ /// }
118
+ ///
119
+ /// template <typename... Args,
120
+ /// typename = std::enable_if_t<std::is_constructible<Device,
121
+ /// Args&&...>::value>>
122
+ /// /* implicit */ TensorOptions(Args&&... args)
123
+ /// : TensorOptions(Device(std::forward<Args>(args)...)) {}
124
+ ///
125
+ ///
126
+ /// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`.
127
+ /// Compiler will complain about ambiguity between the copy constructor and the
128
+ /// `Device` constructor because `{kCUDA, 1}` can be converted to both a
129
+ /// `TensorOption` and a `Device`.
130
+ ///
131
+ /// To get around this, we templatize the `Device` constructor. Since overload
132
+ /// resolution is done before template resolution, our problem is solved.
133
+
134
+ DispatchKey computeDispatchKey(
135
+ optional<ScalarType> dtype,
136
+ optional<Layout> layout,
137
+ optional<Device> device);
138
+
139
+ struct C10_API TensorOptions {
140
+ TensorOptions()
141
+ : requires_grad_(false),
142
+ pinned_memory_(false),
143
+ has_device_(false),
144
+ has_dtype_(false),
145
+ has_layout_(false),
146
+ has_requires_grad_(false),
147
+ has_pinned_memory_(false),
148
+ has_memory_format_(false) {}
149
+
150
+ /// Constructs a `TensorOptions` object with the given layout.
151
+ /* implicit */ TensorOptions(Layout layout) : TensorOptions() {
152
+ this->set_layout(layout);
153
+ }
154
+
155
+ /// Constructs a `TensorOptions` object with the given device.
156
+ /// See NOTE [ TensorOptions Constructors ] on why this is templatized.
157
+ template <
158
+ typename T,
159
+ typename = std::enable_if_t<std::is_same_v<std::decay_t<T>, Device>>>
160
+ /* implicit */ TensorOptions(T&& device) : TensorOptions() {
161
+ this->set_device(std::forward<T>(device));
162
+ }
163
+
164
+ /// Constructs a `TensorOptions` object from arguments allowed in `Device`
165
+ /// constructors.
166
+ ///
167
+ /// See NOTE [ TensorOptions Constructors ].
168
+ ///
169
+ /// NB: Ideally we only allow implicit constructors here. But there is no easy
170
+ /// way to detect them. So we have this one that allows explicit
171
+ /// constructors too.
172
+ template <
173
+ typename... Args,
174
+ typename = std::enable_if_t<std::is_constructible_v<Device, Args&&...>>>
175
+ /* implicit */ TensorOptions(Args&&... args)
176
+ : TensorOptions(Device(std::forward<Args>(args)...)) {}
177
+
178
+ /// Constructs a `TensorOptions` object with the given dtype.
179
+ /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() {
180
+ this->set_dtype(dtype);
181
+ }
182
+
183
+ /// legacy constructor to support ScalarType
184
+ /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() {
185
+ this->set_dtype(dtype);
186
+ }
187
+
188
+ /// Constructs a `TensorOptions` object with the given memory format.
189
+ /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() {
190
+ set_memory_format(memory_format);
191
+ }
192
+
193
+ /// Return a copy of `TensorOptions` with `device` set to the given one, or
194
+ /// cleared if `device` is `nullopt`.
195
+ C10_NODISCARD TensorOptions
196
+ device(c10::optional<Device> device) const noexcept {
197
+ TensorOptions r = *this;
198
+ r.set_device(device);
199
+ return r;
200
+ }
201
+
202
+ /// Return a copy of `TensorOptions` with `device` set to the given one.
203
+ /// (This overload ensures that variadic template c10::optional constructor
204
+ /// for Device work correctly.)
205
+ template <typename... Args>
206
+ C10_NODISCARD TensorOptions device(Args&&... args) const noexcept {
207
+ return device(
208
+ c10::optional<Device>(std::in_place, std::forward<Args>(args)...));
209
+ }
210
+
211
+ /// Return a copy of `TensorOptions`, but with device set to CUDA, and the
212
+ /// device index set to the given one.
213
+ ///
214
+ /// TODO: This function encourages bad behavior (assuming CUDA is
215
+ /// the only device that matters). Get rid of it / rename it.
216
+ C10_NODISCARD TensorOptions
217
+ device_index(c10::DeviceIndex device_index) const noexcept {
218
+ return device(Device::Type::CUDA, device_index);
219
+ }
220
+
221
+ /// Return a copy of `TensorOptions` with `dtype` set to the given one.
222
+ C10_NODISCARD TensorOptions
223
+ dtype(c10::optional<caffe2::TypeMeta> dtype) const noexcept {
224
+ TensorOptions r = *this;
225
+ r.set_dtype(dtype);
226
+ return r;
227
+ }
228
+
229
+ // legacy function to support ScalarType
230
+ C10_NODISCARD TensorOptions
231
+ dtype(c10::optional<ScalarType> dtype) const noexcept {
232
+ TensorOptions r = *this;
233
+ r.set_dtype(dtype);
234
+ return r;
235
+ }
236
+
237
+ // Since dtype is taken...
238
+ template <typename T>
239
+ TensorOptions& dtype() {
240
+ dtype_ = caffe2::TypeMeta::Make<T>();
241
+ has_dtype_ = true;
242
+ return *this;
243
+ }
244
+
245
+ /// Sets the layout of the `TensorOptions`.
246
+ C10_NODISCARD TensorOptions
247
+ layout(c10::optional<Layout> layout) const noexcept {
248
+ TensorOptions r = *this;
249
+ r.set_layout(layout);
250
+ return r;
251
+ }
252
+
253
+ /// Sets the `requires_grad` property of the `TensorOptions`.
254
+ C10_NODISCARD TensorOptions
255
+ requires_grad(c10::optional<bool> requires_grad) const noexcept {
256
+ TensorOptions r = *this;
257
+ r.set_requires_grad(requires_grad);
258
+ return r;
259
+ }
260
+
261
+ /// Sets the `pinned_memory` property on the `TensorOptions`.
262
+ C10_NODISCARD TensorOptions
263
+ pinned_memory(c10::optional<bool> pinned_memory) const noexcept {
264
+ TensorOptions r = *this;
265
+ r.set_pinned_memory(pinned_memory);
266
+ return r;
267
+ }
268
+
269
+ /// Sets the `memory_format` property on `TensorOptions`.
270
+ C10_NODISCARD TensorOptions
271
+ memory_format(c10::optional<MemoryFormat> memory_format) const noexcept {
272
+ TensorOptions r = *this;
273
+ r.set_memory_format(memory_format);
274
+ return r;
275
+ }
276
+
277
+ /// Returns the device of the `TensorOptions`.
278
+ Device device() const noexcept {
279
+ return device_or_default(device_opt());
280
+ }
281
+
282
+ /// Returns whether the device is specified.
283
+ bool has_device() const noexcept {
284
+ return has_device_;
285
+ }
286
+
287
+ /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
288
+ /// device is not specified.
289
+ c10::optional<Device> device_opt() const noexcept {
290
+ return has_device_ ? c10::make_optional(device_) : c10::nullopt;
291
+ }
292
+
293
+ /// Returns the device index of the `TensorOptions`.
294
+ c10::DeviceIndex device_index() const noexcept {
295
+ return device().index();
296
+ }
297
+
298
+ /// Returns the dtype of the `TensorOptions`.
299
+ caffe2::TypeMeta dtype() const noexcept {
300
+ return dtype_or_default(dtype_opt());
301
+ }
302
+
303
+ /// Returns whether the dtype is specified.
304
+ bool has_dtype() const noexcept {
305
+ return has_dtype_;
306
+ }
307
+
308
+ /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
309
+ /// dtype is not specified.
310
+ c10::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
311
+ return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
312
+ }
313
+
314
+ /// Returns the layout of the `TensorOptions`.
315
+ Layout layout() const noexcept {
316
+ return layout_or_default(layout_opt());
317
+ }
318
+
319
+ /// Returns whether the layout is specified.
320
+ bool has_layout() const noexcept {
321
+ return has_layout_;
322
+ }
323
+
324
+ /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
325
+ /// layout is not specified.
326
+ c10::optional<Layout> layout_opt() const noexcept {
327
+ return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
328
+ }
329
+
330
+ /// Returns the `requires_grad` property of the `TensorOptions`.
331
+ bool requires_grad() const noexcept {
332
+ return has_requires_grad_ ? requires_grad_ : false;
333
+ }
334
+
335
+ /// Returns whether the `requires_grad` is specified.
336
+ bool has_requires_grad() const noexcept {
337
+ return has_requires_grad_;
338
+ }
339
+
340
+ /// Returns the `requires_grad` property of the `TensorOptions`, or
341
+ /// `c10::nullopt` if `requires_grad` is not specified.
342
+ c10::optional<bool> requires_grad_opt() const noexcept {
343
+ return has_requires_grad_ ? c10::make_optional(requires_grad_)
344
+ : c10::nullopt;
345
+ }
346
+
347
+ /// Returns the `pinned_memory` property of the `TensorOptions`.
348
+ bool pinned_memory() const noexcept {
349
+ return pinned_memory_or_default(pinned_memory_opt());
350
+ }
351
+
352
+ /// Returns whether the `pinned_memory` is specified.
353
+ bool has_pinned_memory() const noexcept {
354
+ return has_pinned_memory_;
355
+ }
356
+
357
+ /// Returns if the layout is sparse
358
+ bool is_sparse() const {
359
+ return layout_ == c10::Layout::Sparse;
360
+ }
361
+
362
+ /// Returns if the layout is sparse CSR, deprecated, use
363
+ /// is_sparse_compressed() instead
364
+ bool is_sparse_csr() const {
365
+ return layout_ == c10::Layout::SparseCsr;
366
+ }
367
+
368
+ bool is_sparse_compressed() const {
369
+ return layout_ == c10::Layout::SparseCsr ||
370
+ layout_ == c10::Layout::SparseCsc ||
371
+ layout_ == c10::Layout::SparseBsr || layout_ == c10::Layout::SparseBsc;
372
+ }
373
+
374
+ // For compatibility with legacy tensor.type() comparisons
375
+ bool type_equal(const TensorOptions& other) const {
376
+ return computeDispatchKey() == other.computeDispatchKey() &&
377
+ typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype());
378
+ }
379
+
380
+ /// Returns the `pinned_memory` property of the `TensorOptions`, or
381
+ /// `c10::nullopt` if `pinned_memory` is not specified.
382
+ c10::optional<bool> pinned_memory_opt() const noexcept {
383
+ return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
384
+ : c10::nullopt;
385
+ }
386
+
387
+ /// Returns whether the `memory_format` is specified
388
+ bool has_memory_format() const noexcept {
389
+ return has_memory_format_;
390
+ }
391
+
392
+ // NB: memory_format() getter is PURPOSELY not defined, as the default
393
+ // behavior of memory_format varies from function to function.
394
+
395
+ /// Returns the `memory_format` property of `TensorOptions`, or
396
+ /// `c10::nullopt` if `memory_format` is not specified.
397
+ c10::optional<MemoryFormat> memory_format_opt() const noexcept {
398
+ return has_memory_format_ ? c10::make_optional(memory_format_)
399
+ : c10::nullopt;
400
+ }
401
+
402
+ // Resolves the ATen backend specified by the current construction axes.
403
+ // TODO: Deprecate this
404
+ Backend backend() const {
405
+ return at::dispatchKeyToBackend(computeDispatchKey());
406
+ }
407
+
408
+ /// Returns the right-biased merge of two TensorOptions. This has the
409
+ /// effect of overwriting settings from `self` with the settings
410
+ /// specified in `options`.
411
+ ///
412
+ /// NB: This merging operation does NOT respect device merges.
413
+ /// For example, if you call device({kCUDA, 1}).merge_in(kCUDA),
414
+ /// you will get kCUDA in the end! Functions like Tensor.new_empty
415
+ /// ensure the right device is selected anyway by way of a
416
+ /// device guard.
417
+ ///
418
+ TensorOptions merge_in(TensorOptions options) const noexcept {
419
+ TensorOptions merged = *this;
420
+ if (options.has_device())
421
+ merged.set_device(options.device_opt());
422
+ if (options.has_dtype())
423
+ merged.set_dtype(options.dtype_opt());
424
+ if (options.has_layout())
425
+ merged.set_layout(options.layout_opt());
426
+ // NB: requires grad is right biased; not a logical AND/OR!
427
+ if (options.has_requires_grad())
428
+ merged.set_requires_grad(options.requires_grad_opt());
429
+ if (options.has_pinned_memory())
430
+ merged.set_pinned_memory(options.pinned_memory_opt());
431
+ if (options.has_memory_format())
432
+ merged.set_memory_format(options.memory_format_opt());
433
+ return merged;
434
+ }
435
+
436
+ // TODO remove after TensorOptions rationalization
437
+ TensorOptions merge_memory_format(
438
+ c10::optional<MemoryFormat> optional_memory_format) const noexcept {
439
+ TensorOptions merged = *this;
440
+ if (optional_memory_format.has_value()) {
441
+ merged.set_memory_format(*optional_memory_format);
442
+ }
443
+ return merged;
444
+ }
445
+
446
+ // INVARIANT: computeDispatchKey returns only the subset of dispatch keys for
447
+ // which dispatchKeyToBackend is injective, if it is defined at all (for
448
+ // the most part, this just means that this function never returns an
449
+ // Autograd key)
450
+ DispatchKey computeDispatchKey() const {
451
+ return c10::computeDispatchKey(
452
+ optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt());
453
+ }
454
+
455
+ private:
456
+ // These methods are currently private because I'm not sure if it's wise
457
+ // to actually publish them. They are methods because I need them in
458
+ // the constructor and the functional API implementation.
459
+ //
460
+ // If you really, really need it, you can make these public, but check if you
461
+ // couldn't just do what you need with the functional API. Similarly, these
462
+ // methods are not chainable, because if you wanted chaining, you probably
463
+ // want to use the functional API instead. (It's probably OK to make
464
+ // these chainable, because these functions are all explicitly annotated
465
+ // with a ref-qualifier, the trailing &, that makes them illegal to call
466
+ // on temporaries.)
467
+
468
+ /// Mutably set the device of `TensorOptions`.
469
+ void set_device(c10::optional<Device> device) & noexcept {
470
+ if (device) {
471
+ device_ = *device;
472
+ has_device_ = true;
473
+ } else {
474
+ has_device_ = false;
475
+ }
476
+ }
477
+
478
+ /// Mutably set the dtype of `TensorOptions`.
479
+ void set_dtype(c10::optional<caffe2::TypeMeta> dtype) & noexcept {
480
+ if (dtype) {
481
+ dtype_ = *dtype;
482
+ has_dtype_ = true;
483
+ } else {
484
+ has_dtype_ = false;
485
+ }
486
+ }
487
+
488
+ // legacy function to support ScalarType
489
+ void set_dtype(c10::optional<ScalarType> dtype) & noexcept {
490
+ if (dtype) {
491
+ dtype_ = scalarTypeToTypeMeta(*dtype);
492
+ has_dtype_ = true;
493
+ } else {
494
+ has_dtype_ = false;
495
+ }
496
+ }
497
+
498
+ /// Mutably set the layout of `TensorOptions`.
499
+ void set_layout(c10::optional<Layout> layout) & noexcept {
500
+ if (layout) {
501
+ layout_ = *layout;
502
+ has_layout_ = true;
503
+ } else {
504
+ has_layout_ = false;
505
+ }
506
+ }
507
+
508
+ /// Mutably set the `requires_grad` property of `TensorOptions`.
509
+ void set_requires_grad(c10::optional<bool> requires_grad) & noexcept {
510
+ if (requires_grad) {
511
+ requires_grad_ = *requires_grad;
512
+ has_requires_grad_ = true;
513
+ } else {
514
+ has_requires_grad_ = false;
515
+ }
516
+ }
517
+
518
+ /// Mutably set the `pinned_memory` property of `TensorOptions`.
519
+ void set_pinned_memory(c10::optional<bool> pinned_memory) & noexcept {
520
+ if (pinned_memory) {
521
+ pinned_memory_ = *pinned_memory;
522
+ has_pinned_memory_ = true;
523
+ } else {
524
+ has_pinned_memory_ = false;
525
+ }
526
+ }
527
+
528
+ /// Mutably set the `memory_format` property of `TensorOptions`.
529
+ void set_memory_format(c10::optional<MemoryFormat> memory_format) & noexcept {
530
+ if (memory_format) {
531
+ memory_format_ = *memory_format;
532
+ has_memory_format_ = true;
533
+ } else {
534
+ has_memory_format_ = false;
535
+ }
536
+ }
537
+
538
+ // WARNING: If you edit TensorOptions to add more options, you
539
+ // may need to adjust the implementation of Tensor::options.
540
+ // The criteria for whether or not Tensor::options must be adjusted
541
+ // is whether or not the new option you added should be preserved
542
+ // by functions such as empty_like(); if it should be preserved,
543
+ // you must adjust options().
544
+ //
545
+ // TODO: MemoryFormat is not implemented in this way
546
+
547
+ // NB: We didn't use c10::optional here, because then we can't pack
548
+ // the has_***_ boolean fields.
549
+
550
+ Device device_ = at::kCPU; // 16-bit
551
+ caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 16-bit
552
+ Layout layout_ = at::kStrided; // 8-bit
553
+ MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit
554
+
555
+ // Bitmask required here to get this to fit inside 32 bits (or even 64 bits,
556
+ // for that matter)
557
+
558
+ bool requires_grad_ : 1;
559
+ bool pinned_memory_ : 1;
560
+
561
+ bool has_device_ : 1;
562
+ bool has_dtype_ : 1;
563
+ bool has_layout_ : 1;
564
+ bool has_requires_grad_ : 1;
565
+ bool has_pinned_memory_ : 1;
566
+ bool has_memory_format_ : 1;
567
+ };
568
+
569
+ // We should aspire to fit in one machine-size word; but a size greater than two
570
+ // words is too much. (We are doing terribly on 32-bit archs, where we require
571
+ // three machine size words to store tensor options. Eek!)
572
+ static_assert(
573
+ sizeof(TensorOptions) <= sizeof(int64_t) * 2,
574
+ "TensorOptions must fit in 128-bits");
575
+
576
+ /// Convenience function that returns a `TensorOptions` object with the `dtype`
577
+ /// set to the given one.
578
+ inline TensorOptions dtype(caffe2::TypeMeta dtype) {
579
+ return TensorOptions().dtype(dtype);
580
+ }
581
+
582
+ // legacy function to support ScalarType
583
+ inline TensorOptions dtype(ScalarType dtype) {
584
+ return TensorOptions().dtype(scalarTypeToTypeMeta(dtype));
585
+ }
586
+
587
+ /// Convenience function that returns a `TensorOptions` object with the `layout`
588
+ /// set to the given one.
589
+ inline TensorOptions layout(Layout layout) {
590
+ return TensorOptions().layout(layout);
591
+ }
592
+
593
+ /// Convenience function that returns a `TensorOptions` object with the `device`
594
+ /// set to the given one.
595
+ inline TensorOptions device(Device device) {
596
+ return TensorOptions().device(device);
597
+ }
598
+
599
+ /// Convenience function that returns a `TensorOptions` object with the
600
+ /// `device` set to CUDA and the `device_index` set to the given one.
601
+ inline TensorOptions device_index(c10::DeviceIndex device_index) {
602
+ return TensorOptions().device_index(device_index);
603
+ }
604
+
605
+ /// Convenience function that returns a `TensorOptions` object with the
606
+ /// `requires_grad` set to the given one.
607
+ inline TensorOptions requires_grad(bool requires_grad = true) {
608
+ return TensorOptions().requires_grad(requires_grad);
609
+ }
610
+
611
+ /// Convenience function that returns a `TensorOptions` object with the
612
+ /// `memory_format` set to the given one.
613
+ inline TensorOptions memory_format(MemoryFormat memory_format) {
614
+ return TensorOptions().memory_format(memory_format);
615
+ }
616
+
617
+ C10_API std::ostream& operator<<(
618
+ std::ostream& stream,
619
+ const TensorOptions& options);
620
+
621
+ template <typename T>
622
+ inline TensorOptions dtype() {
623
+ return dtype(caffe2::TypeMeta::Make<T>());
624
+ }
625
+
626
+ inline std::string toString(const TensorOptions& options) {
627
+ std::ostringstream stream;
628
+ stream << options;
629
+ return stream.str();
630
+ }
631
+
632
+ // This is intended to be a centralized location by which we can determine
633
+ // what an appropriate DispatchKey for a tensor is.
634
+ inline DispatchKey computeDispatchKey(
635
+ c10::optional<ScalarType> dtype,
636
+ c10::optional<Layout> layout,
637
+ c10::optional<Device> device) {
638
+ const auto layout_ = layout_or_default(layout);
639
+ const auto device_ = device_or_default(device);
640
+ switch (layout_) {
641
+ case Layout::Jagged:
642
+ case Layout::Strided: {
643
+ const auto dtype_ = dtype_or_default(dtype);
644
+ switch (device_.type()) {
645
+ #define DO_CASE(device, _) \
646
+ case c10::DeviceType::device: { \
647
+ if (isQIntType(dtype_)) { \
648
+ return DispatchKey::Quantized##device; \
649
+ } \
650
+ return DispatchKey::device; \
651
+ }
652
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
653
+ #undef DO_CASE
654
+ case c10::DeviceType::FPGA:
655
+ return DispatchKey::FPGA;
656
+ case c10::DeviceType::ORT:
657
+ return DispatchKey::ORT;
658
+ case c10::DeviceType::Vulkan:
659
+ return DispatchKey::Vulkan;
660
+ case c10::DeviceType::Metal:
661
+ return DispatchKey::Metal;
662
+ case c10::DeviceType::MKLDNN:
663
+ case c10::DeviceType::OPENGL:
664
+ case c10::DeviceType::OPENCL:
665
+ case c10::DeviceType::IDEEP:
666
+ TORCH_INTERNAL_ASSERT(
667
+ 0,
668
+ "This is a grandfathered Caffe2 device type ",
669
+ device_.type(),
670
+ ", it shouldn't ever convert to a DispatchKey. File a bug describing what you were doing if you think this is in error.");
671
+ default:
672
+ TORCH_CHECK_NOT_IMPLEMENTED(
673
+ false,
674
+ "Unsupported device type for dense layout: ",
675
+ device_.type());
676
+ }
677
+ }
678
+ case Layout::Sparse:
679
+ switch (device_.type()) {
680
+ #define DO_CASE(device, _) \
681
+ case c10::DeviceType::device: { \
682
+ return DispatchKey::Sparse##device; \
683
+ }
684
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
685
+ #undef DO_CASE
686
+ default:
687
+ TORCH_CHECK_NOT_IMPLEMENTED(
688
+ false,
689
+ "Unsupported device type for sparse layout: ",
690
+ device_.type());
691
+ }
692
+ case Layout::Mkldnn:
693
+ switch (device_.type()) {
694
+ case c10::DeviceType::CPU:
695
+ return DispatchKey::MkldnnCPU;
696
+ default:
697
+ TORCH_CHECK_NOT_IMPLEMENTED(
698
+ false,
699
+ "Unsupported device type for mkldnn layout: ",
700
+ device_.type());
701
+ }
702
+ case Layout::SparseCsr:
703
+ case Layout::SparseCsc:
704
+ case Layout::SparseBsr:
705
+ case Layout::SparseBsc:
706
+ switch (device_.type()) {
707
+ #define DO_CASE(device, _) \
708
+ case c10::DeviceType::device: { \
709
+ return DispatchKey::SparseCsr##device; \
710
+ }
711
+ C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
712
+ #undef DO_CASE
713
+ default:
714
+ TORCH_CHECK_NOT_IMPLEMENTED(
715
+ false,
716
+ "Unsupported device type for ",
717
+ layout_,
718
+ " layout: ",
719
+ device_.type());
720
+ }
721
+ default:
722
+ TORCH_CHECK(false, "Unsupported layout: ", layout_);
723
+ }
724
+ }
725
+
726
+ inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) {
727
+ switch (dispatch_key) {
728
+ #define DO_CASE(bc, _) case DispatchKey::Sparse##bc:
729
+ C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
730
+ #undef DO_CASE
731
+ return Layout::Sparse;
732
+ #define DO_CASE(bc, _) case DispatchKey::SparseCsr##bc:
733
+ C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
734
+ #undef DO_CASE
735
+ TORCH_CHECK(
736
+ false, "Cannot map DispatchKey ", dispatch_key, " to a unique layout.");
737
+ case DispatchKey::MkldnnCPU:
738
+ return Layout::Mkldnn;
739
+ default:
740
+ return Layout::Strided;
741
+ }
742
+ }
743
+
744
+ inline c10::DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) {
745
+ switch (dispatch_key) {
746
+ // stuff that's real
747
+ #define DO_CASE(suffix, prefix) \
748
+ case DispatchKey::prefix##suffix: \
749
+ return c10::DeviceType::suffix;
750
+ #define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix)
751
+ C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES)
752
+ #undef DO_CASES
753
+ #undef DO_CASE
754
+
755
+ case DispatchKey::MkldnnCPU:
756
+ return c10::DeviceType::CPU;
757
+ case DispatchKey::Vulkan:
758
+ return c10::DeviceType::Vulkan;
759
+
760
+ case DispatchKey::ORT:
761
+ return c10::DeviceType::ORT;
762
+ default:
763
+ TORCH_CHECK(
764
+ false,
765
+ "DispatchKey ",
766
+ dispatch_key,
767
+ " doesn't correspond to a device");
768
+ }
769
+ }
770
+
771
+ inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) {
772
+ return TensorOptions()
773
+ .layout(dispatchKeyToLayout(dispatch_key))
774
+ .device(dispatchKeyToDeviceType(dispatch_key));
775
+ }
776
+
777
+ namespace detail {
778
+ inline bool backend_supports_empty_operator(const TensorOptions& options) {
779
+ // Quantized backends don't support at::empty().
780
+ // They have separate operators like at::empty_quantized() that take in
781
+ // extra information about how to quantize the tensor.
782
+ return !isQIntType(typeMetaToScalarType(options.dtype()));
783
+ }
784
+
785
+ } // namespace detail
786
+
787
+ } // namespace c10
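
A minimal usage sketch of the TensorOptions API defined above, assuming a c10/libtorch build is on the include and link path (the sketch only uses members and free functions declared in this header; illustrative only):

#include <c10/core/TensorOptions.h>
#include <iostream>

int main() {
  // Fluent construction; unspecified fields fall back to their defaults
  // (float dtype, strided layout, CPU device) when queried.
  c10::TensorOptions base =
      c10::dtype(c10::ScalarType::Float)
          .device(c10::Device(c10::DeviceType::CPU));

  // merge_in is right-biased: any field set in `overrides` wins over `base`.
  c10::TensorOptions overrides = c10::requires_grad(true);
  c10::TensorOptions merged = base.merge_in(overrides);

  std::cout << merged << '\n';                 // operator<< declared above
  std::cout << merged.requires_grad() << '\n'; // 1
  // Dense CPU options resolve to the CPU dispatch key.
  std::cout << static_cast<int>(merged.computeDispatchKey()) << '\n';
  return 0;
}
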
venv/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h ADDED
@@ -0,0 +1,48 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymInt.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <cstdint>
7
+ #include <utility>
8
+
9
+ namespace c10 {
10
+
11
+ namespace detail {
12
+ // This template can only be specialized at int64_t and c10::SymInt;
13
+ // you'll get linker errors otherwise
14
+ template <typename T>
15
+ C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar);
16
+ } // namespace detail
17
+
18
+ template <typename T>
19
+ T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) {
20
+ // Inline the fast paths
21
+ if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) {
22
+ // For SymInts, we want an explicit control flow to trigger a guard, so we
23
+ // may as well branch too.
24
+ if (dim < 0) {
25
+ return dim + dim_post_expr;
26
+ }
27
+ return dim;
28
+ }
29
+ // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors)
30
+ return c10::detail::maybe_wrap_dim_slow<T>(
31
+ std::move(dim), std::move(dim_post_expr), wrap_scalar);
32
+ }
33
+
34
+ inline int64_t maybe_wrap_dim(
35
+ int64_t dim,
36
+ int64_t dim_post_expr,
37
+ bool wrap_scalar = true) {
38
+ return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar);
39
+ }
40
+
41
+ inline c10::SymInt maybe_wrap_dim(
42
+ c10::SymInt dim,
43
+ c10::SymInt dim_post_expr,
44
+ bool wrap_scalar = true) {
45
+ return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar);
46
+ }
47
+
48
+ } // namespace c10
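
A small sketch of the dimension-wrapping behavior documented above (assuming the c10 headers are on the include path; illustrative only):

#include <c10/core/WrapDimMinimal.h>
#include <iostream>

int main() {
  // For a 4-d tensor, valid dims are [-4, 3]; negative dims wrap around.
  std::cout << c10::maybe_wrap_dim(-1, /*dim_post_expr=*/4) << '\n'; // 3
  std::cout << c10::maybe_wrap_dim(2, /*dim_post_expr=*/4) << '\n';  // 2
  // Out-of-range dims (and the wrap_scalar edge case) are handled by the
  // out-of-line slow path, which raises an error for invalid dims.
  return 0;
}
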
venv/lib/python3.10/site-packages/torch/include/c10/core/alignment.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+
5
+ namespace c10 {
6
+
7
+ #ifdef C10_MOBILE
8
+ // Use 16-byte alignment on mobile
9
+ // - ARM NEON AArch32 and AArch64
10
+ // - x86[-64] < AVX
11
+ constexpr size_t gAlignment = 16;
12
+ #else
13
+ // Use 64-byte alignment, which should be enough for computation up to AVX512.
14
+ constexpr size_t gAlignment = 64;
15
+ #endif
16
+
17
+ constexpr size_t gPagesize = 4096;
18
+ // Since the default THP page size is 2MB, enable THP only
19
+ // for buffers of size 2MB or larger to avoid memory bloat
20
+ constexpr size_t gAlloc_threshold_thp = static_cast<size_t>(2) * 1024 * 1024;
21
+ } // namespace c10
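
A short sketch showing how the alignment constant above might be used with a standard aligned allocator (std::aligned_alloc is C++17 and not available on MSVC; illustrative only):

#include <c10/core/alignment.h>
#include <cstdlib>

int main() {
  // std::aligned_alloc requires the size to be a multiple of the alignment.
  const size_t bytes = 4 * c10::gAlignment;
  void* p = std::aligned_alloc(c10::gAlignment, bytes);
  // ... use the gAlignment-aligned buffer (64 bytes, or 16 on mobile) ...
  std::free(p);
  return 0;
}
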
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h ADDED
@@ -0,0 +1,255 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/InlineDeviceGuard.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ namespace c10::impl {
8
+
9
+ /**
10
+ * A StreamGuard is an RAII class that changes the current device
11
+ * to the device corresponding to some stream, and changes the
12
+ * default stream on that device to be this stream.
13
+ *
14
+ * InlineStreamGuard is a helper class for implementing StreamGuards.
15
+ * See InlineDeviceGuard for guidance on how to use this class.
16
+ */
17
+ template <typename T>
18
+ class InlineStreamGuard : private InlineDeviceGuard<T> {
19
+ public:
20
+ /// No default constructor, see Note [Omitted default constructor from RAII]
21
+ explicit InlineStreamGuard() = delete;
22
+
23
+ /// Set the current device to the device associated with the passed stream,
24
+ /// and set the current stream on that device to the passed stream.
25
+ explicit InlineStreamGuard(Stream stream)
26
+ : InlineDeviceGuard<T>(stream.device()),
27
+ original_stream_of_original_device_(
28
+ this->impl_.getStream(original_device())),
29
+ original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
30
+ current_stream_(stream) {}
31
+
32
+ /// This constructor exists purely for testing
33
+ template <
34
+ typename U = T,
35
+ typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
36
+ explicit InlineStreamGuard(
37
+ Stream stream,
38
+ const DeviceGuardImplInterface* impl)
39
+ : InlineDeviceGuard<T>(
40
+ stream.device(),
41
+ impl ? impl : getDeviceGuardImpl(stream.device_type())),
42
+ original_stream_of_original_device_(
43
+ this->impl_.getStream(original_device())),
44
+ original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
45
+ current_stream_(stream) {}
46
+
47
+ /// Copy is disallowed
48
+ InlineStreamGuard(const InlineStreamGuard<T>&) = delete;
49
+ InlineStreamGuard<T>& operator=(const InlineStreamGuard<T>&) = delete;
50
+
51
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
52
+ /// which is required for moves on types with nontrivial destructors.
53
+ InlineStreamGuard(InlineStreamGuard<T>&& other) = delete;
54
+ InlineStreamGuard& operator=(InlineStreamGuard<T>&& other) = delete;
55
+
56
+ ~InlineStreamGuard() {
57
+ this->impl_.exchangeStream(original_stream_of_current_device_);
58
+ }
59
+
60
+ /// Resets the currently set stream to the original stream and
61
+ /// the currently set device to the original device. Then,
62
+ /// set the current device to the device associated with the passed stream,
63
+ /// and set the current stream on that device to the passed stream.
64
+ ///
65
+ /// NOTE: this implementation may skip some stream/device setting if
66
+ /// it can prove that it is unnecessary.
67
+ ///
68
+ /// WARNING: reset_stream does NOT preserve previously set streams on
69
+ /// different devices. If you need to set streams on multiple devices
70
+ /// use MultiStreamGuard instead.
71
+ void reset_stream(Stream stream) {
72
+ // TODO: make a version that takes an impl argument. Unfortunately,
73
+ // that will require SFINAE because impl is only valid for the
74
+ // VirtualGuardImpl specialization.
75
+ if (stream.device() == this->current_device()) {
76
+ this->impl_.exchangeStream(stream);
77
+ current_stream_ = stream;
78
+ } else {
79
+ // Destruct and reconstruct the StreamGuard in-place
80
+ this->impl_.exchangeStream(original_stream_of_current_device_);
81
+ this->reset_device(stream.device());
82
+ original_stream_of_current_device_ = this->impl_.exchangeStream(stream);
83
+ current_stream_ = stream;
84
+ }
85
+ }
86
+
87
+ // It's not clear if set_device should also reset the current stream
88
+ // if the device is unchanged; therefore, we don't provide it.
89
+ // The situation is somewhat clearer with reset_device, but it's still
90
+ // a pretty weird thing to do, so haven't added this either.
91
+
92
+ /// Returns the stream of the original device prior to this guard. Subtly,
93
+ /// the stream returned here is the original stream of the *original*
94
+ /// device; i.e., it's the stream that your computation *would* have
95
+ /// been put on, if it hadn't been for this meddling stream guard.
96
+ /// This is usually what you want.
97
+ Stream original_stream() const {
98
+ return original_stream_of_original_device_;
99
+ }
100
+
101
+ /// Returns the most recent stream that was set using this stream guard,
102
+ /// either from construction, or via reset_stream.
103
+ Stream current_stream() const {
104
+ return current_stream_;
105
+ }
106
+
107
+ /// Returns the most recent device that was set using this device guard,
108
+ /// either from construction, or via set_device/reset_device/set_index.
109
+ Device current_device() const {
110
+ return InlineDeviceGuard<T>::current_device();
111
+ }
112
+
113
+ /// Returns the device that was set at the most recent reset_stream(),
114
+ /// or otherwise the device at construction time.
115
+ Device original_device() const {
116
+ return InlineDeviceGuard<T>::original_device();
117
+ }
118
+
119
+ private:
120
+ Stream
121
+ original_stream_of_original_device_; // what the user probably cares about
122
+ Stream original_stream_of_current_device_; // what we need to restore
123
+ Stream current_stream_;
124
+ };
125
+
126
+ /**
127
+ * An OptionalStreamGuard is an RAII class that sets a device to some value on
128
+ * initialization, and resets the device to its original value on destruction.
129
+ * See InlineOptionalDeviceGuard for more guidance on how to use this class.
130
+ */
131
+ template <typename T>
132
+ class InlineOptionalStreamGuard {
133
+ public:
134
+ /// Creates an uninitialized stream guard.
135
+ explicit InlineOptionalStreamGuard()
136
+ : guard_() // See Note [Explicit initialization of optional fields]
137
+ {}
138
+
139
+ /// Set the current device to the device associated with the passed stream,
140
+ /// and set the current stream on that device to the passed stream,
141
+ /// if the passed stream is not nullopt.
142
+ explicit InlineOptionalStreamGuard(optional<Stream> stream_opt) : guard_() {
143
+ if (stream_opt.has_value()) {
144
+ guard_.emplace(stream_opt.value());
145
+ }
146
+ }
147
+
148
+ /// All constructors of StreamGuard are valid for OptionalStreamGuard
149
+ template <typename... Args>
150
+ explicit InlineOptionalStreamGuard(Args&&... args)
151
+ : guard_(std::in_place, std::forward<Args>(args)...) {}
152
+
153
+ // See Note [Move construction for RAII guards is tricky]
154
+ InlineOptionalStreamGuard(InlineOptionalStreamGuard<T>&& other) = delete;
155
+
156
+ // See Note [Move assignment for RAII guards is tricky]
157
+ InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) =
158
+ delete;
159
+
160
+ /// Resets the currently set stream to the original stream and
161
+ /// the currently set device to the original device. Then,
162
+ /// set the current device to the device associated with the passed stream,
163
+ /// and set the current stream on that device to the passed stream.
164
+ /// Initializes the OptionalStreamGuard if it was not previously initialized.
165
+ void reset_stream(Stream stream) {
166
+ if (guard_.has_value()) {
167
+ guard_->reset_stream(stream);
168
+ } else {
169
+ guard_.emplace(stream);
170
+ }
171
+ }
172
+
173
+ /// Returns the stream that was set at the time the guard was most recently
174
+ /// initialized, or nullopt if the guard is uninitialized.
175
+ optional<Stream> original_stream() const {
176
+ return guard_.has_value() ? make_optional(guard_->original_stream())
177
+ : nullopt;
178
+ }
179
+
180
+ /// Returns the most recent stream that was set using this stream guard,
181
+ /// either from construction, or via reset_stream, if the guard is
182
+ /// initialized, or nullopt if the guard is uninitialized.
183
+ optional<Stream> current_stream() const {
184
+ return guard_.has_value() ? make_optional(guard_->current_stream())
185
+ : nullopt;
186
+ }
187
+
188
+ /// Restore the original device and stream, resetting this guard to
189
+ /// uninitialized state.
190
+ void reset() {
191
+ guard_.reset();
192
+ }
193
+
194
+ private:
195
+ optional<InlineStreamGuard<T>> guard_;
196
+ };
197
+
198
+ template <typename T>
199
+ class InlineMultiStreamGuard {
200
+ public:
201
+ /// Calls `set_stream` on each of the streams in the list.
202
+ /// This may be useful if you need to set different streams
203
+ /// for different devices.
204
+ explicit InlineMultiStreamGuard(ArrayRef<Stream> streams) {
205
+ if (!streams.empty()) {
206
+ impl_.emplace(getDeviceTypeOfStreams(streams));
207
+ original_streams_.reserve(streams.size());
208
+ for (const Stream& s : streams) {
209
+ original_streams_.emplace_back(this->impl_->exchangeStream(s));
210
+ }
211
+ }
212
+ }
213
+
214
+ /// Copy is disallowed
215
+ InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete;
216
+ InlineMultiStreamGuard<T>& operator=(const InlineMultiStreamGuard&) = delete;
217
+
218
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
219
+ /// which is required for moves on types with nontrivial destructors.
220
+ InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete;
221
+ InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete;
222
+
223
+ ~InlineMultiStreamGuard() noexcept {
224
+ if (this->impl_.has_value()) {
225
+ for (const Stream& s : original_streams_) {
226
+ this->impl_->exchangeStream(s);
227
+ }
228
+ }
229
+ }
230
+
231
+ protected:
232
+ optional<T> impl_;
233
+
234
+ private:
235
+ /// The original streams that were active on all devices.
236
+ std::vector<Stream> original_streams_;
237
+
238
+ static DeviceType getDeviceTypeOfStreams(ArrayRef<Stream> streams) {
239
+ TORCH_INTERNAL_ASSERT(!streams.empty());
240
+ DeviceType type = streams[0].device_type();
241
+ for (const auto idx : c10::irange(1, streams.size())) {
242
+ TORCH_CHECK_VALUE(
243
+ streams[idx].device_type() == type,
244
+ "Streams have a mix of device types: stream 0 is on ",
245
+ streams[0].device(),
246
+ " while stream ",
247
+ idx,
248
+ " is on device ",
249
+ streams[idx].device());
250
+ }
251
+ return type;
252
+ }
253
+ };
254
+
255
+ } // namespace c10::impl
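
A hypothetical usage sketch of the user-facing c10::StreamGuard, which is built on top of InlineStreamGuard. It assumes a CUDA-enabled libtorch build; the stream helpers (c10::cuda::getStreamFromPool, CUDAStream::unwrap) come from c10/cuda, not from this header:

#include <c10/core/StreamGuard.h>
#include <c10/cuda/CUDAStream.h>

void run_on_side_stream() {
  // Take a stream from the CUDA stream pool on the current device.
  c10::cuda::CUDAStream side_stream = c10::cuda::getStreamFromPool();
  {
    // RAII: switch the current device to side_stream's device and make
    // side_stream the current stream on that device.
    c10::StreamGuard guard(side_stream.unwrap());
    // ... work launched here is queued on side_stream ...
  }
  // On destruction the guard restores the previous stream and device.
}
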
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h ADDED
@@ -0,0 +1,190 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/HermeticPyObjectTLS.h>
4
+ #include <c10/core/impl/PyInterpreter.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <c10/util/python_stub.h>
7
+
8
+ #include <atomic>
9
+
10
+ namespace c10::impl {
11
+
12
+ struct C10_API PyObjectSlot {
13
+ public:
14
+ PyObjectSlot();
15
+
16
+ ~PyObjectSlot();
17
+
18
+ void maybe_destroy_pyobj();
19
+
20
+ // Associate the TensorImpl with the specified PyObject, and, if necessary,
21
+ // also tag the interpreter.
22
+ //
23
+ // NB: This lives in a header so that we can inline away the switch on status
24
+ //
25
+ // NB: THIS FUNCTION CAN RAISE AN EXCEPTION. Make sure to clean up after
26
+ // PyObject if necessary!
27
+ void init_pyobj(
28
+ PyInterpreter* self_interpreter,
29
+ PyObject* pyobj,
30
+ PyInterpreterStatus status) {
31
+ impl::PyInterpreter* expected = nullptr;
32
+ switch (status) {
33
+ case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED:
34
+ // caller guarantees there is no multithreaded access; if there is
35
+ // no data race, it is OK to do a relaxed store
36
+ pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed);
37
+ break;
38
+ case impl::PyInterpreterStatus::TAGGED_BY_US:
39
+ // no tagging is necessary, the tag is already correct
40
+ break;
41
+ case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED:
42
+ // attempt to claim this TensorImpl with the specified interpreter
43
+ // tag
44
+ if (pyobj_interpreter_.compare_exchange_strong(
45
+ expected, self_interpreter, std::memory_order_acq_rel)) {
46
+ break;
47
+ }
48
+ // test if, actually, it was already tagged by us! this situation can't
49
+ // be caused by a race, but it could be caused by a situation
50
+ // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED
51
+ // (because they didn't pre-check the tag) when actually it was
52
+ // owned by the interpreter
53
+ if (expected == self_interpreter) {
54
+ break;
55
+ }
56
+ // fallthrough, we lost the race. We are guaranteed not to lose the
57
+ // race with ourself, as calls to init_pyobj with the same interpreter
58
+ // ID must be sequentialized by the GIL
59
+ [[fallthrough]];
60
+ case impl::PyInterpreterStatus::TAGGED_BY_OTHER:
61
+ TORCH_CHECK(
62
+ false,
63
+ "cannot allocate PyObject for Tensor on interpreter ",
64
+ self_interpreter,
65
+ " that has already been used by another torch deploy interpreter ",
66
+ pyobj_interpreter_.load());
67
+ }
68
+
69
+ // we are the ONLY thread that can have gotten to this point. It is not
70
+ // possible to conflict with another interpreter, as access is protected
71
+ // by the GIL
72
+ // NB: owns_pyobj tag is initially false
73
+ pyobj_ = pyobj;
74
+ }
75
+
76
+ // Query the PyObject interpreter. This may return null if there is no
77
+ // interpreter. This is racy!
78
+ PyInterpreter* pyobj_interpreter();
79
+
80
+ PyObject* _unchecked_untagged_pyobj() const;
81
+
82
+ // Test the interpreter tag. If tagged for the current interpreter, return
83
+ // a non-nullopt (but possibly null) PyObject. If (possibly) untagged,
84
+ // returns a nullopt. If it is definitely invalid, raises an error.
85
+ //
86
+ // If `ignore_hermetic_tls` is false and this function is called from a
87
+ // hermetic context (ie, `HermeticPyObjectTLS::get_state()` is true), then
88
+ // nullopt is returned. If `ignore_hermetic_tls` is true, then the hermetic
89
+ // context is ignored, allowing you to check the interpreter tag of a
90
+ // nonhermetic PyObject from within a hermetic context. This is necessary
91
+ // because there are some cases where the deallocator function of a
92
+ // nonhermetic PyObject is called from within a hermetic context, so it must
93
+ // be properly treated as a nonhermetic PyObject.
94
+ //
95
+ // NB: this lives in header so that we can avoid actually creating the
96
+ // c10::optional
97
+ c10::optional<PyObject*> check_pyobj(
98
+ PyInterpreter* self_interpreter,
99
+ bool ignore_hermetic_tls = false) const {
100
+ // Note [Memory ordering on Python interpreter tag]
101
+ impl::PyInterpreter* interpreter =
102
+ pyobj_interpreter_.load(std::memory_order_acquire);
103
+ if (interpreter == nullptr) {
104
+ // NB: This never returns DEFINITELY_UNINITIALIZED because there is
105
+ // always the possibility that another thread races to initialize
106
+ // after we query here. The only time when we can conclude a tensor
107
+ // is definitely uninitialized is when we have just allocated it and
108
+ // it cannot have escaped to other threads yet
109
+ return c10::nullopt;
110
+ } else if (interpreter == self_interpreter) {
111
+ // NB: pyobj_ could still be null!
112
+ if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) {
113
+ return c10::nullopt;
114
+ } else {
115
+ return c10::make_optional(_unchecked_untagged_pyobj());
116
+ }
117
+ } else {
118
+ TORCH_CHECK(
119
+ false,
120
+ "cannot access PyObject for Tensor on interpreter ",
121
+ (*self_interpreter)->name(),
122
+ " that has already been used by another torch deploy interpreter ",
123
+ (*pyobj_interpreter_.load())->name());
124
+ }
125
+ }
126
+
127
+ // Clear the PyObject field for an interpreter, in situations where we
128
+ // statically know the tensor is tagged with our interpreter.
129
+ void unchecked_clear_pyobj(PyInterpreter* interpreter);
130
+
131
+ PyInterpreter& load_pyobj_interpreter() const;
132
+
133
+ // Check if the PyObjectSlot's interpreter is the same as the specified
134
+ // interpreter
135
+ bool check_interpreter(PyInterpreter* interpreter);
136
+
137
+ // Check if the PyObjectSlot is holding a PyObject, owned or non-owned
138
+ bool has_pyobj_nonhermetic();
139
+
140
+ bool owns_pyobj();
141
+
142
+ void set_owns_pyobj(bool b);
143
+
144
+ private:
145
+ // This field contains the interpreter tag for this object. See
146
+ // Note [Python interpreter tag] for general context
147
+ //
148
+ // Note [Memory ordering on Python interpreter tag]
149
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
150
+ // What memory_order do we need when accessing this atomic? We don't
151
+ // need a single total modification order (as provided by
152
+ // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only
153
+ // transition from -1 to some positive integer and never changes afterwards.
154
+ // Because there is only one modification, it trivially already has a total
155
+ // modification order (e.g., we don't need fences or locked instructions on
156
+ // x86)
157
+ //
158
+ // In fact, one could make a reasonable argument that relaxed reads are OK,
159
+ // due to the presence of external locking (GIL) to ensure that interactions
160
+ // with other data structures are still correctly synchronized, so that
161
+ // we fall in the "Single-Location Data Structures" case as described in
162
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
163
+ // However, on x86, it doesn't matter if I use acquire or relaxed on the load
164
+ // as I get the same assembly in both cases. So I just use the more
165
+ // conservative acquire (which will impede compiler optimizations but I don't
166
+ // care)
167
+ std::atomic<PyInterpreter*> pyobj_interpreter_;
168
+
169
+ // This field contains a reference to a PyObject representing this Tensor.
170
+ // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new
171
+ // PyObject for it and set this field. This field does not have to be
172
+ // protected by an atomic as it is only allowed to be accessed when you hold
173
+ // the GIL, or during destruction of the tensor.
174
+ //
175
+ // When a PyObject dies, you are obligated to clear this field
176
+ // (otherwise, you will try to use-after-free the pyobj); this currently
177
+ // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp
178
+ //
179
+ // NB: Ordinarily, this should not be a strong reference, as if the
180
+ // PyObject owns the Tensor, this would create a reference cycle.
181
+ // However, sometimes this ownership flips. To track who owns
182
+ // who, this has a single pointer tag indicating whether or not the
183
+ // C++ object owns the PyObject (the common case, zero, means PyObject
184
+ // owns the C++ object); see _unchecked_untagged_pyobj for raw access
185
+ // or check_pyobj for checked access. See references to PyObject
186
+ // resurrection in torch/csrc/autograd/python_variable.cpp
187
+ PyObject* pyobj_;
188
+ };
189
+
190
+ } // namespace c10::impl
venv/lib/python3.10/site-packages/torch/include/c10/core/thread_pool.h ADDED
@@ -0,0 +1,120 @@
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <condition_variable>
5
+ #include <cstddef>
6
+ #include <functional>
7
+ #include <mutex>
8
+ #include <queue>
9
+ #include <thread>
10
+ #include <utility>
11
+ #include <vector>
12
+
13
+ #include <c10/macros/Export.h>
14
+ #include <c10/util/Registry.h>
15
+ #include <c10/util/numa.h>
16
+ #include <c10/util/thread_name.h>
17
+
18
+ namespace c10 {
19
+
20
+ class C10_API TaskThreadPoolBase {
21
+ public:
22
+ virtual void run(std::function<void()> func) = 0;
23
+
24
+ virtual size_t size() const = 0;
25
+
26
+ /**
27
+ * The number of available (i.e. idle) threads in this thread pool.
28
+ */
29
+ virtual size_t numAvailable() const = 0;
30
+
31
+ /**
32
+ * Check if the current thread is from the thread pool.
33
+ */
34
+ virtual bool inThreadPool() const = 0;
35
+
36
+ virtual ~TaskThreadPoolBase() noexcept = default;
37
+
38
+ static size_t defaultNumThreads();
39
+ };
40
+
41
+ class C10_API ThreadPool : public c10::TaskThreadPoolBase {
42
+ protected:
43
+ struct task_element_t {
44
+ bool run_with_id;
45
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
46
+ const std::function<void()> no_id;
47
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
48
+ const std::function<void(std::size_t)> with_id;
49
+
50
+ explicit task_element_t(std::function<void()> f)
51
+ : run_with_id(false), no_id(std::move(f)), with_id(nullptr) {}
52
+ explicit task_element_t(std::function<void(std::size_t)> f)
53
+ : run_with_id(true), no_id(nullptr), with_id(std::move(f)) {}
54
+ };
55
+
56
+ std::queue<task_element_t> tasks_;
57
+ std::vector<std::thread> threads_;
58
+ mutable std::mutex mutex_;
59
+ std::condition_variable condition_;
60
+ std::condition_variable completed_;
61
+ std::atomic_bool running_;
62
+ bool complete_;
63
+ std::size_t available_;
64
+ std::size_t total_;
65
+ int numa_node_id_;
66
+
67
+ public:
68
+ ThreadPool() = delete;
69
+
70
+ explicit ThreadPool(
71
+ int pool_size,
72
+ int numa_node_id = -1,
73
+ const std::function<void()>& init_thread = nullptr);
74
+
75
+ ~ThreadPool() override;
76
+
77
+ size_t size() const override;
78
+
79
+ size_t numAvailable() const override;
80
+
81
+ bool inThreadPool() const override;
82
+
83
+ void run(std::function<void()> func) override;
84
+
85
+ template <typename Task>
86
+ void runTaskWithID(Task task) {
87
+ std::unique_lock<std::mutex> lock(mutex_);
88
+
89
+ // Set task and signal condition variable so that a worker thread will
90
+ // wake up and use the task.
91
+ tasks_.emplace(static_cast<std::function<void(std::size_t)>>(task));
92
+ complete_ = false;
93
+ condition_.notify_one();
94
+ }
95
+
96
+ /// @brief Wait for queue to be empty
97
+ void waitWorkComplete();
98
+
99
+ private:
100
+ // @brief Entry point for pool threads.
101
+ void main_loop(std::size_t index);
102
+ };
103
+
104
+ class C10_API TaskThreadPool : public c10::ThreadPool {
105
+ public:
106
+ explicit TaskThreadPool(int pool_size, int numa_node_id = -1)
107
+ : ThreadPool(pool_size, numa_node_id, [numa_node_id]() {
108
+ setThreadName("CaffeTaskThread");
109
+ NUMABind(numa_node_id);
110
+ }) {}
111
+ };
112
+
113
+ C10_DECLARE_SHARED_REGISTRY(
114
+ ThreadPoolRegistry,
115
+ TaskThreadPoolBase,
116
+ int,
117
+ int,
118
+ bool);
119
+
120
+ } // namespace c10
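
A minimal usage sketch of the pool API above (assuming linking against c10; illustrative only):

#include <c10/core/thread_pool.h>
#include <atomic>
#include <iostream>

int main() {
  c10::TaskThreadPool pool(/*pool_size=*/4);
  std::atomic<int> counter{0};
  for (int i = 0; i < 8; ++i) {
    // run() enqueues a task and wakes an idle worker.
    pool.run([&counter] { counter.fetch_add(1); });
  }
  pool.waitWorkComplete(); // block until the queue drains
  std::cout << "ran " << counter.load() << " tasks\n";
  return 0;
}
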
venv/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h ADDED
@@ -0,0 +1,81 @@
1
+ #include <c10/macros/Macros.h>
2
+ #include <c10/util/Backtrace.h>
3
+ #include <c10/util/env.h>
4
+ #include <cstdlib>
5
+ #include <exception>
6
+ #include <iostream>
7
+ #include <mutex>
8
+ #include <optional>
9
+
10
+ namespace c10 {
11
+ class AbortHandlerHelper {
12
+ public:
13
+ static AbortHandlerHelper& getInstance() {
14
+ #ifdef _WIN32
15
+ thread_local
16
+ #endif // _WIN32
17
+ static AbortHandlerHelper instance;
18
+ return instance;
19
+ }
20
+
21
+ void set(std::terminate_handler handler) {
22
+ std::lock_guard<std::mutex> lk(mutex);
23
+ if (!inited) {
24
+ prev = std::set_terminate(handler);
25
+ curr = std::get_terminate();
26
+ inited = true;
27
+ }
28
+ }
29
+
30
+ std::terminate_handler getPrev() const {
31
+ return prev;
32
+ }
33
+
34
+ private:
35
+ std::terminate_handler prev = nullptr;
36
+ std::terminate_handler curr = nullptr;
37
+ bool inited = false;
38
+ std::mutex mutex;
39
+ AbortHandlerHelper() = default;
40
+ ~AbortHandlerHelper() {
41
+ // Only restore the handler if we are the current one
42
+ if (inited && curr == std::get_terminate()) {
43
+ std::set_terminate(prev);
44
+ }
45
+ }
46
+
47
+ public:
48
+ AbortHandlerHelper(AbortHandlerHelper const&) = delete;
49
+ void operator=(AbortHandlerHelper const&) = delete;
50
+ };
51
+
52
+ namespace detail {
53
+ C10_ALWAYS_INLINE void terminate_handler() {
54
+ std::cout << "Unhandled exception caught in c10/util/AbortHandler.h" << '\n';
55
+ auto backtrace = get_backtrace();
56
+ std::cout << backtrace << '\n' << std::flush;
57
+ auto prev_handler = AbortHandlerHelper::getInstance().getPrev();
58
+ if (prev_handler) {
59
+ prev_handler();
60
+ } else {
61
+ std::abort();
62
+ }
63
+ }
64
+ } // namespace detail
65
+
66
+ C10_ALWAYS_INLINE void set_terminate_handler() {
67
+ bool use_custom_terminate = false;
68
+ // On Windows it is enabled by default based on
69
+ // https://github.com/pytorch/pytorch/pull/50320#issuecomment-763147062
70
+ #ifdef _WIN32
71
+ use_custom_terminate = true;
72
+ #endif // _WIN32
73
+ auto result = c10::utils::check_env("TORCH_CUSTOM_TERMINATE");
74
+ if (result != std::nullopt) {
75
+ use_custom_terminate = result.value();
76
+ }
77
+ if (use_custom_terminate) {
78
+ AbortHandlerHelper::getInstance().set(detail::terminate_handler);
79
+ }
80
+ }
81
+ } // namespace c10
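
A sketch of how the handler above is installed and triggered (illustrative only; the custom handler only takes effect by default on Windows, or when the TORCH_CUSTOM_TERMINATE environment variable enables it):

#include <c10/util/AbortHandler.h>
#include <stdexcept>

int main() {
  // Installs detail::terminate_handler as the std::terminate handler
  // when enabled; otherwise this is a no-op.
  c10::set_terminate_handler();

  // An uncaught exception now prints a backtrace before aborting.
  throw std::runtime_error("boom");
}
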
venv/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h ADDED
@@ -0,0 +1,115 @@
1
+ // Copyright 2023-present Facebook. All Rights Reserved.
2
+
3
+ #pragma once
4
+
5
+ #include <c10/macros/Export.h>
6
+ #include <array>
7
+ #include <chrono>
8
+ #include <cstddef>
9
+ #include <cstdint>
10
+ #include <ctime>
11
+ #include <functional>
12
+ #include <type_traits>
13
+
14
+ #if defined(C10_IOS) && defined(C10_MOBILE)
15
+ #include <sys/time.h> // for gettimeofday()
16
+ #endif
17
+
18
+ #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
19
+ #define C10_RDTSC
20
+ #if defined(_MSC_VER)
21
+ #include <intrin.h>
22
+ #elif defined(__CUDACC__) || defined(__HIPCC__)
23
+ #undef C10_RDTSC
24
+ #elif defined(__clang__)
25
+ // `__rdtsc` is available by default.
26
+ // NB: This has to be first, because Clang will also define `__GNUC__`
27
+ #elif defined(__GNUC__)
28
+ #include <x86intrin.h>
29
+ #else
30
+ #undef C10_RDTSC
31
+ #endif
32
+ #endif
33
+
34
+ namespace c10 {
35
+
36
+ using time_t = int64_t;
37
+ using steady_clock_t = std::conditional_t<
38
+ std::chrono::high_resolution_clock::is_steady,
39
+ std::chrono::high_resolution_clock,
40
+ std::chrono::steady_clock>;
41
+
42
+ inline time_t getTimeSinceEpoch() {
43
+ auto now = std::chrono::system_clock::now().time_since_epoch();
44
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
45
+ }
46
+
47
+ inline time_t getTime(bool allow_monotonic = false) {
48
+ #if defined(C10_IOS) && defined(C10_MOBILE)
49
+ // clock_gettime is only available on iOS 10.0 or newer. Unlike OS X, iOS
50
+ // can't rely on CLOCK_REALTIME, as it is defined no matter if clock_gettime
51
+ // is implemented or not
52
+ struct timeval now;
53
+ gettimeofday(&now, NULL);
54
+ return static_cast<time_t>(now.tv_sec) * 1000000000 +
55
+ static_cast<time_t>(now.tv_usec) * 1000;
56
+ #elif defined(_WIN32) || defined(__MACH__)
57
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
58
+ steady_clock_t::now().time_since_epoch())
59
+ .count();
60
+ #else
61
+ // clock_gettime is *much* faster than std::chrono implementation on Linux
62
+ struct timespec t {};
63
+ auto mode = CLOCK_REALTIME;
64
+ if (allow_monotonic) {
65
+ mode = CLOCK_MONOTONIC;
66
+ }
67
+ clock_gettime(mode, &t);
68
+ return static_cast<time_t>(t.tv_sec) * 1000000000 +
69
+ static_cast<time_t>(t.tv_nsec);
70
+ #endif
71
+ }
72
+
73
+ // We often do not need to capture true wall times. If a fast mechanism such
74
+ // as TSC is available we can use that instead and convert back to epoch time
75
+ // during post processing. This greatly reduces the clock's contribution to
76
+ // profiling.
77
+ // http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
78
+ // https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
79
+ // TODO: We should use
80
+ // `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
81
+ inline auto getApproximateTime() {
82
+ #if defined(C10_RDTSC)
83
+ return static_cast<uint64_t>(__rdtsc());
84
+ #else
85
+ return getTime();
86
+ #endif
87
+ }
88
+
89
+ using approx_time_t = decltype(getApproximateTime());
90
+ static_assert(
91
+ std::is_same_v<approx_time_t, int64_t> ||
92
+ std::is_same_v<approx_time_t, uint64_t>,
93
+ "Expected either int64_t (`getTime`) or uint64_t (some TSC reads).");
94
+
95
+ // Convert `getCount` results to Nanoseconds since unix epoch.
96
+ class C10_API ApproximateClockToUnixTimeConverter final {
97
+ public:
98
+ ApproximateClockToUnixTimeConverter();
99
+ std::function<time_t(approx_time_t)> makeConverter();
100
+
101
+ struct UnixAndApproximateTimePair {
102
+ time_t t_;
103
+ approx_time_t approx_t_;
104
+ };
105
+ static UnixAndApproximateTimePair measurePair();
106
+
107
+ private:
108
+ static constexpr size_t replicates = 1001;
109
+ using time_pairs = std::array<UnixAndApproximateTimePair, replicates>;
110
+ time_pairs measurePairs();
111
+
112
+ time_pairs start_times_;
113
+ };
114
+
115
+ } // namespace c10
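
A usage sketch of the approximate clock (assuming a c10 build; illustrative only). Cheap TSC-style reads are taken on the hot path and converted to unix-epoch nanoseconds afterwards:

#include <c10/util/ApproximateClock.h>
#include <iostream>

int main() {
  // Construct the converter before taking measurements so it can calibrate.
  c10::ApproximateClockToUnixTimeConverter converter;

  auto t0 = c10::getApproximateTime();
  // ... workload being profiled ...
  auto t1 = c10::getApproximateTime();

  // The expensive conversion happens once, during post-processing.
  auto to_ns = converter.makeConverter();
  std::cout << "elapsed ns: " << (to_ns(t1) - to_ns(t0)) << '\n';
  return 0;
}
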