applied-ai-018 commited on
Commit
281b268
·
verified ·
1 Parent(s): ff22b3c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/12.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h +32 -0
  5. venv/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h +397 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h +209 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h +48 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h +25 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h +631 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h +111 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h +186 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/core/Range.h +25 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h +92 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h +1055 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h +17 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h +21 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h +75 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h +88 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h +28 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h +20 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h +101 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h +483 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/core/functional.h +54 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h +10 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h +1555 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h +2545 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h +54 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h +391 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h +0 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h +9 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h +2 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h +14 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h +21 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h +15 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h +24 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo.h +39 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_native.h +21 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_native.h +25 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_ops.h +39 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_available_native.h +21 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_native.h +22 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward.h +135 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear.h +30 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist.h +39 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_native.h +24 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view.h +91 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h +28 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h +50 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_ops.h +50 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h +26 -0
ckpts/universal/global_step120/zero/12.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09d269bf863d57f4cba42ae46bd3404e36cf62f09205922efe5a033f03c974d6
3
+ size 16778396
ckpts/universal/global_step120/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e80128ee8a72a61df467d9892140892f3b71633dd2013faca85c383b6a3df8be
3
+ size 33555612
ckpts/universal/global_step120/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:029e63de74c7f900acc4b6a6c493cc10b814dec6597b9f8d731077db9cff2f77
3
+ size 33555627
venv/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // In order to preserve bc, we make DeprecatedTypeProperties instances unique
4
+ // just like they are for Type.
5
+
6
+ #include <c10/core/Backend.h>
7
+ #include <c10/core/ScalarType.h>
8
+ #include <memory>
9
+
10
+ namespace at {
11
+
12
+ class DeprecatedTypeProperties;
13
+
14
+ struct TORCH_API DeprecatedTypePropertiesDeleter {
15
+ void operator()(DeprecatedTypeProperties * ptr);
16
+ };
17
+
18
+ class TORCH_API DeprecatedTypePropertiesRegistry {
19
+ public:
20
+ DeprecatedTypePropertiesRegistry();
21
+
22
+ DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const;
23
+
24
+ private:
25
+ std::unique_ptr<DeprecatedTypeProperties> registry
26
+ [static_cast<int>(Backend::NumOptions)]
27
+ [static_cast<int>(ScalarType::NumOptions)];
28
+ };
29
+
30
+ TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();
31
+
32
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h ADDED
@@ -0,0 +1,397 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/TypeTraits.h>
6
+ #include <c10/util/TypeList.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+ #include <c10/util/order_preserving_flat_hash_map.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <ATen/core/TensorBody.h>
11
+ #include <ATen/core/jit_type_base.h>
12
+
13
+ namespace c10 {
14
+ struct IValue;
15
+ template<class Key, class Value> class Dict;
16
+ struct Type;
17
+
18
+ namespace impl {
19
+
20
+ using valid_dict_key_types = guts::typelist::typelist<
21
+ int64_t,
22
+ std::string,
23
+ double,
24
+ c10::complex<double>,
25
+ bool,
26
+ at::Tensor
27
+ >;
28
+ }
29
+
30
+ namespace detail {
31
+
32
+ struct DictKeyHash {
33
+ size_t operator()(const IValue& ivalue) const;
34
+ };
35
+
36
+ struct DictKeyEqualTo {
37
+ bool operator()(const IValue& lhs, const IValue& rhs) const;
38
+ };
39
+
40
+ struct DictImpl final : public c10::intrusive_ptr_target {
41
+ using dict_map_type = ska_ordered::order_preserving_flat_hash_map<IValue, IValue, DictKeyHash, DictKeyEqualTo>;
42
+ struct DictElementTypes final {
43
+ TypePtr keyType;
44
+ TypePtr valueType;
45
+ };
46
+
47
+ explicit DictImpl(dict_map_type dict_, DictElementTypes elementTypes_)
48
+ : dict(std::move(dict_))
49
+ , elementTypes(std::move(elementTypes_)) {}
50
+ dict_map_type dict;
51
+
52
+ DictElementTypes elementTypes;
53
+
54
+ intrusive_ptr<DictImpl> copy() const;
55
+ friend TORCH_API bool operator==(const DictImpl& lhs, const DictImpl& rhs);
56
+ };
57
+
58
+ }
59
+
60
+ namespace impl {
61
+ template<class Key, class Value, class Iterator> class DictIterator;
62
+
63
+ /**
64
+ * A reference to an entry in the Dict.
65
+ * Use the `key()` and `value()` methods to read the element.
66
+ */
67
+ template<class Key, class Value, class Iterator>
68
+ class DictEntryRef final {
69
+ public:
70
+ explicit DictEntryRef(Iterator iterator)
71
+ : iterator_(std::move(iterator)) {}
72
+
73
+ decltype(auto) key() const {
74
+ return iterator_->first.template to<Key>();
75
+ }
76
+
77
+ decltype(auto) value() const {
78
+ return iterator_->second.template to<Value>();
79
+ }
80
+
81
+ template<class Value_>
82
+ void setValue(Value_&& value) const {
83
+ static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of setValue()");
84
+ iterator_->second = Value(std::forward<Value_>(value));
85
+ }
86
+
87
+ private:
88
+ // allow copying and moving, but only our friends (i.e. the Dict class) can do
89
+ // it. Copying/moving this reference wrapper would be too ambiguous to allow it
90
+ // in the public API.
91
+ DictEntryRef(const DictEntryRef&) = default;
92
+ DictEntryRef& operator=(const DictEntryRef&) = default;
93
+ DictEntryRef(DictEntryRef&&) noexcept = default;
94
+ DictEntryRef& operator=(DictEntryRef&& rhs) & noexcept = default;
95
+
96
+ Iterator iterator_;
97
+ friend class DictIterator<Key, Value, Iterator>;
98
+ friend class Dict<Key, Value>;
99
+ };
100
+
101
+ // this wraps map_type::iterator to make sure user code can't rely
102
+ // on it being the type of the underlying map.
103
+ template<class Key, class Value, class Iterator>
104
+ class DictIterator final {
105
+ public:
106
+ // C++17 friendly std::iterator implementation
107
+ using iterator_category = std::forward_iterator_tag;
108
+ using value_type = DictEntryRef<Key, Value, Iterator>;
109
+ using difference_type = std::ptrdiff_t;
110
+ using pointer = value_type*;
111
+ using reference = value_type&;
112
+
113
+ explicit DictIterator() = default;
114
+ ~DictIterator() = default;
115
+
116
+ DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {}
117
+ DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {}
118
+ DictIterator& operator=(const DictIterator& rhs) {
119
+ entryRef_ = rhs.entryRef_;
120
+ return *this;
121
+ }
122
+ DictIterator& operator=(DictIterator&& rhs) noexcept {
123
+ entryRef_ = std::move(rhs.entryRef_);
124
+ return *this;
125
+ }
126
+
127
+ DictIterator& operator++() {
128
+ ++entryRef_.iterator_;
129
+ return *this;
130
+ }
131
+
132
+ DictIterator operator++(int) {
133
+ DictIterator copy(*this);
134
+ ++*this;
135
+ return copy;
136
+ }
137
+
138
+ const DictEntryRef<Key, Value, Iterator>& operator*() const {
139
+ return entryRef_;
140
+ }
141
+
142
+ const DictEntryRef<Key, Value, Iterator>* operator->() const {
143
+ return &entryRef_;
144
+ }
145
+
146
+ friend difference_type operator-(const DictIterator& lhs, const DictIterator& rhs) {
147
+ return lhs.entryRef_.iterator_ - rhs.entryRef_.iterator_;
148
+ }
149
+
150
+ private:
151
+ explicit DictIterator(Iterator iterator): entryRef_(std::move(iterator)) {}
152
+
153
+ const Iterator& get_iterator_() const {
154
+ return entryRef_.iterator_;
155
+ }
156
+
157
+ friend bool operator==(const DictIterator& lhs, const DictIterator& rhs) {
158
+ return lhs.get_iterator_() == rhs.get_iterator_();
159
+ }
160
+
161
+ friend bool operator!=(const DictIterator& lhs, const DictIterator& rhs) {
162
+ return lhs.get_iterator_() != rhs.get_iterator_();
163
+ }
164
+
165
+ friend bool operator<(const DictIterator& lhs, const DictIterator& rhs) {
166
+ return lhs.get_iterator_() < rhs.get_iterator_();
167
+ }
168
+
169
+ friend bool operator<=(const DictIterator& lhs, const DictIterator& rhs) {
170
+ return lhs.get_iterator_() <= rhs.get_iterator_();
171
+ }
172
+
173
+ friend bool operator>(const DictIterator& lhs, const DictIterator& rhs) {
174
+ return lhs.get_iterator_() > rhs.get_iterator_();
175
+ }
176
+
177
+ friend bool operator>=(const DictIterator& lhs, const DictIterator& rhs) {
178
+ return lhs.get_iterator_() >= rhs.get_iterator_();
179
+ }
180
+
181
+ DictEntryRef<Key, Value, Iterator> entryRef_;
182
+
183
+ friend class DictIterator<Key, Value, typename c10::detail::DictImpl::dict_map_type::iterator>;
184
+ friend class Dict<Key, Value>;
185
+ };
186
+
187
+ template<class Key, class Value> Dict<Key, Value> toTypedDict(Dict<IValue, IValue> dict);
188
+ template<class Key, class Value> Dict<IValue, IValue> toGenericDict(Dict<Key, Value> dict);
189
+ }
190
+
191
+ /**
192
+ * An object of this class stores a map from Key to Value.
193
+ *
194
+ * This is a pointer type. After a copy, both Dicts
195
+ * will share the same storage:
196
+ *
197
+ * > Dict<int, string> a;
198
+ * > Dict<int, string> b = a;
199
+ * > b.insert(3, "three");
200
+ * > ASSERT("three" == a.at(3));
201
+ *
202
+ * We use this class in the PyTorch kernel API because that
203
+ * allows us to do optimizations and switch out the underlying
204
+ * map implementation without breaking backwards compatibility
205
+ * for the kernel API.
206
+ */
207
+ template<class Key, class Value>
208
+ class Dict final {
209
+ private:
210
+ static_assert((std::is_same<IValue, Key>::value && std::is_same<IValue, Value>::value) || guts::typelist::contains<impl::valid_dict_key_types, Key>::value, "Invalid Key type for Dict. We only support int64_t, double, bool, and string.");
211
+
212
+ // impl_ stores the underlying map as a ska_ordered::order_preserving_flat_hash_map.
213
+ // We intentionally don't offer conversion from/to
214
+ // order_preserving_flat_hash_map, return references to it or something like that,
215
+ // because such operations would get expensive if we switch out
216
+ // the actual map implementation.
217
+ // This is an intrusive_ptr because Dict is a pointer type.
218
+ // Invariant: This will never be a nullptr, there will always be a valid
219
+ // DictImpl.
220
+ c10::intrusive_ptr<detail::DictImpl> impl_;
221
+
222
+ explicit Dict(c10::intrusive_ptr<detail::DictImpl>&& impl);
223
+ friend struct IValue;
224
+ template<class K, class V> friend Dict<K, V> impl::toTypedDict(Dict<IValue, IValue>);
225
+ template<class K, class V> friend Dict<IValue, IValue> impl::toGenericDict(Dict<K, V>);
226
+
227
+ public:
228
+ using key_type = Key;
229
+ using mapped_type = Value;
230
+ using size_type = typename detail::DictImpl::dict_map_type::size_type;
231
+ using iterator = impl::DictIterator<Key, Value, typename detail::DictImpl::dict_map_type::iterator>;
232
+
233
+ /**
234
+ * Creates an empty dict.
235
+ */
236
+ explicit Dict();
237
+
238
+ /**
239
+ * Create a generic dict with runtime type information.
240
+ * This only works for c10::impl::GenericDict and is not part of the public API
241
+ * but only supposed to be used internally by PyTorch.
242
+ */
243
+ explicit Dict(TypePtr keyType, TypePtr valueType);
244
+
245
+ ~Dict() = default;
246
+
247
+ Dict(const Dict&) = default;
248
+ Dict& operator=(const Dict&) = default;
249
+
250
+ /**
251
+ * Create a new Dict pointing to a deep copy of the same data.
252
+ * The Dict returned is a new dict with separate storage.
253
+ * Changes in it are not reflected in the original dict or vice versa.
254
+ */
255
+ Dict copy() const;
256
+
257
+ /**
258
+ * Returns an iterator to the first element of the container.
259
+ * If the container is empty, the returned iterator will be equal to end().
260
+ */
261
+ iterator begin() const;
262
+
263
+ /**
264
+ * Returns an iterator to the element following the last element of the container.
265
+ * This element acts as a placeholder; attempting to access it results in undefined behavior.
266
+ */
267
+ iterator end() const;
268
+
269
+ /**
270
+ * Checks if the container has no elements.
271
+ */
272
+ bool empty() const;
273
+
274
+ /**
275
+ * Returns the number of elements in the container.
276
+ */
277
+ size_type size() const;
278
+
279
+ /**
280
+ * Erases all elements from the container. After this call, size() returns zero.
281
+ * Invalidates any references, pointers, or iterators referring to contained elements. May also invalidate past-the-end iterators.
282
+ */
283
+ void clear() const;
284
+
285
+ /**
286
+ * Inserts element(s) into the container, if the container doesn't already contain an element with an equivalent key.
287
+ * May invalidate any references, pointers, or iterators referring to contained elements.
288
+ *
289
+ * @return A pair consisting of an iterator to the inserted element (or to the element that prevented the insertion) and a bool denoting whether the insertion took place.
290
+ */
291
+ template<class Key_, class Value_>
292
+ std::pair<iterator, bool> insert(Key_&& key, Value_&& value) const;
293
+
294
+ /**
295
+ * If an element with the given key already exists, it is overwritten with the given value.
296
+ * Otherwise, a new element with the given key and value are inserted.
297
+ * May invalidate any references, pointers, or iterators referring to contained elements.
298
+ *
299
+ * @return The bool component is true if the insertion took place and false if the assignment took place. The iterator component is pointing at the element that was inserted or updated.
300
+ */
301
+ template<class Key_, class Value_>
302
+ std::pair<iterator, bool> insert_or_assign(Key_&& key, Value_&& value) const;
303
+
304
+ /**
305
+ * Removes the element pointed to by iter.
306
+ * May invalidate any references, pointers, or iterators referring to contained elements.
307
+ * The iterator iter must be valid and dereferenceable. Thus the end() iterator (which is valid, but is not dereferenceable) cannot be used as a value for iter.
308
+ */
309
+ void erase(iterator iter) const;
310
+
311
+ /**
312
+ * Removes the element with the given key, if it exists.
313
+ * May invalidate any references, pointers, or iterators referring to contained elements.
314
+ *
315
+ * @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't.
316
+ */
317
+ C10_NODISCARD size_t erase(const Key& key) const;
318
+
319
+ /**
320
+ * Returns the mapped value of the element with key equivalent to key.
321
+ * If no such element exists, an exception of type std::out_of_range is thrown.
322
+ */
323
+ Value at(const Key& key) const;
324
+
325
+ /**
326
+ * Finds an element with key equivalent to key.
327
+ *
328
+ * @return Iterator to an element with key equivalent to key.
329
+ * If no such element is found, past-the-end (see end()) iterator is returned.
330
+ */
331
+ iterator find(const Key& key) const;
332
+
333
+ /**
334
+ * Checks if there is an element with key equivalent to key in the container.
335
+ *
336
+ * @return true if there is such an element, otherwise false.
337
+ */
338
+ bool contains(const Key& key) const;
339
+
340
+ /**
341
+ * Increase the capacity so that at least count elements can be stored without
342
+ * having to reallocate or rehash.
343
+ */
344
+ void reserve(size_type count) const;
345
+
346
+ /**
347
+ * Value equality comparison. This function implements Python-like semantics for
348
+ * equality: two dicts with the same identity (e.g. same pointer) trivially
349
+ * compare equal, otherwise each element is compared for equality.
350
+ */
351
+ template <class Key_, class Value_>
352
+ friend bool operator==(
353
+ const Dict<Key_, Value_>& lhs,
354
+ const Dict<Key_, Value_>& rhs);
355
+ template <class Key_, class Value_>
356
+ friend bool operator!=(
357
+ const Dict<Key_, Value_>& lhs,
358
+ const Dict<Key_, Value_>& rhs);
359
+
360
+ /**
361
+ * Identity comparison. Returns true if and only if `rhs` represents the same
362
+ * Dict object as `this`.
363
+ */
364
+ bool is(const Dict& rhs) const;
365
+
366
+ // private API for now because the return type will change to TypePtr
367
+ // instead of optional<TypePtr> once types are mandatory.
368
+ TypePtr keyType() const;
369
+ TypePtr valueType() const;
370
+
371
+ // [unsafe set type]
372
+ // These functions mutate the tagged type of this dictionary in place.
373
+ // There is no checking that the members of the dictionary are instances
374
+ // of the new types, nor is there a check that other IValues which
375
+ // hold references to this dictionary have the right static type.
376
+ // This functionality is used only in the unpickler, where at
377
+ // creation type the real type of the dictionary is unknown, but
378
+ // then later recovered from the static type information of the
379
+ // unpickled object.
380
+ void unsafeSetKeyType(TypePtr t);
381
+ void unsafeSetValueType(TypePtr t);
382
+ };
383
+
384
+ namespace impl {
385
+ // GenericDict is how IValue stores dicts. It is, however, not part of the
386
+ // public API. Kernels should use Dicts with concrete Key, Value types instead
387
+ // (maybe except for some internal prim ops).
388
+ using GenericDict = Dict<IValue, IValue>;
389
+
390
+ }
391
+ }
392
+
393
+ namespace torch {
394
+ template<class Key, class Value> using Dict = c10::Dict<Key, Value>;
395
+ }
396
+
397
+ #include <ATen/core/Dict_inl.h> // IWYU pragma: keep
venv/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <c10/util/hash.h>
5
+
6
+ namespace c10 {
7
+ namespace detail {
8
+ inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const {
9
+ if (lhs.isTensor() && rhs.isTensor()) {
10
+ // for tensors, we compare only by identity (following how it's done in Python).
11
+ return lhs.is(rhs);
12
+ }
13
+ // Otherwise, we first compare by identity for efficiency, then by value (see:
14
+ // [container equality])
15
+ return _fastEqualsForContainer(lhs, rhs);
16
+ }
17
+ }
18
+
19
+ template<class T> decltype(auto) getTypePtr();
20
+ std::string toString(const Type& type);
21
+
22
+ namespace impl {
23
+
24
+ template<class Key, class Value>
25
+ Dict<Key, Value> toTypedDict(GenericDict dict) {
26
+ TORCH_INTERNAL_ASSERT(*getTypePtr<Key>() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Key types mismatch.");
27
+ TORCH_INTERNAL_ASSERT(*getTypePtr<Value>() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr<Key>()), ", ", toString(*getTypePtr<Value>()), ">. Value types mismatch.");
28
+
29
+ return Dict<Key, Value>(std::move(dict.impl_));
30
+ }
31
+
32
+ template<class Key, class Value>
33
+ GenericDict toGenericDict(Dict<Key, Value> dict) {
34
+ return GenericDict(std::move(dict.impl_));
35
+ }
36
+ }
37
+
38
+ namespace detail {
39
+
40
+ inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
41
+ if (ivalue.isInt()) {
42
+ return std::hash<int64_t>()(ivalue.toInt());
43
+ } else if (ivalue.isString()) {
44
+ return std::hash<c10::string_view>()(ivalue.toStringView());
45
+ } else if (ivalue.isDouble()) {
46
+ return std::hash<double>()(ivalue.toDouble());
47
+ } else if (ivalue.isComplexDouble()) {
48
+ return c10::hash<c10::complex<double>>()(ivalue.toComplexDouble());
49
+ } else if (ivalue.isBool()) {
50
+ return std::hash<bool>()(ivalue.toBool());
51
+ } else if (ivalue.isTensor()) {
52
+ return std::hash<TensorImpl*>()(ivalue.toTensor().unsafeGetTensorImpl());
53
+ } else if (ivalue.isDevice()) {
54
+ return std::hash<Device>()(ivalue.toDevice());
55
+ } else {
56
+ throw std::runtime_error(
57
+ "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
58
+ }
59
+ }
60
+
61
+ inline intrusive_ptr<DictImpl> DictImpl::copy() const {
62
+ return make_intrusive<DictImpl>(dict, elementTypes);
63
+ }
64
+
65
+ }
66
+
67
+ template<class Key, class Value>
68
+ Dict<Key, Value>::Dict()
69
+ :Dict(make_intrusive<detail::DictImpl>(
70
+ detail::DictImpl::dict_map_type(),
71
+ detail::DictImpl::DictElementTypes{getTypePtr<Key>(), getTypePtr<Value>()})) {
72
+ static_assert(!std::is_same<Key, IValue>::value, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
73
+ static_assert(!std::is_same<Value, IValue>::value, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
74
+ }
75
+
76
+ template<class Key, class Value>
77
+ Dict<Key, Value>::Dict(TypePtr keyType, TypePtr valueType)
78
+ : Dict(make_intrusive<detail::DictImpl>(
79
+ detail::DictImpl::dict_map_type(),
80
+ detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) {
81
+ static_assert(std::is_same<Key, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
82
+ static_assert(std::is_same<Value, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
83
+ }
84
+
85
+ template<class Key, class Value>
86
+ Dict<Key, Value>::Dict(c10::intrusive_ptr<detail::DictImpl>&& impl): impl_(std::move(impl)) {}
87
+
88
+ template<class Key, class Value>
89
+ Dict<Key, Value> Dict<Key, Value>::copy() const {
90
+ return Dict<Key, Value>(impl_->copy());
91
+ }
92
+
93
+ template<class Key, class Value>
94
+ typename Dict<Key, Value>::iterator Dict<Key, Value>::begin() const {
95
+ return iterator{impl_->dict.begin()};
96
+ }
97
+
98
+ template<class Key, class Value>
99
+ typename Dict<Key, Value>::iterator Dict<Key, Value>::end() const {
100
+ return iterator{impl_->dict.end()};
101
+ }
102
+
103
+ template<class Key, class Value>
104
+ bool Dict<Key, Value>::empty() const {
105
+ return impl_->dict.empty();
106
+ }
107
+
108
+ template<class Key, class Value>
109
+ typename Dict<Key, Value>::size_type Dict<Key, Value>::size() const {
110
+ return impl_->dict.size();
111
+ }
112
+
113
+ template<class Key, class Value>
114
+ void Dict<Key, Value>::clear() const {
115
+ impl_->dict.clear();
116
+ }
117
+
118
+ template<class Key, class Value>
119
+ template<class Key_, class Value_>
120
+ std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Key_&& key, Value_&& value) const {
121
+ static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert");
122
+ static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert");
123
+ auto inserted = impl_->dict.emplace(
124
+ Key(std::forward<Key_>(key)),
125
+ Value(std::forward<Value_>(value)));
126
+ return {iterator{inserted.first}, inserted.second};
127
+ }
128
+
129
+ template<class Key, class Value>
130
+ template<class Key_, class Value_>
131
+ std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert_or_assign(Key_&& key, Value_&& value) const {
132
+ static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert_or_assign");
133
+ static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert_or_assign");
134
+ auto inserted = impl_->dict.insert_or_assign(
135
+ Key(std::forward<Key_>(key)),
136
+ Value(std::forward<Value_>(value)));
137
+ return {iterator{inserted.first}, inserted.second};
138
+ }
139
+
140
+ template<class Key, class Value>
141
+ void Dict<Key, Value>::erase(iterator iter) const {
142
+ impl_->dict.erase(iter.entryRef_.iterator_);
143
+ }
144
+
145
+ template<class Key, class Value>
146
+ C10_NODISCARD size_t Dict<Key, Value>::erase(const Key& key) const {
147
+ return impl_->dict.erase(key);
148
+ }
149
+
150
+ template<class Key, class Value>
151
+ Value Dict<Key, Value>::at(const Key& key) const {
152
+ return impl_->dict.at(key).template to<Value>();
153
+ }
154
+
155
+ template<class Key, class Value>
156
+ typename Dict<Key, Value>::iterator Dict<Key, Value>::find(const Key& key) const {
157
+ return iterator{impl_->dict.find(key)};
158
+ }
159
+
160
+ template<class Key, class Value>
161
+ bool Dict<Key, Value>::contains(const Key& key) const {
162
+ return end() != find(key);
163
+ }
164
+
165
+ template<class Key, class Value>
166
+ void Dict<Key, Value>::reserve(size_type count) const {
167
+ impl_->dict.reserve(count);
168
+ }
169
+
170
+ template<class Key, class Value>
171
+ TypePtr Dict<Key, Value>::keyType() const {
172
+ return impl_->elementTypes.keyType;
173
+ }
174
+
175
+ template<class Key, class Value>
176
+ TypePtr Dict<Key, Value>::valueType() const {
177
+ return impl_->elementTypes.valueType;
178
+ }
179
+ template <class Key, class Value>
180
+ void Dict<Key, Value>::unsafeSetKeyType(TypePtr t) {
181
+ impl_->elementTypes.keyType = std::move(t);
182
+ }
183
+
184
+ template <class Key, class Value>
185
+ void Dict<Key, Value>::unsafeSetValueType(TypePtr t) {
186
+ impl_->elementTypes.valueType = std::move(t);
187
+ }
188
+
189
+ template <class Key_, class Value_>
190
+ bool operator==(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
191
+ // Dicts with the same identity trivially compare equal.
192
+ if (lhs.impl_ == rhs.impl_) {
193
+ return true;
194
+ }
195
+
196
+ // Otherwise compare the values
197
+ return *lhs.impl_ == *rhs.impl_;
198
+ }
199
+
200
+ template <class Key_, class Value_>
201
+ bool operator!=(const Dict<Key_, Value_>& lhs, const Dict<Key_, Value_>& rhs) {
202
+ return !(lhs == rhs);
203
+ }
204
+
205
+ template <class Key, class Value>
206
+ bool Dict<Key, Value>::is(const Dict& rhs) const {
207
+ return this->impl_ == rhs.impl_;
208
+ }
209
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/Dimname.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Optional.h>
6
+ #include <ostream>
7
+
8
+ namespace at {
9
+
10
+ enum class NameType: uint8_t { BASIC, WILDCARD };
11
+
12
+ struct TORCH_API Dimname {
13
+ static Dimname fromSymbol(Symbol name);
14
+ static Dimname wildcard();
15
+ static bool isValidName(const std::string& name);
16
+
17
+ NameType type() const { return type_; }
18
+ Symbol symbol() const { return name_; }
19
+
20
+ bool isBasic() const { return type_ == NameType::BASIC; }
21
+ bool isWildcard() const { return type_ == NameType::WILDCARD; }
22
+
23
+ bool matches(Dimname other) const;
24
+ c10::optional<Dimname> unify(Dimname other) const;
25
+
26
+ private:
27
+ Dimname(Symbol name)
28
+ : name_(name), type_(NameType::BASIC) {}
29
+ Dimname(Symbol name, NameType type)
30
+ : name_(name), type_(type) {}
31
+
32
+ Symbol name_;
33
+ NameType type_;
34
+ };
35
+
36
+ using DimnameList = c10::ArrayRef<Dimname>;
37
+
38
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname);
39
+
40
+ inline bool operator==(const Dimname& lhs, const Dimname& rhs) {
41
+ return lhs.symbol() == rhs.symbol();
42
+ }
43
+
44
+ inline bool operator!=(const Dimname& lhs, const Dimname& rhs) {
45
+ return !(lhs == rhs);
46
+ }
47
+
48
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ostream>
4
+ #include <string>
5
+
6
+ #include <c10/core/Scalar.h>
7
+ #include <ATen/core/Tensor.h>
8
+
9
+ namespace c10 {
10
+ TORCH_API std::ostream& operator<<(std::ostream& out, Backend b);
11
+ TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s);
12
+ TORCH_API std::string toString(const Scalar& s);
13
+ }
14
+ namespace at {
15
+
16
+ TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
17
+ TORCH_API std::ostream& print(
18
+ std::ostream& stream,
19
+ const Tensor& tensor,
20
+ int64_t linesize);
21
+ static inline std::ostream& operator<<(std::ostream & out, const Tensor & t) {
22
+ return print(out,t,80);
23
+ }
24
+ TORCH_API void print(const Tensor & t, int64_t linesize=80);
25
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h ADDED
@@ -0,0 +1,631 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue_to.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <functional>
8
+ #include <initializer_list>
9
+ #include <iterator>
10
+ #include <type_traits>
11
+
12
+ /*
13
+ * [Note: IListRef]
14
+ * Wrapper around different API containers (e.g. boxed and unboxed).
15
+ *
16
+ * What is it?
17
+ * ===========
18
+ * It is a tagged union of both boxed and unboxed API containers.
19
+ * Working implementations:
20
+ *
21
+ * - `IListRef<at::Tensor>`
22
+ * - `IListRef<at::OptionalTensorRef>`
23
+ *
24
+ * Note that `IListRef` is a view type. Meaning that it won't own the
25
+ * tensors it holds. It's intended to be used only as argument parameters.
26
+ * Specifically, where these 2 worlds overlap.
27
+ *
28
+ * What is this for?
29
+ * =================
30
+ * Historically, PyTorch has maintained 2 different APIs: the unboxed
31
+ * (called from C++ API and Python eager mode) and boxed APIs (called
32
+ * from the TorchScript JIT, mobile interpreter, and boxed fallbacks).
33
+ *
34
+ * Calling unboxed kernels from the boxed "world" and vice-versa may
35
+ * result in non-negligible overhead. Lists are one of those types:
36
+ *
37
+ * - Boxed world: `c10::List`
38
+ * - Unboxed world: `c10::ArrayRef`
39
+ *
40
+ * In this context, `c10::IListRef` solves this problem by wrapping those
41
+ * 2 container types, so that we don't need to convert from one to
42
+ * the other.
43
+ *
44
+ * (see https://github.com/pytorch/pytorch/issues/66328)
45
+ *
46
+ * What does it do?
47
+ * ================
48
+ * This container wraps around the different tagged containers
49
+ * (currently, only boxed and unboxed), without incurring in extra
50
+ * overhead for converting from one to another. It does so while
51
+ * exposing usual container methods, which dispatch to corresponding
52
+ * implementations.
53
+ *
54
+ * While it works with different container types, it introduces
55
+ * overhead for repeatedly calling member functions (since those will
56
+ * get dispatched, again). Therefore, you should only use it to iterate
57
+ * through the list up to one time. If you need to do more complex things,
58
+ * call `materialize()` first.
59
+ *
60
+ * Adding support for a new Tag
61
+ * ============================
62
+ * Suppose we want to add a new tag: `Chest`. Here are the steps
63
+ * we would have to go through:
64
+ *
65
+ * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`.
66
+ *
67
+ * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
68
+ * ...
69
+ * _(Chest, ##__VA_ARGS__)
70
+ *
71
+ * 2. Add type aliases, union members, and constructors.
72
+ *
73
+ * template <typename T>
74
+ * class IListRef {
75
+ * ...
76
+ * using chest_type =
77
+ * typename detail::IListRefTagImpl<T, IListRefTag::Chest>::list_type;
78
+ * ...
79
+ * IListRef(...) : tag_(IListRefTag::Chest) {
80
+ * ...
81
+ * }
82
+ * ...
83
+ * union Payload {
84
+ * ...
85
+ * chest_type chest;
86
+ * ...
87
+ * };
88
+ * ...
89
+ * };
90
+ *
91
+ * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's
92
+ * preferable to make the default implementation work for `T = Tensor`
93
+ * (both `Unboxed` and `Boxed` do it).
94
+ *
95
+ * template <typename T, typename ListElemT>
96
+ * class IListRefTagImplBase<IListRefTag::Chest, T, ListElemT> {
97
+ * public:
98
+ * using elem_type = ListElemT;
99
+ * using list_type = ChestContainer<elem_type>;
100
+ *
101
+ * static const list_type& unwrap(const IListRef<T>& ilist) { ... }
102
+ *
103
+ * static typename list_type::const_iterator& unwrap(
104
+ * IListRefIterator<T>& it) { ... }
105
+ *
106
+ * static const typename list_type::const_iterator& unwrap(
107
+ * const IListRefIterator<T>& it) { ... }
108
+ *
109
+ * static IListRefConstRef<T> iterator_get(
110
+ * const typename list_type::const_iterator& it) { ... }
111
+ * }
112
+ *
113
+ * 4. Add an specialization for each of the already supported types.
114
+ * Finally, for consistency, add them to the tracking list.
115
+ * (see [Note: IListRefTagImpl Specializations])
116
+ *
117
+ * template <>
118
+ * class IListRefTagImpl<IListRefTag::Chest, at::Tensor>
119
+ * : public IListRefTagImplBase<IListRefTag::Chest, at::Tensor> {};
120
+ *
121
+ * Adding support for a new Type
122
+ * =============================
123
+ * Suppose we want to add support for a new type: `Matrix`.
124
+ * Here are the steps we would have to go through:
125
+ *
126
+ * 1. Add an specialization for each of the existing tags.
127
+ * For consistency, add them to the tracking list.
128
+ * (see [Note: IListRefTagImpl Specializations])
129
+ *
130
+ * template <>
131
+ * class IListRefTagImpl<IListRefTag::Unboxed, Matrix>
132
+ * : public IListRefTagImplBase<IListRefTag::Unboxed, Matrix> {};
133
+ *
134
+ * template <>
135
+ * class IListRefTagImpl<Matrix, IListRefTag::Boxed>
136
+ * : public IListRefTagImplBase<IListRefTag::Boxed, Matrix> {};
137
+ *
138
+ * Common Problems
139
+ * ===============
140
+ * 1. One of `IListRef(Iterator)` methods are failing to compile.
141
+ *
142
+ * That may be happening because the container type you added
143
+ * is not compatible with the code written for that method. If
144
+ * that's true, then you might have to transform that code into
145
+ * a static method call (see `List::operator[]` method).
146
+ *
147
+ * 2. Can't make `IListRefIterator<T>::operator*` return a const-reference.
148
+ *
149
+ * First, keep in mind that we assume that boxed containers will
150
+ * have to deal with `IValue` (e.g. `c10::List`). In this context,
151
+ * what may be happening is that `IValue` doesn't store internally
152
+ * your type `T`. Instead, it constructs a type new `T` everytime
153
+ * you try to get `T` for it (see `IListRef<at::OptinalTensorRef>`).
154
+ */
155
+
156
+ namespace c10 {
157
+ template <typename T>
158
+ class IListRef;
159
+
160
+ /*
161
+ * Applies arbitrary macros to each `IListRefTag`.
162
+ */
163
+ #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
164
+ _(Unboxed, ##__VA_ARGS__) \
165
+ _(Boxed, ##__VA_ARGS__) \
166
+ _(Materialized, ##__VA_ARGS__)
167
+
168
+ /*
169
+ * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`,
170
+ * while bringing to scope:
171
+ *
172
+ * - `ImplT`: the implementation class for `TAG`
173
+ * - `this_`: the result of unwrapping `this`
174
+ */
175
+ #define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) \
176
+ case c10::IListRefTag::TAG: { \
177
+ using ImplT = c10::detail::IListRefTagImpl<IListRefTag::TAG, T>; \
178
+ auto& this_ = ImplT::unwrap(*this); \
179
+ BODY \
180
+ } break;
181
+
182
+ /*
183
+ * Dispatches the unwrap call, depending on `TAG`, followed by
184
+ * the execution of `BODY`. It aborts if `TAG` is not a `IListRefTag`.
185
+ *
186
+ * This macro is useful because it allows us to handle different
187
+ * types (that correspond to different tags) to be implemented
188
+ * only once. We can do it even when the implementation of the
189
+ * different tags aren't syntatically the same, by dispatching
190
+ * it to a function (e.g. `ImplT::<dispatch-function>(this_)`).
191
+ */
192
+ #define TORCH_ILISTREF_UNWRAP(TAG, BODY) \
193
+ switch (TAG) { \
194
+ TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \
195
+ break; \
196
+ default: \
197
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \
198
+ }
199
+
200
+ enum class IListRefTag {
201
+ #define DEFINE_TAG(tag, ...) tag,
202
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG)
203
+ #undef DEFINE_TAG
204
+ None
205
+ };
206
+
207
+ namespace detail {
208
+ /*
209
+ * Type alias that specifies whether we return a reference or a copy of `T`.
210
+ *
211
+ * What is this for?
212
+ * =================
213
+ * Since values in the boxed world are represented by an `IValue`, we also
214
+ * depend on whether it can be converted to a const-reference (`Tensor`) or
215
+ * has to create a new copy of `T` (`OptionalTensorRef`).
216
+ */
217
+ template <typename T>
218
+ using IListRefConstRef = typename ivalue_to_const_ref_overload_return<T>::type;
219
+
220
+ /*
221
+ * Interface that implements key functions for each `IListRefTag` type.
222
+ *
223
+ * What is this for?
224
+ * =================
225
+ * Given an `IListRef(Iterator)<T>`, some methods have to be implemented
226
+ * differently for each `TAG`. Therefore, the methods inside this class
227
+ * are used as dispatch targets for the different `IListRefTag` values.
228
+ *
229
+ * You should create an specialization of this class for each possible
230
+ * combination of `IListRefTag` type (except `None`) and element types
231
+ * (e.g. `Tensor`).
232
+ *
233
+ * What does it do?
234
+ * ================
235
+ * 1. defines static methods to be used as dispatch targets by both
236
+ * `IListRef<T>` and `IListRefIterator<T>` (see the implementation of
237
+ * `IListRefTagImplBase`).
238
+ *
239
+ * 2. defines the `elem_type` and `list_type` aliases that will be
240
+ * used in the definition of `IListRef<T>`. In general, we should do
241
+ * so by inheriting from `IListRefTagImplBase<TAG, T, ListElemT>`.
242
+ *
243
+ * [Note: IListRefTagImpl Specialization]
244
+ * ======================================
245
+ * For `IListRef(Iterator)<at::Tensor>`:
246
+ * - <IListRefTag::Unboxed, at::Tensor>
247
+ * - <IListRefTag::Boxed, at::Tensor>
248
+ * - <IListRefTag::Materialized, at::Tensor>
249
+ *
250
+ * For `IListRef(Iterator)<at::OptionalTensorRef>`:
251
+ * - <IListRefTag::Unboxed, at::OptionalTensorRef>
252
+ * - <IListRefTag::Boxed, at::OptionalTensorRef>
253
+ * - <IListRefTag::Materialized, at::OptionalTensorRef>
254
+ */
255
+ template <IListRefTag TAG, typename T>
256
+ class IListRefTagImpl {};
257
+
258
+ /*
259
+ * Base implementation of `IListRefTagImpl<TAG, T>` methods.
260
+ *
261
+ * What is this for?
262
+ * =================
263
+ * This should make adding specializations for new types easier. For
264
+ * example, one should be able to add a new type just by making its
265
+ * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`.
266
+ *
267
+ * You should create a partial specialization for this class only if
268
+ * you introduce a new `IListRefTag`. The idea being that there is one
269
+ * default implementation for each possible value of `IListRefTag`.
270
+ *
271
+ * What does it do?
272
+ * ================
273
+ * 1. defines `elem_type` as an alias to `ListElemT`.
274
+ *
275
+ * 1. defines `list_type` as an alias to the default container type
276
+ * that will hold a collection of `elem_type`. The idea being that
277
+ * all types tagged as `TAG` will have `list_type` as its container,
278
+ * with different `elem_type`.
279
+ *
280
+ * 3. defines the default implementation for each of the methods that
281
+ * are supposed to be defined on `IListRefTagImpl` specializations.
282
+ *
283
+ * 4. inheriting from `IListRefTagImplBase<TAG, T, ListElemT>` also means
284
+ * that the payload of the type `IListRef<T>` will be of type `list_type`
285
+ * when it is tagged as `TAG`.
286
+ */
287
+ template <IListRefTag TAG, typename T, typename ListElemT = T>
288
+ class IListRefTagImplBase {};
289
+
290
+ /*
291
+ * Materialized container for `IListRef<T>`.
292
+ *
293
+ * What is this for?
294
+ * =================
295
+ * Container that groups `T` references together. This exchanges the
296
+ * overhead of every method call from `IListRef<T>` for a dynamic allocation.
297
+ *
298
+ * You should use this container instead of `IListRef<T>` if:
299
+ *
300
+ * - You are going to iterate the list more than once
301
+ * - You need to repeatedly access arbitrary elements (using `operator[]`)
302
+ * What does it do?
303
+
304
+ * ================
305
+ * Removes the reference (&) from the type, and wraps it into a
306
+ * `std::reference_wrapper`. If `IListRefConstRef<T>` is not a
307
+ * reference type, then it's left unchanged.
308
+ */
309
+ template <typename T>
310
+ using _MaterializedIListRefElem = typename std::conditional<
311
+ std::is_reference<T>::value,
312
+ typename std::reference_wrapper<typename std::remove_reference<T>::type>,
313
+ T>::type;
314
+
315
+ template <typename T>
316
+ using MaterializedIListRefElem = _MaterializedIListRefElem<IListRefConstRef<T>>;
317
+
318
+ template <typename T>
319
+ using MaterializedIListRef = std::vector<MaterializedIListRefElem<T>>;
320
+
321
+ } // namespace detail
322
+
323
+ /*
324
+ * Iterator for `IListRef<T>`.
325
+ *
326
+ * What is it?
327
+ * ===========
328
+ * Currently, a `std::bidirectional_iterator` that wraps the iterator
329
+ * types defined for each of the `IListRefTag`.
330
+ *
331
+ * One should be able to use it, as if it were the unwrapped
332
+ * iterators themselves.
333
+
334
+ * What does it do?
335
+ * ================
336
+ * Similarly to `IListRef<T>`, this is a wrapper class. Specifically, it
337
+ * wraps each container's `const_iterator` type alias. So, for example,
338
+ * given that the container for `IListRefTag::Boxed` is `c10::List`, this
339
+ * iterator will wrap a `c10::List::const_iterator`.
340
+ *
341
+ * [Note: MSVC Iterator Debug]
342
+ * ===========================
343
+ * MSVC `vector<T>::iterator` implementation (used in the boxed variant)
344
+ * makes it so this union's destructor, copy-constructor (assignment), and
345
+ * move-constructor (assignment) are implicitly deleted.
346
+ *
347
+ * Therefore, we need to explicitly define them as needed. Follows a list
348
+ * of places where these are needed and their reason:
349
+ *
350
+ * - `Payload` destructor:
351
+ * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2.
352
+ *
353
+ * - `IListRefIterator` destructor:
354
+ * same as above. However, we need to explicitly call the variant
355
+ * destructor explicitly.
356
+ *
357
+ * - `IListRefIterator` copy-constructor:
358
+ * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different
359
+ * than 0.
360
+ */
361
+ template <typename T>
362
+ class IListRefIterator {
363
+ private:
364
+ #define DEFINE_FRIEND_CLASS(TAG, ...) \
365
+ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
366
+ friend class detail::IListRefTagImplBase< \
367
+ IListRefTag::TAG, \
368
+ T, \
369
+ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
370
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
371
+ #undef DEFINE_FRIEND_CLASS
372
+
373
+ public:
374
+ // C++17 friendly std::iterator implementation
375
+ using iterator_category = std::bidirectional_iterator_tag;
376
+ using value_type = T;
377
+ using difference_type = std::ptrdiff_t;
378
+ using pointer = T*;
379
+ using reference = T&;
380
+
381
+ using unboxed_iterator_type = typename detail::
382
+ IListRefTagImpl<IListRefTag::Unboxed, T>::list_type::const_iterator;
383
+ using boxed_iterator_type = typename detail::
384
+ IListRefTagImpl<IListRefTag::Boxed, T>::list_type::const_iterator;
385
+ using materialized_iterator_type =
386
+ typename detail::MaterializedIListRef<T>::const_iterator;
387
+
388
+ IListRefIterator() : tag_(IListRefTag::None) {}
389
+
390
+ #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0
391
+ // See [Note: MSVC Iterator Debug]
392
+ IListRefIterator(const IListRefIterator& iterator)
393
+ : tag_(iterator.tag_) {
394
+ switch (tag_) {
395
+ case IListRefTag::Boxed:
396
+ payload_.boxed_iterator = iterator.payload_.boxed_iterator;
397
+ break;
398
+ case IListRefTag::Unboxed:
399
+ payload_.unboxed_iterator = iterator.payload_.unboxed_iterator;
400
+ break;
401
+ case IListRefTag::Materialized:
402
+ payload_.materialized_iterator = iterator.payload_.materialized_iterator;
403
+ break;
404
+ default:
405
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
406
+ }
407
+ }
408
+ #endif
409
+
410
+ #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2
411
+ // See [Note: MSVC Iterator Debug]
412
+ ~IListRefIterator() noexcept(false) {
413
+ switch (tag_) {
414
+ case IListRefTag::Boxed:
415
+ payload_.boxed_iterator.~boxed_iterator_type();
416
+ break;
417
+ case IListRefTag::Unboxed:
418
+ payload_.unboxed_iterator.~unboxed_iterator_type();
419
+ break;
420
+ case IListRefTag::Materialized:
421
+ payload_.materialized_iterator.~materialized_iterator_type();
422
+ break;
423
+ default:
424
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
425
+ }
426
+ }
427
+ #endif
428
+
429
+ IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) {
430
+ payload_.boxed_iterator = boxed;
431
+ }
432
+
433
+ IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) {
434
+ payload_.unboxed_iterator = unboxed;
435
+ }
436
+
437
+ IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) {
438
+ payload_.materialized_iterator = materialized;
439
+ }
440
+
441
+ detail::IListRefConstRef<T> operator*() const {
442
+ TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); });
443
+ }
444
+
445
+ IListRefIterator& operator++() {
446
+ TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
447
+ return *this;
448
+ }
449
+
450
+ IListRefIterator operator++(int) {
451
+ auto old = *this;
452
+ TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
453
+ return old;
454
+ }
455
+
456
+ IListRefIterator& operator--() {
457
+ TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
458
+ return *this;
459
+ }
460
+
461
+ IListRefIterator operator--(int) {
462
+ auto old = *this;
463
+ TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
464
+ return old;
465
+ }
466
+
467
+ bool operator==(const IListRefIterator& rhs) const {
468
+ if (tag_ != rhs.tag_) {
469
+ return false;
470
+ }
471
+ TORCH_ILISTREF_UNWRAP(tag_, {
472
+ auto& rhs_it = ImplT::unwrap(rhs);
473
+ return this_ == rhs_it;
474
+ });
475
+ }
476
+
477
+ bool operator!=(const IListRefIterator& rhs) const {
478
+ return !(*this == rhs);
479
+ }
480
+
481
+ private:
482
+ union Payload {
483
+ boxed_iterator_type boxed_iterator;
484
+ unboxed_iterator_type unboxed_iterator;
485
+ materialized_iterator_type materialized_iterator;
486
+ void* _init_ptr;
487
+ Payload() : _init_ptr(nullptr) {}
488
+ #if defined(_MSC_VER)
489
+ // See [Note: MSVC Iterator Debug]
490
+ ~Payload() {}
491
+ #endif
492
+ };
493
+
494
+ Payload payload_;
495
+ IListRefTag tag_;
496
+ };
497
+
498
+ /*
499
+ * See [Note: IListRef]
500
+ */
501
+ template <typename T>
502
+ class IListRef {
503
+ private:
504
+ #define DEFINE_FRIEND_CLASS(TAG, ...) \
505
+ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
506
+ friend class detail::IListRefTagImplBase< \
507
+ IListRefTag::TAG, \
508
+ T, \
509
+ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
510
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
511
+ #undef DEFINE_FRIEND_CLASS
512
+
513
+ public:
514
+ using unboxed_type =
515
+ typename detail::IListRefTagImpl<IListRefTag::Unboxed, T>::list_type;
516
+ using boxed_type =
517
+ typename detail::IListRefTagImpl<IListRefTag::Boxed, T>::list_type;
518
+ using materialized_type =
519
+ typename detail::MaterializedIListRef<T>;
520
+
521
+ using iterator = IListRefIterator<T>;
522
+ using const_iterator = IListRefIterator<T>;
523
+ using reverse_iterator = std::reverse_iterator<iterator>;
524
+ using value_type = typename iterator::value_type;
525
+
526
+ IListRef() : tag_(IListRefTag::None) {}
527
+
528
+ IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) {
529
+ payload_.boxed = &boxed;
530
+ }
531
+
532
+ IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) {
533
+ payload_.unboxed = unboxed;
534
+ }
535
+
536
+ IListRef(const std::initializer_list<T>& list) : tag_(IListRefTag::Unboxed) {
537
+ payload_.unboxed = at::ArrayRef<T>(list);
538
+ }
539
+
540
+ template <
541
+ typename... UnboxedConstructorArgs,
542
+ typename = std::enable_if_t<
543
+ std::is_constructible<unboxed_type, UnboxedConstructorArgs...>::value>>
544
+ IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) {
545
+ payload_.unboxed = unboxed_type(std::forward<UnboxedConstructorArgs>(args)...);
546
+ }
547
+
548
+ IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) {
549
+ payload_.materialized = &materialized;
550
+ }
551
+
552
+ size_t size() const {
553
+ TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); });
554
+ }
555
+
556
+ bool empty() const {
557
+ return size() == 0;
558
+ }
559
+
560
+ iterator begin() const {
561
+ TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); });
562
+ }
563
+
564
+ iterator end() const {
565
+ TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); });
566
+ }
567
+
568
+ detail::IListRefConstRef<T> front() const {
569
+ TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); });
570
+ }
571
+
572
+ /*
573
+ * Materializes the `IListRef` into a `std::vector`.
574
+ *
575
+ * This should be used when one wishes to either:
576
+ *
577
+ * - iterate over the list more than once: each `IListRefIterator`
578
+ * member function call has to go through a switch, introducing
579
+ * non-negligible overhead
580
+ *
581
+ * - randomly access an arbitrary element using `operator[]`:
582
+ * same reason as above
583
+ */
584
+ detail::MaterializedIListRef<T> materialize() const {
585
+ if (isMaterialized()) {
586
+ return toMaterialized();
587
+ }
588
+
589
+ detail::MaterializedIListRef<T> materialized;
590
+ materialized.reserve(size());
591
+ for (const auto& t : *this) {
592
+ materialized.emplace_back(t);
593
+ }
594
+ return materialized;
595
+ }
596
+
597
+ #define DEFINE_CHECK(TAG, ...) \
598
+ bool is##TAG() const { \
599
+ return tag_ == IListRefTag::TAG; \
600
+ }
601
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK);
602
+ #undef DEFINE_CHECK
603
+
604
+ bool isNone() const {
605
+ return tag_ == IListRefTag::None;
606
+ }
607
+
608
+ #define DEFINE_CASTING(TAG, ...) \
609
+ const typename detail::IListRefTagImpl<IListRefTag::TAG, T>::list_type& \
610
+ to##TAG() const { \
611
+ TORCH_INTERNAL_ASSERT(is##TAG()); \
612
+ return detail::IListRefTagImpl<IListRefTag::TAG, T>::unwrap(*this); \
613
+ }
614
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING);
615
+ #undef DEFINE_CASTING
616
+
617
+ private:
618
+ union Payload {
619
+ const boxed_type* boxed;
620
+ unboxed_type unboxed;
621
+ const materialized_type* materialized;
622
+ Payload() : boxed(nullptr) {}
623
+ };
624
+
625
+ Payload payload_;
626
+ IListRefTag tag_;
627
+ };
628
+
629
+ } // namespace c10
630
+
631
+ #include <ATen/core/IListRef_inl.h>
venv/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // The legacy mechanism for dispatching operators in ATen is a Type
4
+ // object, which is essentially a giant virtual dispatch table
5
+ // for every operation we support dynamically dispatching over.
6
+ //
7
+ // This has been deprecated in favor of ATenDispatch, and in the future,
8
+ // c10 dispatcher.
9
+ // TODO: Clean up what remains here
10
+
11
+ #include <c10/core/impl/LocalDispatchKeySet.h>
12
+
13
+ namespace at {
14
+
15
+ // A RAII, thread local (!) guard that will disable dispatch to variable
16
+ // handler.
17
+ //
18
+ // NOTE [ Treating Variables as non-Variables in type dispatch ]
19
+ //
20
+ // What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes
21
+ // dispatches on ATen functions to go to the non-variable implementation,
22
+ // bypassing autograd handling (and also profiling and tracing).
23
+ //
24
+ // To understand why this guard exists, it's helpful to understand the history
25
+ // behind how Variable was implemented. Previously, Variables were implemented
26
+ // as a wrapper on Tensors; so the act of processing a Variable involved
27
+ // unwrapping the underlying Tensor, and then calling the underlying base
28
+ // operation on /that/ operation
29
+ //
30
+ // However, after the Variable/Tensor merge, there is no concept of unwrapping
31
+ // a tensor anymore. If you just call the operation on the same variable
32
+ // again inside your VariableType handler, you'll dispatch back to
33
+ // VariableType, which is not what we want.
34
+ //
35
+ // The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which
36
+ // when enabled will cause `legacyTensorType()` and `getType()` to always return
37
+ // non-Variable type, even if the tensor being called on is a variable.
38
+
39
+ /* Note [AutoDispatchBelowAutograd]
40
+ * AutoDispatchBelowAutograd is **INTERNAL ONLY** that it should be used
41
+ * for kernel implementations and customized C++ kernels.
42
+ * If you are looking for a guard to run workload in inference mode, please use
43
+ * c10::InferenceMode RAII which is user facing API.
44
+ * In the past AutoDispatchBelowAutograd(or its old version AutoNonVariableTypeMode)
45
+ * was used in the user code for inference-only workload, this was under risk of
46
+ * producing wrong results silently in some edge cases. For example:
47
+ * ```
48
+ * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true);
49
+ * torch::Tensor out = s * s;
50
+ * {
51
+ * at::AutoDispatchBelowAutograd guard;
52
+ * s.add_(1); // Skips version bump on `s`.
53
+ * }
54
+ * // WRONG GRADIENT! s.grad() are now computed using `s` value after the
55
+ * // inplace update.
56
+ * out.backward(torch::ones_like(out));
57
+ * ```
58
+ * Users should use `c10::InferenceMode` here so that it'll properly throw an
59
+ * error saying "one of the variables needed for gradient computation has be modified."
60
+ */
61
+ struct TORCH_API AutoDispatchBelowAutograd {
62
+ AutoDispatchBelowAutograd() :
63
+ autograd_guard_(c10::autograd_dispatch_keyset) {
64
+ }
65
+
66
+ // disable all autograd dispatch keys
67
+ c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
68
+ };
69
+
70
+ // TODO: AutoNonVariableTypeMode should be removed in release 1.10.
71
+ struct TORCH_API AutoNonVariableTypeMode {
72
+ AutoNonVariableTypeMode(bool enabled = true) :
73
+ autograd_guard_(c10::autograd_dispatch_keyset) {
74
+ TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. "
75
+ "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, "
76
+ "If you are looking for a user facing API to enable running your inference-only "
77
+ "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code "
78
+ "is under risk of producing silent wrong result in some edge cases. "
79
+ "See Note [AutoDispatchBelowAutograd] for more details.");
80
+ TORCH_INTERNAL_ASSERT(enabled);
81
+ }
82
+
83
+ // disable all autograd dispatch keys
84
+ c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
85
+ };
86
+
87
+ struct TORCH_API AutoDispatchSkipFunctionalize {
88
+ AutoDispatchSkipFunctionalize() :
89
+ dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) {
90
+ }
91
+ c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
92
+ };
93
+
94
+ /* Note [AutoDispatchBelowADInplaceOrView]
95
+ * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode
96
+ * before we split inplace & view ops out of VariableType kernel.
97
+ * Note this guard is used in VariableType kernels for functional ops
98
+ * as well as ADInplaceOrView kernels for inplace/view ops to enforce the
99
+ * Invariant:
100
+ * Once you are in VariableType/ADInplaceOrView kernel for an op,
101
+ * you never go back to a kernel on same dispatch key until
102
+ * you finish the current op.
103
+ */
104
+ struct TORCH_API AutoDispatchBelowADInplaceOrView {
105
+ AutoDispatchBelowADInplaceOrView() :
106
+ dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) {
107
+ }
108
+ // disable Autograd & ADInplaceOrView dispatch keys
109
+ c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
110
+ };
111
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/NestedIntSymNodeImpl.h ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/ConstantSymNodeImpl.h>
4
+ #include <c10/core/SymNodeImpl.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/Optional.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+ #include <cstdint>
10
+ #include <string>
11
+
12
+ namespace c10 {
13
+
14
+ // The motivating usecase for this is to represent the ragged size structure
15
+ // of a jagged tensor [B, [s_0, s_1, s_2], D] as a single integer j0. This
16
+ // allows us to simply return [B, j0, D] if someone queries for the size of our
17
+ // tensor.
18
+ //
19
+ // Morally we define comparison between two nested ints to return true if
20
+ // that comparison holds for all corresponding elements of the arrays they
21
+ // represent. Comparison between a nested int and a plain int is defined
22
+ // similarly.
23
+ //
24
+ // To simulate this desired behavior but also avoid the O(N) cost of checking,
25
+ // we associate each raggedness pattern with an integer "id" that can be used as
26
+ // a proxy to evaluate equality. We also constrain the range of values for this
27
+ // as to enable inequality checks.
28
+ //
29
+ // We also support a positive integer scalar "coeff" that is used for computing
30
+ // strides. For example given, a [B, j0, D] tensor, it can be strided in two
31
+ // different ways: [D * j0, D, 1] and [j0, 1, sum(j0)]. The coeff is used to
32
+ // differentiate the two cases.
33
+ //
34
+ // During tracing the strides of the outputs need to be a function of the size
35
+ // and strides of the inputs so it is important that NestedIntSymNode itself is
36
+ // able to express this.
37
+ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
38
+ public:
39
+ // CAUTION: you should probably not be constructing these directly; please
40
+ // the higher-level API in python instead (TODO: actually introduce that).
41
+ explicit NestedIntSymNodeImpl(int64_t val, int64_t coeff)
42
+ : val_(val), coeff_(coeff) {}
43
+
44
+ bool bool_() override {
45
+ return false;
46
+ }
47
+
48
+ bool is_int() override {
49
+ return true;
50
+ }
51
+
52
+ bool is_float() override {
53
+ return false;
54
+ }
55
+
56
+ bool is_bool() override {
57
+ return false;
58
+ }
59
+
60
+ bool is_nested_int() const override {
61
+ return true;
62
+ }
63
+
64
+ bool has_hint() override {
65
+ return true;
66
+ }
67
+
68
+ c10::SymNode wrap_int(int64_t num) override {
69
+ return SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(num));
70
+ };
71
+
72
+ int64_t guard_int(const char* file, int64_t line) override {
73
+ TORCH_CHECK(false);
74
+ }
75
+
76
+ double guard_float(const char* file, int64_t line) override {
77
+ TORCH_CHECK(false, "not a float");
78
+ }
79
+
80
+ bool guard_bool(const char* file, int64_t line) override {
81
+ TORCH_CHECK(false, "not a bool");
82
+ }
83
+
84
+ int64_t int_() override {
85
+ TORCH_CHECK(false);
86
+ }
87
+
88
+ std::string str() override {
89
+ if (coeff_ == 1) {
90
+ return "j" + std::to_string(val_);
91
+ }
92
+ return std::to_string(coeff_) + "*j" + std::to_string(val_);
93
+ }
94
+
95
+ // NOTE [ Inequalities with nested int ]
96
+ //
97
+ // The semantics of nested int when it comes to relations is that it is
98
+ // treated as integer known to be within a certain range,
99
+ //
100
+ // j0 \in [2, int64_t::max]
101
+ //
102
+ // allowing us to answer queries like j0 >= 1 (True), and j0 == 0 (False).
103
+ // This is a useful default range for the raggedness pattern of a jagged
104
+ // tensor (1) since sizes are non-negative, and (2) we need to get past 0/1
105
+ // specialization checks.
106
+ //
107
+ // [ Indeterminate inequalities error out ]
108
+ //
109
+ // Given the semantic defined above, certain relations like j0 < 3 are thus
110
+ // indeterminable. In our impl today, evaluating such relations error
111
+ //
112
+ // It may seem convenient to just define indeterminate relations to return
113
+ // False, but the implementation we maintain in parallel using sympy does not
114
+ // allow this.
115
+ //
116
+ // Sympy only allows overriding of Ge. The other relations (Lt, Gt, Le) are,
117
+ // by consequence, all derived from Ge e.g., Lt(a, b) := !Ge(a, b). This
118
+ // would mean that means that if we define the indeterminate j0 >= 3 to be
119
+ // False, the also indeterminate j0 < 3 will be evaluated to be True!
120
+ //
121
+ // [ Coefficient are assumed positive ]
122
+ //
123
+ // For the purpose of computing inequalities, we consider the coefficient of
124
+ // the nested int to be a positive integer.
125
+ //
126
+ // Thus, no modifications are needed to the logic since
127
+ // j0 >= k implies coeff * j0 >= k
128
+ //
129
+ c10::SymNode eq(const c10::SymNode& other) override;
130
+ c10::SymNode ne(const c10::SymNode& other) override;
131
+ c10::SymNode ge(const c10::SymNode& other) override;
132
+ c10::SymNode gt(const c10::SymNode& other) override;
133
+ c10::SymNode lt(const c10::SymNode& other) override;
134
+ c10::SymNode le(const c10::SymNode& other) override;
135
+ c10::SymNode mul(const c10::SymNode& other) override;
136
+
137
+ c10::optional<int64_t> nested_int() override {
138
+ return val_;
139
+ }
140
+
141
+ c10::optional<int64_t> nested_int_coeff() override {
142
+ return coeff_;
143
+ }
144
+
145
+ bool is_symbolic() override {
146
+ return false;
147
+ }
148
+
149
+ #define DEFINE_BINARY_NOT_SUPPORTED(name) \
150
+ c10::SymNode name(const c10::SymNode& other) override { \
151
+ TORCH_CHECK(false, #name " not supported by NestedIntSymNode"); \
152
+ }
153
+
154
+ DEFINE_BINARY_NOT_SUPPORTED(add)
155
+ DEFINE_BINARY_NOT_SUPPORTED(sub)
156
+ DEFINE_BINARY_NOT_SUPPORTED(truediv)
157
+ DEFINE_BINARY_NOT_SUPPORTED(pow)
158
+ DEFINE_BINARY_NOT_SUPPORTED(floordiv)
159
+ DEFINE_BINARY_NOT_SUPPORTED(mod)
160
+ DEFINE_BINARY_NOT_SUPPORTED(sym_min)
161
+ DEFINE_BINARY_NOT_SUPPORTED(sym_max)
162
+ DEFINE_BINARY_NOT_SUPPORTED(sym_and)
163
+ DEFINE_BINARY_NOT_SUPPORTED(sym_or)
164
+
165
+ #undef DEFINE_BINARY_NOT_SUPPORTED
166
+
167
+ #define DEFINE_NOT_SUPPORTED(name) \
168
+ c10::SymNode name() override { \
169
+ TORCH_CHECK(false, #name " is not supported by NestedIntSymNode"); \
170
+ }
171
+
172
+ DEFINE_NOT_SUPPORTED(sym_not)
173
+ DEFINE_NOT_SUPPORTED(ceil)
174
+ DEFINE_NOT_SUPPORTED(floor)
175
+ DEFINE_NOT_SUPPORTED(neg)
176
+ DEFINE_NOT_SUPPORTED(clone)
177
+ DEFINE_NOT_SUPPORTED(sym_float)
178
+
179
+ #undef DEFINE_NOT_SUPPORTED
180
+
181
+ private:
182
+ int64_t val_;
183
+ int64_t coeff_;
184
+ };
185
+
186
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/core/Range.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <iosfwd>
5
+
6
+ namespace at {
7
+
8
// A half-open interval [begin, end) of int64_t indices describing a
// contiguous span of work items.
struct Range {
  Range(int64_t begin, int64_t end)
    : begin(begin)
    , end(end) {}

  // Number of elements in the interval (end - begin; may be negative if
  // constructed with end < begin — no validation is performed).
  int64_t size() const { return end - begin; }

  // Returns a new Range with both endpoints divided by `divisor`
  // (truncating integer division). Marked const: does not mutate *this.
  Range operator/(int64_t divisor) const {
    return Range(begin / divisor, end / divisor);
  }

  int64_t begin;
  int64_t end;
};
22
+
23
+ std::ostream& operator<<(std::ostream& out, const Range& range);
24
+
25
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/TensorBody.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ namespace at {
7
// A nullable, non-owning view of a Tensor, usable where an optional<Tensor>
// would force a refcount bump. Internally holds a Tensor constructed as a
// +0 "unsafe borrow" of the source's TensorImpl; the borrow must not
// outlive the source tensor.
class TORCH_API OptionalTensorRef {
 public:
  OptionalTensorRef() = default;

  // ref_ holds a borrowed (+0) reference, so release the impl rather than
  // letting ~Tensor decrement a refcount this object never incremented.
  ~OptionalTensorRef() {
    ref_.unsafeReleaseTensorImpl();
  }

  // Borrow from `src`; `src` must be defined (checked in debug builds only).
  OptionalTensorRef(const TensorBase& src)
    : ref_(Tensor::unsafe_borrow_t{}, src) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined());
  }

  // Copying creates another +0 borrow of the same impl (no refcount traffic).
  OptionalTensorRef(const OptionalTensorRef& rhs)
    : ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {}

  // Copy-and-swap assignment: `rhs` is a by-value copy, so after the swap the
  // previous borrow is dropped by rhs's destructor.
  OptionalTensorRef& operator=(OptionalTensorRef rhs) {
    std::swap(ref_, rhs.ref_);
    return *this;
  }

  // True when a tensor is referenced (default-constructed refs are empty).
  bool has_value() const {
    return ref_.defined();
  }

  const Tensor& getTensorRef() const & {
    return ref_;
  }

  const Tensor& operator*() const & {
    return ref_;
  }

  const Tensor* operator->() const & {
    return &ref_;
  }

  // Same as has_value(): enables `if (opt) ...` usage.
  operator bool() const {
    return ref_.defined();
  }

 private:
  Tensor ref_;
};
51
+
52
// Use to convert a TensorBase (that may be undefined) to an at::Tensor
// without bumping refcount. The borrow must not outlive `src`.
class TORCH_API TensorRef {
 public:
  // ref_ is a borrowed (+0) reference; release the impl so ~Tensor does not
  // decrement a refcount that was never incremented.
  ~TensorRef() {
    ref_.unsafeReleaseTensorImpl();
  }

  TensorRef(const TensorBase& src)
    : ref_(Tensor::unsafe_borrow_t{}, src) {}

  const Tensor& operator*() const & {
    return ref_;
  }
 private:
  Tensor ref_;
};
69
+
70
// register_hook overload selected (via hook_return_void_t SFINAE) when the
// user's hook returns void. The hook receives the gradient as a borrowed
// Tensor view of the incoming TensorBase.
template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> {
  // The wrapper returns an (empty) Tensor so the registered std::function has
  // a Tensor return type even though the user hook returns void.
  static_assert(std::is_same<decltype(hook(Tensor())), void>::value,
                "Expected hook to return void");
  return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
    // Borrow grad_base as a Tensor (+0 refcount) for the duration of the call.
    TensorRef grad(grad_base);
    fn(*grad);
    return Tensor();
  });
}
82
+
83
// register_hook overload selected (via hook_return_var_t SFINAE) when the
// user's hook returns a Tensor. Adapts TensorBase -> Tensor on the way into
// the hook and Tensor -> TensorBase on the way out.
template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t<T> {
  return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
    // Borrow grad_base as a Tensor (+0 refcount) for the duration of the call.
    TensorRef grad(grad_base);
    Tensor ret = fn(*grad);
    return TensorBase(std::move(ret));
  });
}
91
+
92
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h ADDED
@@ -0,0 +1,1055 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/core/Layout.h>
5
+ #include <c10/core/MemoryFormat.h>
6
+ #include <c10/core/ScalarType.h>
7
+ #include <c10/core/ScalarTypeToTypeMeta.h>
8
+ #include <c10/core/Storage.h>
9
+ #include <c10/core/SymIntArrayRef.h>
10
+ #include <c10/core/TensorImpl.h>
11
+ #include <c10/core/TensorOptions.h>
12
+ #include <c10/core/UndefinedTensorImpl.h>
13
+ #include <c10/core/WrapDimMinimal.h>
14
+ #include <c10/util/C++17.h>
15
+ #include <c10/util/Exception.h>
16
+ #include <c10/util/ExclusivelyOwned.h>
17
+ #include <c10/util/ExclusivelyOwnedTensorTraits.h>
18
+ #include <c10/util/MaybeOwned.h>
19
+ #include <c10/util/Optional.h>
20
+ #include <c10/util/intrusive_ptr.h>
21
+
22
+ #include <ATen/core/NamedTensor.h>
23
+ #include <ATen/core/QuantizerBase.h>
24
+ #include <ATen/core/TensorAccessor.h>
25
+ #include <ATen/StorageUtils.h>
26
+
27
+ namespace c10 {
28
+ class Scalar;
29
+ }
30
+
31
+ namespace torch { namespace autograd {
32
+
33
+ struct Node;
34
+
35
+ }} // namespace torch::autograd
36
+
37
+ namespace at {
38
+
39
+ class Tensor;
40
+ class TensorBase;
41
+
42
+ // Convert Tensor to TensorBase without any need to include Tensor.h
43
+ TORCH_API const TensorBase& get_tensor_base(const Tensor& t);
44
+
45
+ namespace impl {
46
// Returns true when every autograd dispatch key is in this thread's TLS
// excluded set, i.e. dispatch is currently bypassing the Autograd kernels.
// On mobile builds this is hard-coded to true; see the referenced comment
// for the background.
inline bool variable_excluded_from_dispatch() {
#ifdef C10_MOBILE
  // Please read the comment in `VariableFallbackKernel.cpp` about the background of this change.
  return true;
#else
  return c10::impl::tls_local_dispatch_key_set().excluded_.isSupersetOf(c10::autograd_dispatch_keyset);
#endif
}
54
+
55
+ }
56
+
57
+ // NOTE: [Tensor vs. TensorBase]
58
+ //
59
+ // Tensor, being the central data structure in PyTorch, gets used and
60
+ // it's header included almost everywhere. Unfortunately this means
61
+ // every time an operator signature is updated or changed in
62
+ // native_functions.yaml, you (and every other PyTorch developer) need
63
+ // to recompile all of ATen and it's dependencies.
64
+ //
65
+ // TensorBase aims to break up these header dependencies, and improve
66
+ // incremental build times for all PyTorch developers. TensorBase
67
+ // represents a reference counted handle to TensorImpl, exactly the
68
+ // same as Tensor. However, TensorBase doesn't have code generated
69
+ // methods in it's API and thus no dependence on native_functions.yaml.
70
+ //
71
+ // Usage tips
72
+ // ----------
73
+ // - You can `#define TORCH_ASSERT_NO_OPERATORS` at the top of a .cpp
74
+ // or .cu file to ensure it has no header dependencies on
75
+ // native_functions.yaml (direct or indirect).
76
+ // - Tensor inherits from TensorBase, so functions taking
77
+ // `const TensorBase &` are callable with Tensor as well.
78
+ // - TensorBase can be converted to tensor with `Tensor(tensor_base)`,
79
+ // but this requires a reference-count bump. OptionalTensorRef on
80
+ // the other hand can materialize a `const Tensor &` without
81
+ // touching the reference-count.
82
+ class TORCH_API TensorBase {
83
+ public:
84
+ struct unsafe_borrow_t { explicit unsafe_borrow_t() = default; };
85
+
86
+ protected:
87
+ // Create a Tensor with a +0 reference count. Special care must be
88
+ // taken to avoid decrementing this reference count at destruction
89
+ // time. Intended to support MaybeOwnedTraits<Tensor>.
90
+ explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs)
91
+ : impl_(c10::intrusive_ptr<at::TensorImpl, UndefinedTensorImpl>::reclaim(rhs.impl_.get())) {}
92
+ friend MaybeOwnedTraits<TensorBase>;
93
+
94
+ public:
95
+ TensorBase() = default;
96
+ // This constructor should not be used by end users and is an implementation
97
+ // detail invoked by autogenerated code.
98
+ explicit TensorBase(
99
+ c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
100
+ : impl_(std::move(tensor_impl)) {
101
+ if (impl_.get() == nullptr) {
102
+ throw std::runtime_error("TensorImpl with nullptr is not supported");
103
+ }
104
+ }
105
+ TensorBase(const TensorBase&) = default;
106
+ TensorBase(TensorBase&&) noexcept = default;
107
+
108
+ public:
109
+ // Creates a new wrapper from TensorImpl. Intentionally a free method because
110
+ // it should be used with care. Checks necessary invariants
111
+ static TensorBase wrap_tensor_impl(
112
+ c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
113
+ TensorBase r(std::move(tensor_impl));
114
+ r.enforce_invariants();
115
+ return r;
116
+ }
117
+
118
+ int64_t dim() const {
119
+ return impl_->dim();
120
+ }
121
+ int64_t storage_offset() const {
122
+ return impl_->storage_offset();
123
+ }
124
+
125
+ TensorBase contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
126
+ if (is_contiguous(memory_format)) {
127
+ return *this;
128
+ } else {
129
+ return __dispatch_contiguous(memory_format);
130
+ }
131
+ }
132
+
133
+ /// Should be used if *this can reasonably be expected to be contiguous and
134
+ /// performance is important.
135
+ /// Compared to contiguous, it saves a reference count
136
+ /// increment/decrement if *this is already contiguous, at the cost
137
+ /// in all cases of an extra pointer of stack usage, an extra branch
138
+ /// to access, and an extra branch at destruction time.
139
+ c10::MaybeOwned<TensorBase> expect_contiguous(
140
+ MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
141
+
142
+ // Use .contiguous() instead. Trying to borrow from a prvalue
143
+ // will only lead to trouble and dangling references.
144
+ c10::MaybeOwned<TensorBase> expect_contiguous(
145
+ MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
146
+
147
+ const TensorBase& fill_(const c10::Scalar& scalar) const;
148
+ const TensorBase& zero_() const;
149
+
150
+ TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
151
+
152
+ bool is_complex() const {
153
+ return at::isComplexType(this->scalar_type());
154
+ }
155
+
156
+ bool is_floating_point() const {
157
+ return at::isFloatingType(this->scalar_type());
158
+ }
159
+
160
+ bool is_signed() const {
161
+ return at::isSignedType(this->scalar_type());
162
+ }
163
+
164
+ c10::SymInt sym_size(int64_t dim) const {
165
+ return impl_->sym_size(dim);
166
+ }
167
+
168
+ c10::SymInt sym_stride(int64_t dim) const {
169
+ const auto sizes = this->sym_strides();
170
+ const auto ndim = static_cast<int64_t>(sizes.size());
171
+ // false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
172
+ return sizes[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)];
173
+
174
+ }
175
+
176
+ int64_t size(int64_t dim) const {
177
+ return impl_->size(dim);
178
+ }
179
+
180
+ int64_t stride(int64_t dim) const {
181
+ const auto strides = this->strides();
182
+ const auto ndim = static_cast<int64_t>(strides.size());
183
+ // false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
184
+ return strides[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)];
185
+ }
186
+
187
+ TensorImpl * unsafeGetTensorImpl() const {
188
+ return impl_.get();
189
+ }
190
+ TensorImpl * unsafeReleaseTensorImpl() {
191
+ return impl_.release();
192
+ }
193
+ const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr() const {
194
+ return impl_;
195
+ }
196
+
197
+ c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> unsafeReleaseIntrusivePtr() {
198
+ return std::move(impl_);
199
+ }
200
+
201
+ bool defined() const {
202
+ return impl_;
203
+ }
204
+
205
+ void reset() {
206
+ impl_.reset();
207
+ }
208
+
209
+ #if defined (_MSC_VER)
210
+ TensorBase& operator=(const TensorBase& x) & {
211
+ impl_ = x.impl_;
212
+ return *this;
213
+ };
214
+ TensorBase& operator=(TensorBase&& x) & noexcept {
215
+ impl_ = std::move(x.impl_);
216
+ return *this;
217
+ }
218
+ #else
219
+ TensorBase& operator=(const TensorBase& x) & = default;
220
+ TensorBase& operator=(TensorBase&& x) & noexcept = default;
221
+ #endif
222
+
223
+ // Ban assignment to rvalues, since at::Tensor (weirdly) performs a deep copy here
224
+ TensorBase& operator=(const TensorBase&) && = delete;
225
+ TensorBase& operator=(TensorBase&&) && noexcept = delete;
226
+
227
+ bool is_same(const TensorBase& other) const noexcept {
228
+ return impl_ == other.impl_;
229
+ }
230
+ size_t use_count() const noexcept {
231
+ return impl_.use_count();
232
+ }
233
+ size_t weak_use_count() const noexcept {
234
+ return impl_.weak_use_count();
235
+ }
236
+
237
+ std::string toString() const;
238
+
239
+ IntArrayRef sizes() const {
240
+ return impl_->sizes();
241
+ }
242
+ c10::SymIntArrayRef sym_sizes() const {
243
+ return impl_->sym_sizes();
244
+ }
245
+ c10::SymIntArrayRef sym_strides() const {
246
+ return impl_->sym_strides();
247
+ }
248
+ IntArrayRef strides() const {
249
+ return impl_->strides();
250
+ }
251
+ // See impl::get_opt_names in ATen/NamedTensor.h for docs.
252
+ c10::optional<DimnameList> opt_names() const {
253
+ return impl::get_opt_names(unsafeGetTensorImpl());
254
+ }
255
+ // See impl::get_names in ATen/NamedTensor.h for docs.
256
+ DimnameList names() const {
257
+ return impl::get_names(unsafeGetTensorImpl());
258
+ }
259
+ int64_t ndimension() const {
260
+ return dim();
261
+ }
262
+
263
+ bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const {
264
+ return impl_->is_contiguous(memory_format);
265
+ }
266
+
267
+ bool is_non_overlapping_and_dense() const {
268
+ return impl_->is_non_overlapping_and_dense();
269
+ }
270
+
271
  // Suggests the memory format that best matches this tensor's current
  // strides: ChannelsLast / ChannelsLast3d when the strides look like a
  // channels-last layout (strided tensors only), Contiguous otherwise.
  at::MemoryFormat suggest_memory_format(
      bool channels_last_strides_exact_match = false) const {
    // Setting channels_last_strides_exact_match to true forces function to
    // check 0,1 - sized dimension strides.
    if (layout() == at::kStrided) {
      if (impl_->is_strides_like_channels_last()) {
        // Either accept the heuristic match, or additionally require that the
        // strides equal the canonical channels-last strides exactly.
        if (!channels_last_strides_exact_match ||
            get_channels_last_strides_2d(sizes()) == strides()) {
          return at::MemoryFormat::ChannelsLast;
        }
      }
      else if (impl_->is_strides_like_channels_last_3d()) {
        if (!channels_last_strides_exact_match ||
            get_channels_last_strides_3d(sizes()) == strides()) {
          return at::MemoryFormat::ChannelsLast3d;
        }
      }
    }
    // Non-strided layouts and non-channels-last strides fall back here.
    return at::MemoryFormat::Contiguous;
  }
291
+
292
+ // Total bytes consumed by the "view" of elements of the array. Does not
293
+ // include size of metadata. The number reported here does not necessarily
294
+ // correspond to the true physical memory consumed by a tensor; instead,
295
+ // it reports the memory the tensor would take *if* it were contiguous.
296
+ // Defined to be numel() * itemsize()
297
+ size_t nbytes() const {
298
+ TORCH_CHECK(layout () != at::kSparse,
299
+ "nbytes is not defined for sparse tensors. If you want the size of the constituent " \
300
+ "tensors, add the nbytes of the indices and values. If you want the size of the " \
301
+ "equivalent dense tensor, multiply numel() by element_size()");
302
+ return impl_->numel() * impl_->itemsize();
303
+ }
304
+
305
+ c10::SymInt sym_nbytes() const {
306
+ TORCH_CHECK(layout () != at::kSparse,
307
+ "nbytes is not defined for sparse tensors. If you want the size of the constituent " \
308
+ "tensors, add the nbytes of the indices and values. If you want the size of the " \
309
+ "equivalent dense tensor, multiply numel() by element_size()");
310
+ return impl_->sym_numel() * impl_->itemsize();
311
+ }
312
+
313
+ int64_t numel() const {
314
+ return impl_->numel();
315
+ }
316
+
317
+ c10::SymInt sym_numel() const {
318
+ return impl_->sym_numel();
319
+ }
320
+
321
+ c10::SymInt sym_storage_offset() const {
322
+ return impl_->sym_storage_offset();
323
+ }
324
+
325
+ // Length of one array element in bytes. This is the traditional
326
+ // Numpy naming.
327
+ size_t itemsize() const {
328
+ return impl_->itemsize();
329
+ }
330
+
331
+ // Same as itemsize(). This is the PyTorch naming.
332
+ int64_t element_size() const {
333
+ return static_cast<int64_t>(impl_->itemsize());
334
+ }
335
+
336
+ DispatchKeySet key_set() const {
337
+ return impl_->key_set();
338
+ }
339
+ ScalarType scalar_type() const {
340
+ return typeMetaToScalarType(impl_->dtype());
341
+ }
342
+ bool has_storage() const {
343
+ return defined() && impl_->has_storage();
344
+ }
345
+ const Storage& storage() const {
346
+ return impl_->storage();
347
+ }
348
+ bool is_alias_of(const at::TensorBase& other) const{
349
+ return impl_->storage().is_alias_of(other.storage());
350
+ }
351
+
352
+ // Move the storage backend to shm based
353
+ // to enable memory sharing across processes.
354
+ //
355
+ // NB1: the ideal behavior of this API still requires further discussion
356
+ // but for now we are inclined to keep it consistent with existing THP behavior
357
+ // https://github.com/pytorch/pytorch/blob/4dca9bde0552afc67b5b74f4a0696fe6055709c4/torch/storage.py#L196-L212
358
+ // so we don't assert on anything here and rely on caller knowing
359
+ // what it's doing.
360
+ //
361
+ // NB2: this currently provides Linux fd based shm support only
362
+ // to simplify the storage lifetime management logic in ATen
363
+ // and similarly for now we are not adding support for file system based
364
+ // shm support like in THP due to additional GC manager support needed
365
+ // to prevent leaks.
366
+ // As such, calling this from non supported systems (e.g. Windows) would fail.
367
+ void share_memory_() {
368
+ at::share_memory_(*this);
369
+ }
370
+
371
+ inline bool _is_zerotensor() const {
372
+ return impl_->_is_zerotensor();
373
+ }
374
+
375
+ inline void _set_zero(bool zero) const {
376
+ impl_->_set_zero(zero);
377
+ }
378
+
379
+ inline bool is_conj() const {
380
+ return impl_->is_conj();
381
+ }
382
+
383
+ // sets the conjugate bit of a tensor.
384
+ // NOTE: Conjugate bit is supposed to be a read-only field. Only change this, if you are sure
385
+ // that's what you want. Changing this might lead to incorrect behavior since conjugation is
386
+ // a lazy operation and we rely on this bit to determine if a conjugation needs to be materialized.
387
+ inline void _set_conj(bool conjugate) const {
388
+ impl_->_set_conj(conjugate);
389
+ }
390
+
391
+ inline bool is_neg() const {
392
+ return impl_->is_neg();
393
+ }
394
+
395
+ // sets the negative bit of a tensor.
396
+ // NOTE: Negative bit is supposed to be a read-only field. Only change this, if you are sure
397
+ // that's what you want. Changing this might lead to incorrect behavior since we rely on this
398
+ // bit to determine if a negation needs to be materialized.
399
+ inline void _set_neg(bool negative) const {
400
+ impl_->_set_neg(negative);
401
+ }
402
+
403
+ /// Returns a `Tensor`'s layout.
404
+ Layout layout() const {
405
+ return impl_->layout();
406
+ }
407
+
408
+ /// Returns a `Tensor`'s dtype (`TypeMeta`).
409
+ caffe2::TypeMeta dtype() const {
410
+ return impl_->dtype();
411
+ }
412
+
413
+ /// Returns a `Tensor`'s device.
414
+ inline Device device() const {
415
+ return impl_->device();
416
+ }
417
+
418
+ /// Returns a `Tensor`'s device index.
419
+ DeviceIndex get_device() const {
420
+ // NB: this is not a native function to avoid dispatching overhead.
421
+ return impl_->get_device();
422
+ }
423
+
424
+ /// Returns if a `Tensor` has CPU backend.
425
+ bool is_cpu() const {
426
+ // NB: this is not a native function to avoid dispatching overhead.
427
+ return impl_->is_cpu();
428
+ }
429
+
430
+ /// Returns if a `Tensor` has CUDA backend.
431
+ bool is_cuda() const {
432
+ // NB: this is not a native function to avoid dispatching overhead.
433
+ return impl_->is_cuda();
434
+ }
435
+
436
+ /// Returns if a `Tensor` has IPU backend.
437
+ bool is_ipu() const {
438
+ // NB: this is not a native function to avoid dispatching overhead.
439
+ return impl_->is_ipu();
440
+ }
441
+
442
+ /// Returns if a `Tensor` has XPU backend.
443
+ bool is_xpu() const {
444
+ // NB: this is not a native function to avoid dispatching overhead.
445
+ return impl_->is_xpu();
446
+ }
447
+
448
+ /// Returns if a `Tensor` has XLA backend.
449
+ bool is_xla() const {
450
+ return impl_->is_xla();
451
+ }
452
+
453
+ /// Returns if a `Tensor` has MTIA backend.
454
+ bool is_mtia() const {
455
+ return impl_->is_mtia();
456
+ }
457
+
458
+ /// Returns if a `Tensor` has HPU backend.
459
+ bool is_hpu() const {
460
+ return impl_->is_hpu();
461
+ }
462
+
463
+ /// Returns if a `Tensor` has Lazy backend.
464
+ bool is_lazy() const {
465
+ return impl_->is_lazy();
466
+ }
467
+
468
+ /// Returns if a `Tensor` has HIP backend.
469
+ bool is_hip() const {
470
+ // NB: this is not a native function to avoid dispatching overhead.
471
+ return impl_->is_hip();
472
+ }
473
+
474
+ /// Returns if a `Tensor` has VE backend.
475
+ bool is_ve() const {
476
+ // NB: this is not a native function to avoid dispatching overhead.
477
+ return impl_->is_ve();
478
+ }
479
+
480
+ /// Returns if a `Tensor` has PrivateUse1 backend.
481
+ bool is_privateuseone() const {
482
+ // NB: this is not a native function to avoid dispatching overhead.
483
+ return impl_->is_privateuseone();
484
+ }
485
+
486
+ /// Returns if a `Tensor` has sparse backend.
487
+ bool is_sparse() const {
488
+ // NB: this is not a native function to avoid dispatching overhead.
489
+ return impl_->is_sparse();
490
+ }
491
+
492
+ /// Returns is a `Tensor` has a sparse CSR backend.
493
+ bool is_sparse_csr() const {
494
+ // NB: this is not a native function to avoid dispatching overhead.
495
+ return impl_->is_sparse_csr();
496
+ }
497
+
498
+ /// Returns if a `Tensor` is mkldnn tensor.
499
+ bool is_mkldnn() const {
500
+ // NB: this is not a native function to avoid dispatching overhead.
501
+ return impl_->is_mkldnn();
502
+ }
503
+
504
+ /// Returns if a `Tensor` is mps tensor.
505
+ bool is_mps() const {
506
+ // NB: this is not a native function to avoid dispatching overhead.
507
+ return impl_->is_mps();
508
+ }
509
+
510
+ /// Returns if a `Tensor` is ort tensor.
511
+ bool is_ort() const {
512
+ // NB: this is not a native function to avoid dispatching overhead.
513
+ return impl_->is_ort();
514
+ }
515
+
516
+ /// Returns if a `Tensor` is vulkan tensor.
517
+ bool is_vulkan() const {
518
+ // NB: this is not a native function to avoid dispatching overhead.
519
+ return impl_->is_vulkan();
520
+ }
521
+
522
+ /// Returns if a `Tensor` is metal tensor.
523
+ bool is_metal() const {
524
+ // NB: this is not a native function to avoid dispatching overhead.
525
+ return impl_->is_metal();
526
+ }
527
+
528
+ /// Returns if a `Tensor` has quantized backend.
529
+ bool is_quantized() const {
530
+ // NB: this is not a native function to avoid dispatching overhead.
531
+ return impl_->is_quantized();
532
+ }
533
+
534
+ /// Returns if a `Tensor` is a meta tensor. Meta tensors can
535
+ /// also have other designations.
536
+ bool is_meta() const {
537
+ return impl_->is_meta();
538
+ }
539
+
540
+ /// Returns if a `Tensor` is an inference tensor.
541
+ bool is_inference() const {
542
+ return impl_->is_inference();
543
+ }
544
+
545
+ // Returns if a `Tensor` is a NestedTensor.
546
+ bool is_nested() const {
547
+ return impl_->is_nested();
548
+ }
549
+
550
+ /// If a tensor is a quantized tensor, returns its quantizer
551
+ /// TODO: it's not in native_functions.yaml yet as it's not exposed to python
552
+ QuantizerPtr quantizer() const;
553
+
554
+ /// Returns if a `Tensor` has any dimension names
555
+ bool has_names() const {
556
+ // If a user is using unnamed tensors, then we can short-circuit right here.
557
+ // Otherwise, impl::has_names attempts to retrieve names.
558
+ if (!impl_->has_named_tensor_meta()) {
559
+ return false;
560
+ }
561
+ return impl::has_names(unsafeGetTensorImpl());
562
+ }
563
+
564
+ /// Returns a `Tensor`'s dimension names data structure
565
+ const NamedTensorMeta* get_named_tensor_meta() const {
566
+ return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
567
+ }
568
+
569
/// Returns a mutable pointer to the `Tensor`'s dimension-name metadata.
NamedTensorMeta* get_named_tensor_meta() {
  return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
}
572
+
573
+ /// Returns the `TensorOptions` corresponding to this `Tensor`. Defined in
574
+ /// TensorOptions.h.
575
+ TensorOptions options() const {
576
+ return TensorOptions().dtype(dtype())
577
+ .device(device())
578
+ .layout(layout());
579
+ }
580
+
581
/// Returns a read-only pointer to the tensor's raw data.
const void* const_data_ptr() const {
  return this->unsafeGetTensorImpl()->data();
}
584
+
585
/// Returns a writable pointer to the tensor's raw data.
void* mutable_data_ptr() const {
  return this->unsafeGetTensorImpl()->mutable_data();
}
588
+
589
// Legacy accessor: forwards to mutable_data_ptr().
// TODO(#97856) Make this return a const pointer. This currently
//              returns a non-const pointer because of the large
//              number of clients that we still want to audit before
//              migrating to mutable_data_ptr().
void* data_ptr() const {
  return mutable_data_ptr();
}
596
+
597
+ template <typename T, std::enable_if_t<!std::is_const<T>::value, int> = 0>
598
+ const T* const_data_ptr() const;
599
+
600
+ template <typename T, std::enable_if_t<std::is_const<T>::value, int> = 0>
601
+ const std::remove_const_t<T>* const_data_ptr() const;
602
+
603
+ template <typename T>
604
+ T* mutable_data_ptr() const;
605
+
606
+ // Legacy interface during the migration to indicate that a callsite
607
+ // has not been audited for mutability.
608
+ //
609
+ // Do not add new uses of this, use const_data_ptr() if possible,
610
+ // mutable_data_ptr() otherwise.
611
+ //
612
+ // TODO(#97856) Make this return a const pointer. This is currently
613
+ // const because of the vast number of clients that
614
+ // rely on this.
615
+ template <typename T>
616
+ T* data_ptr() const;
617
+
618
+ // Purposely not defined here to avoid inlining
619
+ void print() const;
620
+
621
// Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and
// dimension. The accessor borrows the tensor's data pointer, so it is only
// valid while this tensor is alive (hence the rvalue overload is deleted).
template<typename T, size_t N>
TensorAccessor<T,N> accessor() const& {
  static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
  TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim());
  T* ptr = nullptr;
  // A const element type routes through the const data accessor; a mutable
  // one requires mutable_data_ptr(). Resolved at compile time.
  if constexpr (std::is_const<T>::value) {
    ptr = const_data_ptr<T>();
  } else {
    ptr = mutable_data_ptr<T>();
  }
  return TensorAccessor<T,N>(ptr,sizes().data(),strides().data());
}
// Deleted for temporaries: the accessor would dangle once the temporary dies.
template<typename T, size_t N>
TensorAccessor<T,N> accessor() && = delete;
637
+
638
// Return a `GenericPackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and
// dimension. You can optionally specify RestrictPtrTraits as a template parameter to
// cast the data pointer to a __restrict__ pointer.
// In order to use this, your CUDA kernel has to take a corresponding GenericPackedTensorAccessor
// as an argument.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor<T,N,PtrTraits,index_t> generic_packed_accessor() const& {
  static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
  TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim());
  T* ptr = nullptr;
  // Choose the const or mutable raw-data accessor at compile time based on
  // the constness of the requested element type.
  if constexpr (std::is_const<T>::value) {
    ptr = const_data_ptr<T>();
  } else {
    ptr = mutable_data_ptr<T>();
  }
  return GenericPackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(ptr),sizes().data(),strides().data());
}
// Deleted for temporaries: the packed accessor would outlive the tensor's data.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor<T,N> generic_packed_accessor() && = delete;
657
+
658
// 32-bit-indexed packed accessor. Guarded: the element count must fit in
// int32_t, otherwise indices would overflow inside the kernel.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() const& {
  TORCH_CHECK(
      impl_->numel() <=
          static_cast<int64_t>(std::numeric_limits<int32_t>::max()),
      "numel needs to be smaller than int32_t max; otherwise, please use packed_accessor64");
  return generic_packed_accessor<T,N,PtrTraits,int32_t>();
}
// Deleted for temporaries (see generic_packed_accessor).
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() && = delete;
668
+
669
// 64-bit-indexed packed accessor; no numel restriction.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() const& {
  return generic_packed_accessor<T,N,PtrTraits,int64_t>();
}
// Deleted for temporaries (see generic_packed_accessor).
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() && = delete;
675
+
676
+ // ~~~~~ Autograd API ~~~~~
677
+
678
+ /// \fn bool is_leaf() const;
679
+ ///
680
+ /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
681
+ ///
682
+ /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
683
+ /// created by the user. This means that they are not the result of an operation and so
684
+ /// `grad_fn()` is `nullptr`.
685
+ ///
686
+ /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
687
+ /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
688
+ ///
689
+ /// Example:
690
+ /// @code
691
+ /// auto a = torch::rand(10, torch::requires_grad());
692
+ /// std::cout << a.is_leaf() << std::endl; // prints `true`
693
+ ///
694
+ /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
695
+ /// std::cout << b.is_leaf() << std::endl; // prints `false`
696
+ /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
697
+ ///
698
+ /// auto c = torch::rand(10, torch::requires_grad()) + 2;
699
+ /// std::cout << c.is_leaf() << std::endl; // prints `false`
700
+ /// // c was created by the addition operation
701
+ ///
702
+ /// auto d = torch::rand(10).cuda();
703
+ /// std::cout << d.is_leaf() << std::endl; // prints `true`
704
+ /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
705
+ ///
706
+ /// auto e = torch::rand(10).cuda().requires_grad_();
707
+ /// std::cout << e.is_leaf() << std::endl; // prints `true`
708
+ /// // e requires gradients and has no operations creating it
709
+ ///
710
+ /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
711
+ /// std::cout << f.is_leaf() << std::endl; // prints `true`
712
+ /// // f requires grad, has no operation creating it
713
+ /// @endcode
714
+
715
+ /// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
716
+ ///
717
+ /// Computes the gradient of current tensor with respect to graph leaves.
718
+ ///
719
+ /// The graph is differentiated using the chain rule. If the tensor is
720
+ /// non-scalar (i.e. its data has more than one element) and requires
721
+ /// gradient, the function additionally requires specifying ``gradient``.
722
+ /// It should be a tensor of matching type and location, that contains
723
+ /// the gradient of the differentiated function w.r.t. this Tensor.
724
+ ///
725
+ /// This function accumulates gradients in the leaves - you might need to
726
+ /// zero them before calling it.
727
+ ///
728
+ /// \param gradient Gradient w.r.t. the
729
+ /// tensor. If it is a tensor, it will be automatically converted
730
+ /// to a Tensor that does not require grad unless ``create_graph`` is True.
731
+ /// None values can be specified for scalar Tensors or ones that
732
+ /// don't require grad. If a None value would be acceptable then
733
+ /// this argument is optional.
734
+ /// \param retain_graph If ``false``, the graph used to compute
735
+ /// the grads will be freed. Note that in nearly all cases setting
736
+ /// this option to True is not needed and often can be worked around
737
+ /// in a much more efficient way. Defaults to the value of
738
+ /// ``create_graph``.
739
+ /// \param create_graph If ``true``, graph of the derivative will
740
+ /// be constructed, allowing to compute higher order derivative
741
+ /// products. Defaults to ``false``.
742
+ /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
743
+ /// ``at::Tensor::grad``. All other Tensors will be ignored. If not
744
+ /// provided, the gradient is accumulated into all the leaf Tensors
745
+ /// that were used to compute the current tensor.
746
+ /// When inputs are provided and a given input is not a leaf,
747
+ /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
748
+ /// It is an implementation detail on which the user should not rely.
749
+ /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
750
+
751
+ /// \fn Tensor detach() const;
752
+ ///
753
+ /// Returns a new Tensor, detached from the current graph.
754
+ /// The result will never require gradient.
755
+
756
+ /// \fn Tensor & detach_() const;
757
+ ///
758
+ /// Detaches the Tensor from the graph that created it, making it a leaf.
759
+ /// Views cannot be detached in-place.
760
+
761
+ /// \fn void retain_grad() const;
762
+ ///
763
+ /// Enables this Tensor to have their :attr:`grad` populated during
764
+ /// :func:`backward`. This is a no-op for leaf tensors.
765
+
766
+ /// \fn bool retains_grad() const;
767
+ ///
768
+ /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
769
+ /// populated during :func:`backward`, ``false`` otherwise.
770
+
771
/// Sets whether autograd should record operations on this tensor and
/// returns *this for chaining.
const TensorBase& set_requires_grad(bool requires_grad) const {
  impl_->set_requires_grad(requires_grad);
  return *this;
}
775
/// Returns true if autograd is recording operations on this tensor.
bool requires_grad() const {
  return impl_->requires_grad();
}
778
+
779
+ // The Forward AD API functions below are low level and are not to be used by end
780
+ // users who should use the API provided in torch/csrc/autograd.h
781
+
782
/// This function returns the forward gradient for this Tensor at the given level.
/// Low-level Forward AD API — end users should use torch/csrc/autograd.h instead.
const Tensor& _fw_grad(uint64_t level) const {
  return impl_->_fw_grad(level, *this);
}
786
+
787
/// This function can be used to set the value of the forward grad.
/// Note that the given new_grad might not be used directly if it has different
/// metadata (size/stride/storage offset) compared to this Tensor. In that case,
/// new_grad content will be copied into a new Tensor.
/// Low-level Forward AD API — end users should use torch/csrc/autograd.h instead.
void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
  impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
}
794
+
795
+ /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
796
+ /// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
797
+ /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
798
+ ///
799
+ /// One notable difference with the legacy `.data()` function is that changes to the
800
+ /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
801
+ /// will not update the original `Variable`, due to the fact that this function
802
+ /// shallow-copies the `Variable`'s underlying TensorImpl.
803
+ at::TensorBase tensor_data() const;
804
+
805
+ /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
806
+ /// in Python, which create a new `Variable` that shares the same storage and
807
+ /// tensor metadata with the original `Variable`, but with a completely new
808
+ /// autograd history.
809
+ ///
810
+ /// NOTE: If we change the tensor metadata (e.g. sizes / strides /
811
+ /// storage / storage_offset) of a variable created from `var.variable_data()`, those
812
+ /// changes will not update the original variable `var`. In `.variable_data()`, we set
813
+ /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
814
+ /// in order to prevent users from changing metadata of `var.variable_data()`
815
+ /// and expecting the original variable `var` to also be updated.
816
+ at::TensorBase variable_data() const;
817
+
818
+ // Gradient Node and Edges
819
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
820
+
821
+ /// Gets the gradient function of the `Variable`. If this is a leaf variable,
822
+ /// the pointer returned will be null.
823
+ ///
824
+ /// For View Variables:
825
+ /// Gets the up-to-date grad_fn. If the shared data or base was modified, we
826
+ /// re-create the grad_fn to express the up-to-date view relationship between
827
+ /// this and the base Variable.
828
+ const std::shared_ptr<torch::autograd::Node>& grad_fn() const;
829
+
830
+ // Hooks
831
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
832
+
833
+ template <typename T>
834
+ using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, TensorBase>>::value, unsigned>;
835
+ template <typename T>
836
+ using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, TensorBase>, TensorBase>::value, unsigned>;
837
+
838
+ /// Registers a backward hook.
839
+ ///
840
+ /// The hook will be called every time a gradient with respect to the Tensor is computed.
841
+ /// The hook should have one of the following signature:
842
+ /// ```
843
+ /// hook(TensorBase grad) -> TensorBase
844
+ /// ```
845
+ /// ```
846
+ /// hook(TensorBase grad) -> void
847
+ /// ```
848
+ /// The hook should not modify its argument, but it can optionally return a new gradient
849
+ /// which will be used in place of `grad`.
850
+ ///
851
+ /// This function returns the index of the hook in the list which can be used to remove hook.
852
+ ///
853
+ /// Example:
854
+ /// @code
855
+ /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
856
+ /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
857
+ /// v.backward(torch::tensor({1., 2., 3.}));
858
+ /// // This prints:
859
+ /// // ```
860
+ /// // 2
861
+ /// // 4
862
+ /// // 6
863
+ /// // [ CPUFloatType{3} ]
864
+ /// // ```
865
+ /// std::cout << v.grad() << std::endl;
866
+ /// v.remove_hook(h); // removes the hook
867
+ /// @endcode
868
+ template <typename T>
869
+ hook_return_void_t<T> register_hook(T&& hook) const;
870
+ template <typename T>
871
+ hook_return_var_t<T> register_hook(T&& hook) const;
872
+
873
+ protected:
874
+ unsigned _register_hook(std::function<TensorBase(const TensorBase&)> hook) const;
875
+
876
+ public:
877
+
878
+ /// Remove hook at given position
879
+ void remove_hook(unsigned pos) const;
880
+
881
+ // Variable methods
882
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
883
+
884
+ bool is_leaf() const;
885
+
886
+ int64_t output_nr() const;
887
+
888
+ void set_data(const TensorBase & new_data) const;
889
+
890
+ TensorBase data() const;
891
+
892
+ int64_t _version() const;
893
+
894
+ void retain_grad() const;
895
+
896
+ bool retains_grad() const;
897
+
898
+ const TensorBase& requires_grad_(bool _requires_grad=true) const;
899
+
900
+ // View Variables
901
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
902
+
903
+ /// Returns true if this `Variable` is a view of another `Variable`.
904
+ bool is_view() const;
905
+
906
+ /// Returns the `Variable` that this `Variable` is a view of. If this
907
+ /// `Variable` is not a view, throw a `std::runtime_error`.
908
+ const TensorBase& _base() const;
909
+
910
+ // Miscellaneous
911
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
912
+
913
+ const std::string& name() const;
914
+
915
+ protected:
916
+ void enforce_invariants();
917
+ c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;
918
+
919
+ private:
920
+ TensorBase __dispatch_contiguous(c10::MemoryFormat) const;
921
+ };
922
+
923
/// Free-function form of TensorBase::get_device(), returning the tensor's
/// device index.
inline DeviceIndex get_device(const TensorBase& self) {
  return self.get_device();
}
926
+
927
+ template <typename T>
928
+ auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t<T> {
929
+ // Return the grad argument in case of a hook with void return type to have an
930
+ // std::function with Tensor return type
931
+ static_assert(std::is_same<decltype(hook(TensorBase())), void>::value,
932
+ "Expected hook to return void");
933
+ return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad) {
934
+ fn(grad);
935
+ return TensorBase();
936
+ });
937
+ }
938
+
939
// Overload for hooks that return a TensorBase: forward the hook verbatim,
// since it already matches the _register_hook signature.
template <typename T>
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t<T> {
  return _register_hook(std::forward<T>(hook));
}
943
+
944
namespace detail {
// Helper creator for the Tensor class which doesn't require users to pass
// in an intrusive_ptr; it constructs the requested TensorImpl subclass T
// in place and wraps it.
template <typename T, typename... Args>
TensorBase make_tensor_base(Args&&... args) {
  return TensorBase(c10::make_intrusive<T>(std::forward<Args>(args)...));
}

} // namespace detail
954
+
955
// Convenience overload: extract the legacy dispatch key directly from a
// tensor's key set.
static inline DispatchKey legacyExtractDispatchKey(const TensorBase& t) {
  return legacyExtractDispatchKey(t.key_set());
}
958
+
959
+ } // namespace at
960
+
961
+ namespace c10 {
962
// MaybeOwned support for TensorBase: a "borrow" is a TensorBase whose impl_
// pointer was copied WITHOUT bumping the refcount, so it must be released
// via unsafeReleaseTensorImpl (not destroyed normally) to avoid a double
// decref. The exact incref/release pairing below is deliberate — do not
// reorder.
template <>
struct MaybeOwnedTraits<at::TensorBase> {
  using owned_type = at::TensorBase;
  using borrow_type = at::TensorBase;

  static borrow_type createBorrow(const owned_type& from) {
    // NOTE: this can be implemented without the special
    // unsafe_borrow_t Tensor constructor as
    //
    // return borrow_type(c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
    //
    // but that hurts inlining due to the nullptr check in the
    // Tensor(c10::intrusive_ptr<...>) constructor. We already know
    // that from.impl_ isn't null because from is a valid Tensor, so
    // we needn't do the check again. (using __builtin_assume can
    // avoid this, but wouldn't be portable to MSVC.)
    return borrow_type(borrow_type::unsafe_borrow_t{}, from);
  }

  static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
    // Drop lhs's +0 reference before overwriting it, so the assignment
    // below does not decref an impl we never increfed.
    lhs.unsafeReleaseTensorImpl();
    // See above note: this can be implemented with public API
    // similarly to createBorrow(), but that would hurt inlining.
    lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
  }

  static void destroyBorrow(borrow_type& toDestroy) {
    toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
  }

  static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
    return borrow;
  }

  static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
    return &borrow;
  }

  static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
    return true;
  }
};
1004
+
1005
// ExclusivelyOwned support for TensorBase reuses the shared tensor traits.
template <>
struct ExclusivelyOwnedTraits<at::TensorBase> : public c10::ExclusivelyOwnedTensorTraits<at::TensorBase> {};
1007
+ } // namespace c10
1008
+
1009
+ namespace at {
1010
+
1011
+ inline c10::MaybeOwned<TensorBase> borrow_from_optional_tensor(
1012
+ const c10::optional<TensorBase>& opt) {
1013
+ return opt.has_value()
1014
+ ? c10::MaybeOwned<TensorBase>::borrowed(*opt)
1015
+ : c10::MaybeOwned<TensorBase>::owned(std::in_place);
1016
+ }
1017
+
1018
+ inline c10::MaybeOwned<TensorBase> TensorBase::expect_contiguous(MemoryFormat memory_format) const & {
1019
+ if (is_contiguous(memory_format)) {
1020
+ return c10::MaybeOwned<TensorBase>::borrowed(*this);
1021
+ } else {
1022
+ return c10::MaybeOwned<TensorBase>::owned(__dispatch_contiguous(memory_format));
1023
+ }
1024
+ }
1025
+
1026
namespace symint {

// SFINAE selectors: pick the symbolic (c10::SymInt) or concrete (int64_t)
// overload of each accessor below based on the caller's template argument.
template <typename T>
using enable_if_symint = std::enable_if_t<std::is_same<T, c10::SymInt>::value>;
template <typename T>
using enable_if_int = std::enable_if_t<std::is_same<T, int64_t>::value>;

// sizes<T>: symbolic vs concrete size arrays.
template <typename T, typename = enable_if_symint<T>>
c10::SymIntArrayRef sizes(const TensorBase& t) { return t.sym_sizes(); }
template <typename T, typename = enable_if_int<T>>
IntArrayRef sizes(const TensorBase& t) { return t.sizes(); }

// size<T>: a single dimension's extent.
template <typename T, typename = enable_if_symint<T>>
c10::SymInt size(const TensorBase& t, int64_t dim) { return t.sym_size(dim); }
template <typename T, typename = enable_if_int<T>>
int64_t size(const TensorBase& t, int64_t dim) { return t.size(dim); }

// strides<T>: symbolic vs concrete stride arrays.
template <typename T, typename = enable_if_symint<T>>
c10::SymIntArrayRef strides(const TensorBase& t) { return t.sym_strides(); }
template <typename T, typename = enable_if_int<T>>
IntArrayRef strides(const TensorBase& t) { return t.strides(); }

// numel<T>: total element count.
template <typename T, typename = enable_if_symint<T>>
c10::SymInt numel(const TensorBase& t) { return t.sym_numel(); }
template <typename T, typename = enable_if_int<T>>
int64_t numel(const TensorBase& t) { return t.numel(); }

} // namespace symint
1054
+
1055
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/core/TorchDispatchUtils.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/library.h>
4
+ #include <ATen/core/dispatch/Dispatcher.h>
5
+ #include <c10/util/ArrayRef.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
8
+
9
namespace at {
namespace impl {

// Returns true if the tensor's key set requires going through the Python
// torch_dispatch mechanism (implementation in the corresponding .cpp).
TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
// Same check, applied across a list of tensors.
TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
// Same check, for a list of optional tensors.
TORCH_API bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li);
// Re-exported so callers can query TorchDispatchMode TLS from at::impl.
using c10::impl::dispatch_mode_enabled;

}} // namespace at::impl
venv/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+
4
+ namespace at {
5
+
6
/// Adopts a raw TH TensorImpl pointer into an at::Tensor.
/// `reclaim` takes over the caller's existing +1 reference; when `retain`
/// is true we incref first so the caller keeps its own reference. The
/// Undefined singleton is never increfed — its refcount is not managed.
inline Tensor unsafeTensorFromTH(void * th_pointer, bool retain) {
  auto tensor_impl = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(static_cast<TensorImpl*>(th_pointer));
  if (retain && tensor_impl.get() != UndefinedTensorImpl::singleton()) {
    c10::raw::intrusive_ptr::incref(tensor_impl.get());
  }
  return Tensor(std::move(tensor_impl));
}
13
+
14
/// Adopts a raw TH StorageImpl pointer into an at::Storage.
/// When `retain` is true (and the pointer is non-null), incref before
/// reclaiming so the caller's reference stays valid.
inline Storage unsafeStorageFromTH(void * th_pointer, bool retain) {
  if (retain && th_pointer) {
    c10::raw::intrusive_ptr::incref(static_cast<StorageImpl*>(th_pointer));
  }
  return Storage(c10::intrusive_ptr<StorageImpl>::reclaim(static_cast<StorageImpl*>(th_pointer)));
}
20
+
21
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <ATen/core/Tensor.h>
5
+
6
+ // A little explanation about why this file exists at all. We have
7
+ // a few methods on Tensor class which require access to reified access to
8
+ // AutogradMeta. In open source, this isn't a big deal: we just access
9
+ // torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and
10
+ // we can put the definitions inline. This is because everything gets balled
11
+ // into a single dynamic library in the end.
12
+ //
13
+ // However, inside our Facebook internal version of our build system, we
14
+ // have a split between aten and torch/csrc. So we cannot simply just
15
+ // cross this boundary. "Now wait," you might say, "Why don't we just
16
+ // merge the libraries inside Facebook". Well, the problem is that there
17
+ // are some downstream applications which are at binary size limit, and
18
+ // incorporating all of the extra code from libtorch would push them
19
+ // over (admarket/adreview/service:adreviewservice, see also
20
+ // https://github.com/pytorch/pytorch/pull/29299) So if you want to do that,
21
+ // we have to fix all of the services like this.
22
+ //
23
+ // I didn't want to block eliminating Tensor-Variable on this work, so I
24
+ // had to introduce another dynamic dispatch to get to the variable
25
+ // implementations (which live in torch/csrc/autograd/variable.cpp, FYI).
26
+ //
27
+ // I also considered using our existing dynamic dispatch mechanism, c10
28
+ // dispatcher, to do this. However, (1) some of the functions on Tensor
29
+ // have weird signatures that are not supported by autograd, and (2)
30
+ // see this bug https://github.com/pytorch/pytorch/issues/30102
31
+
32
+ namespace torch { namespace autograd {
33
+
34
+ struct Node;
35
+
36
+ }} // namespace torch::autograd
37
+
38
+ namespace at {
39
+ namespace impl {
40
+
41
// Pure-virtual bridge through which ATen's Tensor methods reach autograd
// functionality implemented in torch/csrc/autograd/variable.cpp (see the
// file-level comment above for why this indirection exists). Each entry
// backs the correspondingly named TensorBase/Tensor method.
struct TORCH_API VariableHooksInterface {
  virtual ~VariableHooksInterface() = default;
  virtual TensorBase tensor_data(const TensorBase&) const = 0;
  virtual TensorBase variable_data(const TensorBase&) const = 0;
  virtual const std::shared_ptr<torch::autograd::Node>& grad_fn(const TensorBase&) const = 0;
  virtual unsigned _register_hook(
      const TensorBase&,
      std::function<TensorBase(const TensorBase&)> hook) const = 0;
  virtual void remove_hook(const TensorBase&, unsigned pos) const = 0;
  virtual bool is_view(const TensorBase&) const = 0;
  virtual const TensorBase& base(const TensorBase&) const = 0;
  virtual const std::string& name(const TensorBase&) const = 0;
  virtual bool is_leaf(const TensorBase&) const = 0;
  virtual int64_t output_nr(const TensorBase&) const = 0;
  virtual void set_data(const TensorBase&, const TensorBase&) const = 0;
  virtual TensorBase data(const TensorBase&) const = 0;
  virtual int64_t _version(const TensorBase&) const = 0;
  virtual void retain_grad(const TensorBase&) const = 0;
  virtual bool retains_grad(const TensorBase&) const = 0;
  virtual void _backward(const Tensor&, TensorList, const c10::optional<Tensor>&, c10::optional<bool>, bool) const = 0;
  virtual void requires_grad_(const TensorBase&, bool) const = 0;
  // Fallback installed for operators with no autograd formula.
  virtual void basic_autograd_not_implemented_fallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatch_keys, torch::jit::Stack* stack) const = 0;
};
64
+
65
+ TORCH_API void SetVariableHooks(VariableHooksInterface* hooks);
66
+ TORCH_API VariableHooksInterface* GetVariableHooks();
67
+ TORCH_API bool HasVariableHooks();
68
+
69
// RAII-style registration helper: constructing a static instance installs
// the given hooks implementation via SetVariableHooks at load time.
struct TORCH_API VariableHooksRegisterer {
  explicit VariableHooksRegisterer(VariableHooksInterface* hooks) {
    SetVariableHooks(hooks);
  }
};
74
+
75
+ }} // namespace at::impl
venv/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/intrusive_ptr.h>
7
+ #include <functional>
8
+ #include <utility>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
// A torch::jit::Function backed by a plain C++ callable operating on the
// interpreter Stack, used for builtin operators that need no graph or
// bytecode. The constructor asserts exactly one return value, which
// runAsync relies on below.
struct BuiltinOpFunction : public Function {
  BuiltinOpFunction(
      c10::QualifiedName qualname,
      c10::FunctionSchema schema,
      std::function<void(Stack&)> callable,
      std::string doc_string = "")
      : name_(std::move(qualname)),
        callable_(std::move(callable)),
        schema_(std::move(schema)),
        doc_string_(std::move(doc_string)) {
    // Single-return invariant; runAsync reads the result from stack.front().
    TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1);
  }

  c10::string_view doc_string() const override {
    return doc_string_;
  }

  // Executes the callable directly on the given stack (inputs consumed,
  // output pushed).
  void run(Stack& stack) override {
    callable_(stack);
  }

  // Builtins run synchronously: execute immediately and wrap the single
  // result in an already-completed Future. The launcher is unused.
  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& stack,
      TaskLauncher /* not used */) override {
    run(stack);
    auto res = c10::make_intrusive<c10::ivalue::Future>(stack.front().type());
    res->markCompleted(std::move(stack.front()));
    return res;
  }

  const c10::QualifiedName& qualname() const override {
    return name_;
  }

  // if this isn't yet defined, run its method_creator function
  void ensure_defined() override {
    // nop — builtins are always defined.
  }

  const c10::FunctionSchema& getSchema() const override {
    return schema_;
  }

  size_t num_inputs() const override {
    return schema_.arguments().size();
  }

  Function& setSchema(c10::FunctionSchema schema) override {
    schema_ = std::move(schema);
    return *this;
  }

  // call() overloads: run eagerly and return false, meaning there is no
  // Code object for the interpreter to step into.
  bool call(Stack& stack, c10::optional<size_t>, c10::function_ref<void(const Code&)>) override {
    run(stack);
    return false;
  }

  bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)>) override {
    run(stack);
    return false;
  }

  ~BuiltinOpFunction() override = default;

 private:
  c10::QualifiedName name_;

  std::function<void(Stack&)> callable_;

  c10::FunctionSchema schema_;

  std::string doc_string_;
};
86
+
87
+ } // namespace jit
88
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <memory>
5
+
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/Exception.h>
9
+
10
+ namespace c10 {
11
+
12
+ struct ClassType;
13
+ using ClassTypePtr = std::shared_ptr<ClassType>;
14
+
15
+ TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex);
16
+
17
// Returns the registered ClassType for custom class T, caching the lookup
// in a function-local static (thread-safe initialization).
template <typename T>
const c10::ClassTypePtr& getCustomClassType() {
  // Classes are never unregistered from getCustomClassTypeMap and the
  // hash lookup can be a hot path, so just cache.
  // For the same reason, it's fine If this ends up getting duplicated across
  // DSO boundaries for whatever reason.
  static c10::ClassTypePtr cache = getCustomClassTypeImpl(
      std::type_index(typeid(T)));
  return cache;
}
27
+
28
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from enum_tag.h
4
+
5
+ namespace at {
6
// Enum of valid tags obtained from the entries in tags.yaml
// NOTE: this header is generated by torchgen/gen.py — add or change tags
// in tags.yaml, not here.
enum class Tag {
  core,
  data_dependent_output,
  dynamic_output_shape,
  generated,
  inplace_view,
  needs_fixed_stride_order,
  nondeterministic_bitwise,
  nondeterministic_seeded,
  pointwise,
  pt2_compliant_tag,
  view_copy
};
20
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+
5
+ #include <utility>
6
+
7
+ namespace c10 {
8
+
9
+ struct EnumType;
10
+ using EnumTypePtr = std::shared_ptr<EnumType>;
11
+ using EnumNameValue = std::pair<std::string, IValue>;
12
+ struct TORCH_API EnumType : public NamedType {
13
+ friend struct Type;
14
+ static const TypeKind Kind = TypeKind::EnumType;
15
+
16
+ static EnumTypePtr create(
17
+ const c10::QualifiedName& qualified_class_name,
18
+ TypePtr value,
19
+ std::vector<EnumNameValue> enum_names_values,
20
+ std::weak_ptr<::torch::jit::CompilationUnit> cu) {
21
+ switch (value->kind()) {
22
+ case TypeKind::IntType:
23
+ case TypeKind::FloatType:
24
+ case TypeKind::StringType:
25
+ return EnumTypePtr(new EnumType(
26
+ qualified_class_name,
27
+ std::move(value),
28
+ std::move(enum_names_values),
29
+ std::move(cu)));
30
+ default:
31
+ AT_ERROR(
32
+ "Cannot create Enum with value type '",
33
+ value->str(),
34
+ "', only int, float and string are supported");
35
+ }
36
+ }
37
+
38
+ std::string str() const override {
39
+ return "Enum<" + annotation_str() + ">";
40
+ }
41
+
42
+ std::string repr_str() const override {
43
+ return str();
44
+ }
45
+
46
+ const TypePtr& getValueType() const {
47
+ return value_type_;
48
+ }
49
+
50
+ bool equals(const Type& rhs) const override {
51
+ if (auto* enum_rhs = rhs.castRaw<EnumType>()) {
52
+ return name().value() == enum_rhs->name().value() &&
53
+ *getValueType() == *(enum_rhs->getValueType()) &&
54
+ this->compilation_unit() == enum_rhs->compilation_unit();
55
+ }
56
+ return false;
57
+ }
58
+
59
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
60
+
61
+ std::shared_ptr<const ::torch::jit::CompilationUnit> compilation_unit()
62
+ const {
63
+ auto cu = cu_.lock();
64
+ return cu;
65
+ }
66
+
67
+ const QualifiedName& qualifiedClassName() const {
68
+ return name().value();
69
+ }
70
+
71
+ at::ArrayRef<TypePtr> containedTypes() const override {
72
+ return value_type_;
73
+ }
74
+
75
+ const at::ArrayRef<EnumNameValue> enumNamesValues() const {
76
+ return enum_names_values_;
77
+ }
78
+
79
+ private:
80
+ EnumType(
81
+ c10::QualifiedName qualified_class_name,
82
+ TypePtr value_type,
83
+ std::vector<EnumNameValue> enum_names_values,
84
+ std::weak_ptr<torch::jit::CompilationUnit> cu)
85
+ : NamedType(TypeKind::EnumType, std::move(qualified_class_name)),
86
+ value_type_(std::move(value_type)),
87
+ enum_names_values_(std::move(enum_names_values)),
88
+ cu_(std::move(cu)) {}
89
+
90
+ std::string annotation_str_impl(
91
+ C10_UNUSED TypePrinter printer = nullptr) const override {
92
+ const auto& n = name().value();
93
+ return n.qualifiedName();
94
+ }
95
+
96
+ TypePtr value_type_;
97
+ std::vector<EnumNameValue> enum_names_values_;
98
+ std::weak_ptr<::torch::jit::CompilationUnit> cu_;
99
+ };
100
+
101
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ostream>
3
+ #include <sstream>
4
+
5
+ // note: windows build doesn't find symbols in operator files unless
6
+ // this is a header file
7
+
8
+ namespace c10 {
9
+
10
+ inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) {
11
+ // eventually this should look almost identical to python arg parser, but
12
+ // it is simpler for now to work directly on this schema
13
+
14
+ out << schema.name();
15
+ if (!schema.overload_name().empty()) {
16
+ out << "." << schema.overload_name();
17
+ }
18
+ out << "(";
19
+
20
+ bool seen_kwarg_only = false;
21
+ for (const auto i : c10::irange(schema.arguments().size())) {
22
+ if (i > 0) out << ", ";
23
+ if (schema.arguments()[i].kwarg_only() && !seen_kwarg_only) {
24
+ out << "*, ";
25
+ seen_kwarg_only = true;
26
+ }
27
+ out << schema.arguments()[i];
28
+ }
29
+
30
+ if(schema.is_vararg()) {
31
+ if(!schema.arguments().empty())
32
+ out << ", ";
33
+ out << "...";
34
+ }
35
+
36
+ out << ") -> ";
37
+
38
+ const auto& returns = schema.returns();
39
+
40
+ /*
41
+ * We should skip parenthesis if we return a single item and it's not varret,
42
+ * or we return nothing but varret.
43
+ *
44
+ * Need special handling for schema
45
+ * aten::items.str(Dict(str, t) self) -> (str,t)[]
46
+ * Even though this schema returns a single item, we need add parenthesis.
47
+ * The is necessary so the printed schema can be parsed by the C++ SchemaParser
48
+ * Without the extra parenthesis, the parser sees the first parenthesis in '(str,t)' and mistakenly
49
+ * treat the return type as a tuple. An alternative is to enhance the Lexer
50
+ * to lookahead multiple tokens to accurately decide if the return type is
51
+ * a tuple.
52
+ */
53
+ bool need_paren = !(
54
+ (returns.size() == 1 && !schema.is_varret()) ||
55
+ (returns.empty() && schema.is_varret()));
56
+
57
+ if (returns.size() == 1 && !schema.is_varret()) {
58
+ std::stringstream return_ss;
59
+ return_ss << returns.at(0);
60
+ auto return_str = return_ss.str();
61
+
62
+ // enclosing the single return item with parenthesis if the return type
63
+ // starts with a left parenthesis.
64
+ //
65
+ // There are 2 cases
66
+ // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'.
67
+ // without the extra parenthesis, the c++ schem parser can not parse it.
68
+ // 2. something like '-> ((str, str))'. Need extra parenthesis so the return
69
+ // type is a single tuple rather than two strings.
70
+ // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about
71
+ // this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15)
72
+ // also covers this case.
73
+ if (!return_str.empty() && return_str.front() == '(') {
74
+ need_paren = true;
75
+ }
76
+ }
77
+
78
+ if (need_paren) {
79
+ out << "(";
80
+ }
81
+ for (const auto i : c10::irange(returns.size())) {
82
+ if (i > 0) {
83
+ out << ", ";
84
+ }
85
+ out << returns.at(i);
86
+ }
87
+ if (schema.is_varret()) {
88
+ if (!returns.empty()) {
89
+ out << ", ";
90
+ }
91
+ out << "...";
92
+ }
93
+ if (need_paren) {
94
+ out << ")";
95
+ }
96
+ return out;
97
+ }
98
+
99
+ inline size_t findFirstOutArg(const std::vector<Argument>& args) {
100
+ // find the start of out args in the schema
101
+ for (const auto out_start_idx : c10::irange(args.size())) {
102
+ if (args.at(out_start_idx).is_out()) {
103
+ return out_start_idx;
104
+ }
105
+ }
106
+ return args.size();
107
+ }
108
+
109
+ inline bool Argument::isBackwardCompatibleWith(
110
+ const Argument& old,
111
+ std::ostream* why_not) const {
112
+ const Argument* lhs = this;
113
+ const Argument* rhs = &old;
114
+ if (!(lhs->name() == rhs->name()
115
+ && lhs->N() == rhs->N()
116
+ && (lhs->alias_info() == rhs->alias_info()
117
+ || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
118
+ && *lhs->alias_info() == *rhs->alias_info())))) {
119
+ return false;
120
+ }
121
+ if (lhs->kwarg_only() && !rhs->kwarg_only()) {
122
+ return false;
123
+ }
124
+ if (!rhs->type()->isSubtypeOfExt(*lhs->type(), why_not)) {
125
+ return false;
126
+ }
127
+ if (rhs->default_value().has_value() &&
128
+ lhs->default_value() != rhs->default_value()) {
129
+ return false;
130
+ }
131
+ return true;
132
+ }
133
+
134
+ inline bool Argument::isForwardCompatibleWith(
135
+ const Argument& old,
136
+ std::ostream* why_not) const {
137
+ const Argument* lhs = this;
138
+ const Argument* rhs = &old;
139
+ if (!(lhs->name() == rhs->name()
140
+ && lhs->N() == rhs->N()
141
+ && (lhs->alias_info() == rhs->alias_info()
142
+ || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
143
+ && *lhs->alias_info() == *rhs->alias_info())))) {
144
+ return false;
145
+ }
146
+ if (lhs->kwarg_only() && !rhs->kwarg_only()) {
147
+ return false;
148
+ }
149
+ if (!lhs->type()->isSubtypeOfExt(rhs->type(), why_not)) {
150
+ return false;
151
+ }
152
+ if (rhs->default_value().has_value() &&
153
+ lhs->default_value() != rhs->default_value()) {
154
+ return false;
155
+ }
156
+ if (lhs->default_value().has_value() && !rhs->default_value().has_value()) {
157
+ return false;
158
+ }
159
+ return true;
160
+ }
161
+
162
+ inline std::string FunctionSchema::formatTypeMismatchMsg(
163
+ const Argument& expected,
164
+ const std::string& actual_type,
165
+ c10::optional<size_t> position,
166
+ c10::optional<std::string> value) const {
167
+ std::string position_str;
168
+ if (position) {
169
+ position_str = c10::str("Position: ", *position, "\n");
170
+ }
171
+ std::string value_str;
172
+ if (value) {
173
+ value_str = c10::str("Value: ", *value, "\n");
174
+ }
175
+ return c10::str(
176
+ name(),
177
+ "() ",
178
+ expected.formatTypeMismatchMsg(actual_type),
179
+ position_str,
180
+ value_str,
181
+ "Declaration: ",
182
+ *this);
183
+ }
184
+
185
+ inline bool FunctionSchema::isBackwardCompatibleWith(
186
+ const FunctionSchema& old,
187
+ std::ostream* why_not) const {
188
+ if (!(name() == old.name()
189
+ && overload_name() == old.overload_name()
190
+ // we are conservative on is_vararg and is_varret,
191
+ // since they are only used by internal operators
192
+ && is_vararg() == old.is_vararg()
193
+ && is_varret() == old.is_varret()
194
+ && returns().size() == old.returns().size()
195
+ && arguments().size() >= old.arguments().size())) {
196
+ return false;
197
+ }
198
+ for (const auto i : c10::irange(returns().size())) {
199
+ // Backwards compatibility requires covariance on argument types
200
+ // (i.e. more generic), and contravariance on return types (i.e.
201
+ // more specific).
202
+ if (!old.returns().at(i).isBackwardCompatibleWith(
203
+ returns().at(i),
204
+ why_not)) {
205
+ return false;
206
+ }
207
+ }
208
+
209
+ // we want to test both out and default args separately
210
+ size_t old_out_start_idx = findFirstOutArg(old.arguments());
211
+ size_t new_out_start_idx = findFirstOutArg(arguments());
212
+
213
+ // make sure among the default args, they are backward compatible
214
+ for (const auto i : c10::irange(old_out_start_idx)) {
215
+ if (!arguments().at(i).isBackwardCompatibleWith(
216
+ old.arguments().at(i), why_not)) {
217
+ return false;
218
+ }
219
+ }
220
+
221
+ // Validate that all new arguments provided has a default value
222
+ for (const auto i : c10::irange(old_out_start_idx, new_out_start_idx)) {
223
+ if (!arguments().at(i).default_value()) {
224
+ if (why_not) {
225
+ *why_not
226
+ << "Function schema not backward compatible since the new argument '"
227
+ << arguments().at(i).name() << "' of type "
228
+ << arguments().at(i).type()->str()
229
+ << " did not provide a default value.";
230
+ }
231
+ return false;
232
+ }
233
+ }
234
+
235
+ // now compare the out args
236
+ for (const auto i : c10::irange(old_out_start_idx, old.arguments().size())) {
237
+ if (!arguments()
238
+ .at(i - old_out_start_idx + new_out_start_idx)
239
+ .isBackwardCompatibleWith(old.arguments().at(i), why_not)) {
240
+ return false;
241
+ }
242
+ }
243
+
244
+ return true;
245
+ }
246
+
247
+ inline bool FunctionSchema::isForwardCompatibleWith(
248
+ const FunctionSchema& old,
249
+ std::ostringstream& why_not) const {
250
+ if (!(name() == old.name() &&
251
+ overload_name() == old.overload_name()
252
+ // we are conservative on is_vararg and is_varret,
253
+ // since they are only used by internal operators
254
+ && is_vararg() == old.is_vararg() && is_varret() == old.is_varret() &&
255
+ returns().size() == old.returns().size())) {
256
+ return false;
257
+ }
258
+
259
+ // we want to test both out and default args separately
260
+ size_t old_out_start_idx = findFirstOutArg(old.arguments());
261
+ size_t new_out_start_idx = findFirstOutArg(arguments());
262
+
263
+ if (old.arguments().size() - old_out_start_idx !=
264
+ arguments().size() - new_out_start_idx) {
265
+ if (why_not) {
266
+ why_not << "Function schema should have the "
267
+ << "same number of out arguments";
268
+ }
269
+ return false;
270
+ }
271
+
272
+ // make sure among the default args, they are forward compatible
273
+ for (size_t i = 0; i < std::min(old_out_start_idx, new_out_start_idx); i++) {
274
+ if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) {
275
+ if (why_not) {
276
+ why_not
277
+ << "'" << arguments().at(i).name() << "'"
278
+ << " is not forward compatible with the older version of the schema";
279
+ }
280
+ return false;
281
+ }
282
+ }
283
+
284
+ // Validate that all new arguments provided has a default value
285
+ for (size_t i = old_out_start_idx; i < new_out_start_idx; ++i) {
286
+ if (!arguments().at(i).default_value()) {
287
+ if (why_not) {
288
+ why_not
289
+ << "Function schema is not forward compatible since the new argument '"
290
+ << arguments().at(i).name() << "' of type "
291
+ << arguments().at(i).type()->str()
292
+ << " did not provide a default value.";
293
+ }
294
+ return false;
295
+ }
296
+
297
+ auto default_val = arguments().at(i).default_value().value();
298
+ if (default_val.isList() || default_val.isGenericDict()) {
299
+ if (why_not) {
300
+ why_not
301
+ << "Function schema is not forward compatible since the new argument '"
302
+ << arguments().at(i).name() << "' of type "
303
+ << arguments().at(i).type()->str() << " has a container type "
304
+ << "as its default value.";
305
+ }
306
+ return false;
307
+ }
308
+ }
309
+
310
+ // now compare the out args
311
+ for (size_t i = old_out_start_idx; i < old.arguments().size(); i++) {
312
+ if (!arguments()
313
+ .at(i - old_out_start_idx + new_out_start_idx)
314
+ .isForwardCompatibleWith(old.arguments().at(i))) {
315
+ if (why_not) {
316
+ why_not << "Out argument '"
317
+ << "'" << arguments().at(i).name()
318
+ << " is not FC with the older version of the schema";
319
+ }
320
+ return false;
321
+ }
322
+ }
323
+
324
+ return true;
325
+ }
326
+
327
+ template<typename T>
328
+ inline void FunctionSchema::checkArg(
329
+ const IValue& value,
330
+ const Argument& argument,
331
+ optional<size_t> pos) const {
332
+ if (value.isTensor() && argument.type() == TensorType::get()) {
333
+ // Fast-path for the common case
334
+ return;
335
+ }
336
+ if (!value.type<T>()->isSubtypeOf(*argument.type())) {
337
+ TORCH_CHECK(
338
+ false,
339
+ formatTypeMismatchMsg(
340
+ argument, value.type<T>()->repr_str(), pos));
341
+ }
342
+ }
343
+
344
+ inline std::string FunctionSchema::findErrorInKwargs(const std::vector<std::string>& kwargs) const {
345
+ // First check if any of the kwargs are unknown, i.e. don't match the name of
346
+ // any argument in the schema.
347
+ for (const auto& kwarg : kwargs) {
348
+ if (!std::count_if(
349
+ arguments().begin(),
350
+ arguments().end(),
351
+ [&kwarg](const Argument& argument) {
352
+ return argument.name() == kwarg;
353
+ })) {
354
+ return c10::str(
355
+ "Unknown keyword argument '",
356
+ kwarg,
357
+ "' for operator '",
358
+ name(),
359
+ "'. Schema: ",
360
+ *this);
361
+ }
362
+ }
363
+ // If there are unconsumed kwargs but none of them were unknown, the first
364
+ // positional argument present in the kwargs is duplicated.
365
+ for (const auto& argument : arguments()) {
366
+ if (std::find(kwargs.begin(), kwargs.end(), argument.name()) != kwargs.end()) {
367
+ AT_ASSERT(!argument.default_value());
368
+ return c10::str(
369
+ "Argument '",
370
+ argument.name(),
371
+ "' specified both as positional and ",
372
+ "keyword argument. Schema: ",
373
+ *this);
374
+ }
375
+ }
376
+ return "";
377
+ }
378
+
379
+ template <typename T>
380
+ inline void FunctionSchema::checkAndNormalizeInputs(
381
+ std::vector<IValue>& inputs,
382
+ const std::unordered_map<std::string, IValue>& kwargs) const {
383
+ // Do we have more inputs than the schema accepts?
384
+ TORCH_CHECK(
385
+ inputs.size() <= arguments().size(),
386
+ "Expected at most ",
387
+ arguments().size(),
388
+ " argument(s) for operator '",
389
+ name(),
390
+ "', but received ",
391
+ inputs.size(),
392
+ " argument(s). Declaration: ",
393
+ *this);
394
+
395
+ size_t consumed_kwargs = 0;
396
+ for (const auto pos : c10::irange(arguments().size())) {
397
+ const auto& argument = arguments()[pos];
398
+ if (pos < inputs.size()) {
399
+ checkArg<T>(inputs[pos], argument, pos);
400
+ continue;
401
+ }
402
+ auto it = kwargs.find(argument.name());
403
+ if (it != kwargs.end()) {
404
+ checkArg<T>(it->second, argument, nullopt);
405
+ inputs.push_back(it->second);
406
+ consumed_kwargs++;
407
+ continue;
408
+ }
409
+ if (argument.default_value()) {
410
+ inputs.push_back(*argument.default_value());
411
+ continue;
412
+ }
413
+ AT_ERROR(
414
+ name(),
415
+ "() is missing value for argument '",
416
+ argument.name(),
417
+ "'. Declaration: ",
418
+ *this);
419
+ }
420
+ if (consumed_kwargs != kwargs.size()) {
421
+ std::vector<std::string> names;
422
+ names.reserve(kwargs.size());
423
+ for(const auto& k : kwargs) {
424
+ names.emplace_back(k.first);
425
+ }
426
+ throw std::runtime_error(findErrorInKwargs(names));
427
+ }
428
+ }
429
+
430
+ inline FunctionSchema FunctionSchema::cloneWithRemappedTypes(
431
+ const std::function<TypePtr(TypePtr)> type_map) const {
432
+ auto update_args = [&](const std::vector<Argument>& args) {
433
+ std::vector<Argument> new_args;
434
+ new_args.reserve(args.size());
435
+ for(const Argument& arg : args) {
436
+ new_args.emplace_back(arg.cloneWithType(type_map(arg.type())));
437
+ }
438
+ return new_args;
439
+ };
440
+ return FunctionSchema(
441
+ name(),
442
+ overload_name(),
443
+ update_args(arguments()),
444
+ update_args(returns()),
445
+ is_vararg(),
446
+ is_varret());
447
+ }
448
+
449
+ // covariant subtyping of list of Arguments
450
+ inline bool isSubtypeOfList(
451
+ ArrayRef<Argument> child,
452
+ ArrayRef<Argument> parent,
453
+ std::ostream* why_not) {
454
+ if (child.size() != parent.size()) {
455
+ return false;
456
+ }
457
+ for (const auto i : c10::irange(child.size())) {
458
+ const Argument& c = child[i];
459
+ const Argument& p = parent[i];
460
+ if (c.name() != p.name()) {
461
+ return false;
462
+ }
463
+ if (!c.type()->isSubtypeOfExt(*p.type(), why_not)) {
464
+ return false;
465
+ }
466
+ }
467
+ return true;
468
+ }
469
+
470
+ inline bool FunctionSchema::isSubtypeOf(
471
+ const FunctionSchema& rhs,
472
+ bool as_method,
473
+ std::ostream* why_not) const {
474
+ size_t start = as_method ? 1 : 0;
475
+ // functions are contravariant in arguments but covariant in returns
476
+ return isSubtypeOfList(
477
+ ArrayRef<Argument>(rhs.arguments()).slice(start),
478
+ ArrayRef<Argument>(arguments()).slice(start),
479
+ why_not) &&
480
+ isSubtypeOfList(returns(), rhs.returns(), why_not);
481
+ }
482
+
483
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/core/functional.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <vector>
4
+ #include <c10/util/ArrayRef.h>
5
+
6
+ namespace c10 {
7
+
8
+ // The passed in function must take T by value (T), or by
9
+ // const reference (const T&); taking T by non-const reference
10
+ // will result in an error like:
11
+ //
12
+ // error: no type named 'type' in 'class std::result_of<foobar::__lambda(T)>'
13
+ //
14
+ // No explicit template parameters are required.
15
+
16
+ // Overload for explicit function and ArrayRef
17
+ template<class F, class T>
18
+ inline auto fmap(const T& inputs, const F& fn) -> std::vector<decltype(fn(*inputs.begin()))> {
19
+ std::vector<decltype(fn(*inputs.begin()))> r;
20
+ r.reserve(inputs.size());
21
+ for(const auto & input : inputs)
22
+ r.push_back(fn(input));
23
+ return r;
24
+ }
25
+
26
+ // C++ forbids taking an address of a constructor, so here's a workaround...
27
+ // Overload for constructor (R) application
28
+ template<typename R, typename T>
29
+ inline std::vector<R> fmap(const T& inputs) {
30
+ std::vector<R> r;
31
+ r.reserve(inputs.size());
32
+ for(auto & input : inputs)
33
+ r.push_back(R(input));
34
+ return r;
35
+ }
36
+
37
+ template<typename F, typename T>
38
+ inline std::vector<T> filter(at::ArrayRef<T> inputs, const F& fn) {
39
+ std::vector<T> r;
40
+ r.reserve(inputs.size());
41
+ for(auto & input : inputs) {
42
+ if (fn(input)) {
43
+ r.push_back(input);
44
+ }
45
+ }
46
+ return r;
47
+ }
48
+
49
+ template<typename F, typename T>
50
+ inline std::vector<T> filter(const std::vector<T>& inputs, const F& fn) {
51
+ return filter<F, T>(static_cast<at::ArrayRef<T>>(inputs), fn);
52
+ }
53
+
54
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/core/GradMode.h>
5
+
6
+ namespace at {
7
+ using GradMode = c10::GradMode;
8
+ using AutoGradMode = c10::AutoGradMode;
9
+ using NoGradGuard = c10::NoGradGuard;
10
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h ADDED
@@ -0,0 +1,1555 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/DimVector.h>
4
+ #include <ATen/core/TensorBody.h>
5
+ #include <ATen/core/blob.h>
6
+ #include <ATen/core/custom_class.h>
7
+ #include <ATen/core/ivalue_to.h>
8
+ #include <ATen/core/jit_type_base.h>
9
+ #include <ATen/core/type_factory.h>
10
+ #include <c10/core/SymBool.h>
11
+ #include <c10/core/SymFloat.h>
12
+ #include <c10/macros/Export.h>
13
+ #include <c10/util/MaybeOwned.h>
14
+ #include <c10/util/intrusive_ptr.h>
15
+ #include <type_traits>
16
+ #include <typeindex>
17
+ #include <unordered_map>
18
+ #include <unordered_set>
19
+ #include <utility>
20
+
21
+ namespace torch {
22
+ class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {};
23
+ namespace jit {
24
+ using ::torch::CustomClassHolder;
25
+ struct Function;
26
+ struct CompilationUnit;
27
+ struct Module;
28
+ } // namespace jit
29
+ } // namespace torch
30
+ namespace c10 {
31
+ template <class Key, class Value>
32
+ class Dict;
33
+ template <class T>
34
+ class List;
35
+ template <class T>
36
+ class IListRef;
37
+ struct IValue;
38
+ struct ClassType;
39
+ struct Type;
40
+ class RRefInterface;
41
+
42
+ struct ClassType;
43
+ using ClassTypePtr = std::shared_ptr<ClassType>;
44
+
45
+ TORCH_API bool _fastEqualsForContainer(const IValue& lhs, const IValue& rhs);
46
+
47
+ TORCH_API torch::jit::Function* checkObjectSortSchema(
48
+ const c10::ClassTypePtr& t,
49
+ std::stringstream& why_not);
50
+
51
+ // A comparator that checks ordering of two IValues of same type.
52
+ typedef std::function<bool(const IValue& a, const IValue& b)> IValueComparator;
53
+
54
+ TORCH_API IValueComparator getLessThanComparator(const IValue& v);
55
+ TORCH_API IValueComparator getGreaterThanComparator(const IValue& v);
56
+
57
+ namespace ivalue {
58
+ struct Tuple;
59
+ struct Future;
60
+ struct Await;
61
+ struct ConstantString;
62
+ struct GenericDict;
63
+ struct Object;
64
+ struct PyObjectHolder;
65
+ struct EnumHolder;
66
+ // We need a ComplexHolder because currently the payloads in the Union
67
+ // only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big
68
+ // to fit in the IValue directly, we indirect complex numbers through an
69
+ // intrusive pointer to ComplexHolder (which contains a c10::complex).
70
+ struct ComplexHolder : c10::intrusive_ptr_target {
71
+ public:
72
+ template <typename T>
73
+ ComplexHolder(c10::complex<T> c) {
74
+ val = convert<decltype(val), c10::complex<T>>(c);
75
+ }
76
+ ComplexHolder() = default;
77
+ c10::complex<double> val;
78
+ };
79
+
80
+ // Similar to ComplexHolder, for StreamData3
81
+ struct StreamData3Holder : c10::intrusive_ptr_target {
82
+ public:
83
+ StreamData3Holder(struct c10::StreamData3 d) : val(d) {}
84
+ StreamData3Holder() = delete;
85
+ struct c10::StreamData3 val;
86
+ };
87
+
88
+ } // namespace ivalue
89
+
90
+ // This is an owning wrapper for a c10::optional<std::vector<T>>
91
+ // that can be implicitly converted to a (non-owning) optional<ArrayRef<T>>.
92
+ // Its purpose is to be used in generated code to keep the vector alive
93
+ // either until the end of a statement (as a temporary), or as a saved arg
94
+ // in autograd.
95
+ template <typename T>
96
+ struct OptionalArray {
97
+ c10::optional<std::vector<T>> list;
98
+
99
+ OptionalArray() = default;
100
+ OptionalArray(std::vector<T> val) : list(std::move(val)) {}
101
+
102
+ // Used when saving an argument for the backwards pass.
103
+ OptionalArray& operator=(c10::optional<ArrayRef<T>> ref) {
104
+ if (ref) {
105
+ list = std::vector<T>(ref->begin(), ref->end());
106
+ } else {
107
+ list = nullopt;
108
+ }
109
+ return *this;
110
+ }
111
+
112
+ // Used when saving an argument for the backwards pass.
113
+ OptionalArray& operator=(c10::OptionalArrayRef<T> ref) {
114
+ if (ref) {
115
+ list = std::vector<T>(ref->begin(), ref->end());
116
+ } else {
117
+ list = nullopt;
118
+ }
119
+ return *this;
120
+ }
121
+
122
+ operator c10::optional<c10::ArrayRef<T>>() {
123
+ if (!list) {
124
+ return nullopt;
125
+ }
126
+ return *list;
127
+ }
128
+
129
+ operator c10::OptionalArrayRef<T>() {
130
+ if (!list) {
131
+ return nullopt;
132
+ }
133
+ return *list;
134
+ }
135
+ };
136
+
137
+ // Capsule is an internal implementation detail of custom C++ classes. We
138
+ // define it as an owning wrapper for
139
+ // c10::intrusive_ptr<torch::CustomClassHolder> This wrapper is here to serve as
140
+ // an abstraction of the type erased custom class object pointer. It also allow
141
+ // pybind11 to treat this as a standalone class to register as a separate type
142
+ // caster, instead of a custom pointer holder which the pointer holder type
143
+ // caster try to "unwrap" it automatically.
144
+ struct Capsule {
145
+ c10::intrusive_ptr<torch::CustomClassHolder> obj_ptr;
146
+ explicit Capsule(c10::intrusive_ptr<torch::CustomClassHolder> ptr)
147
+ : obj_ptr(std::move(ptr)) {}
148
+ };
149
+
150
+ // IValue is the generic tagged union used by the interpreter to hold
151
+ // all value types.
152
+ // It is a 16-byte object with an 8-byte payload and an 8-byte tag.
153
+ // The tag is currently 4 bytes to determine the type, and 1 byte
154
+ // to mark whether that type is a subtype of c10::intrusive_ptr_target and needs
155
+ // retain/release calls.
156
+
157
+ #define TORCH_FORALL_TAGS(_) \
158
+ _(None) \
159
+ _(Tensor) \
160
+ _(Storage) \
161
+ _(Double) \
162
+ _(ComplexDouble) \
163
+ _(Int) \
164
+ _(SymInt) \
165
+ _(SymFloat) \
166
+ _(SymBool) \
167
+ _(Bool) \
168
+ _(Tuple) \
169
+ _(String) \
170
+ _(Blob) \
171
+ _(GenericList) \
172
+ _(GenericDict) \
173
+ _(Future) \
174
+ _(Await) \
175
+ _(Device) \
176
+ _(Stream) \
177
+ _(Object) \
178
+ _(PyObject) \
179
+ _(Uninitialized) \
180
+ _(Capsule) \
181
+ _(RRef) \
182
+ _(Quantizer) \
183
+ _(Generator) \
184
+ _(Enum)
185
+
186
+ // [doxygen private]
187
+ // These methods are not actually private but we don't want to document them, so
188
+ // they are marked `@private`, which hides them on the doxygen documentation for
189
+ // this page.
190
+
191
+ /// IValue (Interpreter Value) is a tagged union over the types
192
+ /// supported by the TorchScript interpreter. IValues contain their
193
+ /// values as an `IValue::Payload`, which holds primitive types
194
+ /// (`int64_t`, `bool`, `double`, `Device`) and `Tensor` as values,
195
+ /// and all other types as a `c10::intrusive_ptr`. In order to
196
+ /// optimize performance of the destructor and related operations by
197
+ /// making the `Tensor` and `c10::intrusive_ptr` paths generate the
198
+ /// same code, we represent a null `c10::intrusive_ptr` as
199
+ /// `UndefinedTensorImpl::singleton()`, *not* `nullptr`.
200
+ ///
201
+ /// IValues are used as inputs to and outputs from the TorchScript interpreter.
202
+ /// To retrieve the value contained within an IValue, use the `.toX()` methods,
203
+ /// where `X` is the type you are trying to get. Note that neither the `.toX()`
204
+ /// methods nor the templated `.to<T>` functions do any kind of casting, they
205
+ /// only unwrap the contained value. For example:
206
+ ///
207
+ /// \rst
208
+ /// .. code-block:: cpp
209
+ ///
210
+ /// // Make the IValue
211
+ /// torch::IValue my_ivalue(26);
212
+ /// std::cout << my_ivalue << "\n";
213
+ ///
214
+ /// // Unwrap the IValue
215
+ /// int64_t my_int = my_ivalue.toInt();
216
+ /// std::cout << my_int << "\n";
217
+ ///
218
+ /// // This will throw an error!
219
+ /// // `my_ivalue` is tagged as an int and cannot be used as another type
220
+ /// torch::Tensor my_tensor = my_ivalue.toTensor();
221
+ /// \endrst
222
+ struct TORCH_API IValue final {
223
+ IValue(const IValue& rhs) : IValue(rhs.payload, rhs.tag) {
224
+ if (isIntrusivePtr() &&
225
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
226
+ c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
227
+ }
228
+ }
229
+
230
+ IValue(IValue&& rhs) noexcept : tag(rhs.tag) {
231
+ moveFrom(std::move(rhs));
232
+ }
233
+
234
+ /// @private [doxygen private]
235
+ ~IValue() {
236
+ destroy();
237
+ }
238
+
239
+ C10_ALWAYS_INLINE IValue& operator=(IValue&& rhs) & noexcept {
240
+ if (&rhs == this) {
241
+ return *this;
242
+ }
243
+
244
+ destroy();
245
+ moveFrom(std::move(rhs));
246
+ return *this;
247
+ }
248
+
249
+ IValue& operator=(IValue const& rhs) & {
250
+ *this = IValue(rhs);
251
+ return *this;
252
+ }
253
+
254
+ void dump() const;
255
+
256
+ /**
257
+ * Equality comparison. The semantics are the same as Python's `==`:
258
+ * 1. Numerical types are compared by value.
259
+ * 2. Tensors compute element-wise equality, returning a BoolTensor (see:
260
+ * `torch.eq()`)
261
+ * 3. Strings are compared by value.
262
+ * 4. Sequence types (list, tuple) are compared lexicographically by
263
+ * comparing their elements. Different sequence types never compare equal.
264
+ * 5. Mappings (dict) must have equal (key, value) pairs.
265
+ * 6. If not listed above, the default behavior is to test identity
266
+ * equality (e.g. pointer equality).
267
+ *
268
+ * Why does this return an IValue instead of a bool? Because in PyTorch,
269
+ * `tensor1 == tensor2` returns a `BoolTensor`, not a bool.
270
+ *
271
+ * NOTE: we (like Python) assume that identity equality implies value equality
272
+ * for efficiency.
273
+ * TODO: need to support customizing equality
274
+ */
275
+ IValue equals(const IValue& rhs) const;
276
+ /**
277
+ * This implements the same semantics as `bool(lhs == rhs)` in Python. which
278
+ * is the same as `equals()` except for Tensor types.
279
+ */
280
+ TORCH_API friend bool operator==(const IValue& lhs, const IValue& rhs);
281
+ TORCH_API friend bool operator!=(const IValue& lhs, const IValue& rhs);
282
+
283
+ /**
284
+ * Identity comparison. Checks if `this` is the same object as `rhs`. The
285
+ * semantics are the same as Python's `is` operator.
286
+ *
287
+ * NOTE: Like in Python, this operation is poorly defined for primitive types
288
+ * like numbers and strings. Prefer to use `==` unless you really want to
289
+ * check identity equality.
290
+ */
291
+ bool is(const IValue& rhs) const;
292
+
293
+ /**
294
+ * Hashing for IValues. Returns an IValue-boxed int.
295
+ *
296
+ * Some notes:
297
+ * - Like eager, Tensors are hashed by looking at the pointer. This is not
298
+ * strictly correct because two value-equal tensors with different tensor
299
+ * pointers will hash differently, but we choose to reproduce the eager
300
+ * semantics.
301
+ * - Hashing is not defined on all built-in IValue types (e.g. list and
302
+ * dict), following Python. Calling `hash()` on these types will throw.
303
+ */
304
+ IValue hash() const {
305
+ return (int64_t)IValue::hash(*this);
306
+ }
307
+ // This is defined because `c10::hash` dispatches to a function of this
308
+ // signature. See the member function `hash()`.
309
+ static size_t hash(const IValue& iv);
310
+
311
+ /**
312
+ * @private [doxygen private]
313
+ * [container equality]
314
+ * This is an equality implementation that assumes objects with the same
315
+ * identity equal themselves, for efficiency reasons. We primarily have this
316
+ * for consistency, because Python does the same thing. This actually
317
+ * provokes user-visible changes in behavior due to quirks in torch:
318
+ * [tensor1] == [tensor1] -> True (because container equality will first
319
+ * compare identity) [tensor1] == [tensor1_copy] -> RuntimeError:
320
+ * Boolean value of Tensor with more than one value is ambiguous
321
+ */
322
+ TORCH_API friend bool _fastEqualsForContainer(
323
+ const IValue& lhs,
324
+ const IValue& rhs);
325
+
326
+ private:
327
+ static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) {
328
+ if (a.is_sparse()) {
329
+ return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b);
330
+ }
331
+ if (b.is_sparse()) {
332
+ return isAliasOf(a, b._values()) || isAliasOf(a, b._indices());
333
+ }
334
+ if (a.is_sparse_csr()) {
335
+ return isAliasOf(a.values(), b) || isAliasOf(a.crow_indices(), b) ||
336
+ isAliasOf(a.col_indices(), b);
337
+ }
338
+ if (b.is_sparse_csr()) {
339
+ return isAliasOf(a, b.values()) || isAliasOf(a, b.crow_indices()) ||
340
+ isAliasOf(a, b.col_indices());
341
+ }
342
+
343
+ // Opaque tensors such as the ones constructed by the MKL-DNN backend
344
+ // don't have storage so we just compare their TensorImpls.
345
+ // TODO: Find way to expose alias info for opaque tensors.
346
+ if (!a.has_storage() || !b.has_storage()) {
347
+ return a.unsafeGetTensorImpl() == b.unsafeGetTensorImpl();
348
+ }
349
+
350
+ return a.is_alias_of(b);
351
+ }
352
+
353
+ template <typename T>
354
+ bool isListOf() const;
355
+
356
+ public:
357
+ /// @private [doxygen private]
358
+ bool isAliasOf(const IValue& rhs) const {
359
+ if (this->tag != rhs.tag) {
360
+ // Trivially don't alias if the type is different
361
+ return false;
362
+ }
363
+
364
+ // Tensors should be compared based on internal storage
365
+ if (this->isTensor()) {
366
+ return isAliasOf(this->toTensor(), rhs.toTensor());
367
+ }
368
+
369
+ if (!isIntrusivePtr()) {
370
+ // Primitive types don't alias anything
371
+ return false;
372
+ }
373
+
374
+ AT_ASSERT(rhs.isIntrusivePtr());
375
+
376
+ // Other types can be compared by their ptr value
377
+ return this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr;
378
+ }
379
+
380
+ /// @private [doxygen private]
381
+ size_t use_count() const noexcept {
382
+ if (isTensor()) {
383
+ return payload.as_tensor.use_count();
384
+ }
385
+
386
+ if (!isIntrusivePtrLegacyBehavior()) {
387
+ return 1;
388
+ }
389
+
390
+ if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) {
391
+ return 0;
392
+ }
393
+ return c10::raw::intrusive_ptr::use_count(payload.u.as_intrusive_ptr);
394
+ }
395
+
396
+ /// @private [doxygen private]
397
+ void swap(IValue& rhs) noexcept {
398
+ if (isTensor() && rhs.isTensor()) {
399
+ std::swap(payload.as_tensor, rhs.payload.as_tensor);
400
+ } else if (isTensor()) {
401
+ at::Tensor t = std::move(payload.as_tensor);
402
+ // As far as I can tell, omitting the usual explicit destructor call
403
+ // is not UB in and of itself, and it's a slight perf win. The
404
+ // destructor is a no-op, because the moved-from Tensor is
405
+ // effectively an intrusive_ptr in the null state, so we don't need
406
+ // the behavior for correctness reasons either. Leaving this
407
+ // explanatory comment, including commented-out destructor call, to
408
+ // make this abundantly clear.
409
+ //
410
+ // payload.as_tensor.~Tensor();
411
+ payload.u = rhs.payload.u;
412
+ new (&rhs.payload.as_tensor) at::Tensor(std::move(t));
413
+ } else if (rhs.isTensor()) {
414
+ rhs.swap(*this);
415
+ return;
416
+ } else {
417
+ std::swap(payload.u, rhs.payload.u);
418
+ }
419
+ std::swap(tag, rhs.tag);
420
+ }
421
+
422
+ // Accessors for subtypes are arranged together below
423
+ // While some of these accessors could be generated through templates,
424
+ // we prefer to write them manually for clarity
425
+
426
+ IValue(at::TensorBase t) : tag(Tag::Tensor) {
427
+ new (&payload.as_tensor) at::Tensor(std::move(t));
428
+ }
429
+ bool isTensor() const {
430
+ return Tag::Tensor == tag;
431
+ }
432
+
433
+ private:
434
+ // Outlined error path so that toTensor() can be inlined.
435
+ [[noreturn]] void reportToTensorTypeError() const;
436
+
437
+ public:
438
+ at::Tensor toTensor() &&;
439
+ at::Tensor& toTensor() &;
440
+ const at::Tensor& toTensor() const&;
441
+ at::TensorImpl* unsafeToTensorImpl() const {
442
+ TORCH_INTERNAL_ASSERT(isTensor());
443
+ return payload.as_tensor.unsafeGetTensorImpl();
444
+ }
445
+
446
+ IValue(at::Storage s) : tag(Tag::Storage) {
447
+ payload.u.as_intrusive_ptr =
448
+ null_to_undefined_tensor(s.unsafeReleaseStorageImpl());
449
+ }
450
+ bool isStorage() const {
451
+ return Tag::Storage == tag;
452
+ }
453
+ c10::Storage toStorage() &&;
454
+ c10::Storage toStorage() const&;
455
+
456
+ const IValue& toIValue() const {
457
+ return *this;
458
+ }
459
+ IValue& toIValue() {
460
+ return *this;
461
+ }
462
+
463
+ /// @private [doxygen private]
464
+ IValue(intrusive_ptr<caffe2::Blob> blob) : tag(Tag::Blob) {
465
+ // TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract
466
+ // and store it as a Tensor instead.
467
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
468
+ }
469
+
470
+ /// @private [doxygen private]
471
+ bool isBlob() const {
472
+ return Tag::Blob == tag;
473
+ }
474
+
475
+ /// @private [doxygen private]
476
+ c10::intrusive_ptr<caffe2::Blob> toBlob() &&;
477
+
478
+ /// @private [doxygen private]
479
+ c10::intrusive_ptr<caffe2::Blob> toBlob() const&;
480
+
481
+ // Capsule. No new callsites of these APIs should
482
+ // be introduced.
483
+ static inline IValue make_capsule(
484
+ intrusive_ptr<torch::CustomClassHolder> blob);
485
+ bool isCapsule() const {
486
+ return Tag::Capsule == tag;
487
+ }
488
+ c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() &&;
489
+ c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() const&;
490
+
491
+ // Custom C++ classes
492
+ template <
493
+ typename T,
494
+ std::enable_if_t<
495
+ std::is_base_of<torch::CustomClassHolder, T>::value,
496
+ int> = 0>
497
+ IValue(intrusive_ptr<T> custom_class);
498
+ bool isCustomClass() const;
499
+ template <typename T>
500
+ c10::intrusive_ptr<T> toCustomClass() &&;
501
+ template <typename T>
502
+ c10::intrusive_ptr<T> toCustomClass() const&;
503
+
504
+ // Tuple
505
+ IValue(c10::intrusive_ptr<ivalue::Tuple> v);
506
+
507
+ template <
508
+ typename... Args,
509
+ std::enable_if_t<
510
+ !std::disjunction<
511
+ std::is_lvalue_reference<Args>...,
512
+ std::negation<std::is_constructible<IValue, Args>>...>::value,
513
+ std::nullptr_t> = nullptr>
514
+ IValue(const std::tuple<Args...>& t);
515
+ template <
516
+ typename... Args,
517
+ std::enable_if_t<
518
+ !std::disjunction<
519
+ std::is_lvalue_reference<Args>...,
520
+ std::negation<std::is_constructible<IValue, Args>>...>::value,
521
+ std::nullptr_t> = nullptr>
522
+ IValue(std::tuple<Args...>&& t);
523
+ bool isTuple() const {
524
+ return Tag::Tuple == tag;
525
+ }
526
+ c10::intrusive_ptr<ivalue::Tuple> toTuple() &&;
527
+ c10::intrusive_ptr<ivalue::Tuple> toTuple() const&;
528
+ C10_NODISCARD ivalue::Tuple& toTupleRef() const;
529
+
530
+ // Double
531
+ IValue(double d) : tag(Tag::Double) {
532
+ payload.u.as_double = d;
533
+ }
534
+ bool isDouble() const {
535
+ return Tag::Double == tag;
536
+ }
537
+ double toDouble() const {
538
+ AT_ASSERT(isDouble());
539
+ return payload.u.as_double;
540
+ }
541
+
542
+ // ComplexDouble
543
+ template <typename T>
544
+ IValue(c10::complex<T> c);
545
+ bool isComplexDouble() const {
546
+ return Tag::ComplexDouble == tag;
547
+ }
548
+ c10::complex<double> toComplexDouble() const;
549
+
550
+ // Future
551
+ IValue(c10::intrusive_ptr<ivalue::Future> v);
552
+ bool isFuture() const {
553
+ return Tag::Future == tag;
554
+ }
555
+ c10::intrusive_ptr<ivalue::Future> toFuture() &&;
556
+ c10::intrusive_ptr<ivalue::Future> toFuture() const&;
557
+
558
+ IValue(c10::intrusive_ptr<ivalue::Await> v);
559
+ bool isAwait() const {
560
+ return Tag::Await == tag;
561
+ }
562
+ c10::intrusive_ptr<ivalue::Await> toAwait() &&;
563
+ c10::intrusive_ptr<ivalue::Await> toAwait() const&;
564
+
565
+ // RRef
566
+ IValue(c10::intrusive_ptr<c10::RRefInterface> v);
567
+ bool isRRef() const {
568
+ return Tag::RRef == tag;
569
+ }
570
+ c10::intrusive_ptr<c10::RRefInterface> toRRef() &&;
571
+ c10::intrusive_ptr<c10::RRefInterface> toRRef() const&;
572
+
573
+ // Quantizer
574
+ IValue(c10::intrusive_ptr<at::Quantizer> v);
575
+ bool isQuantizer() const {
576
+ return Tag::Quantizer == tag;
577
+ }
578
+ c10::intrusive_ptr<at::Quantizer> toQuantizer() &&;
579
+ c10::intrusive_ptr<at::Quantizer> toQuantizer() const&;
580
+
581
+ // Int
582
+ IValue(int64_t i) : tag(Tag::Int) {
583
+ payload.u.as_int = i;
584
+ }
585
+
586
+ IValue(const c10::SymInt& i) {
587
+ if (auto mi = i.maybe_as_int()) {
588
+ tag = Tag::Int;
589
+ payload.u.as_int = *mi;
590
+ } else {
591
+ tag = Tag::SymInt;
592
+ payload.u.as_intrusive_ptr = i.toSymNode().release();
593
+ }
594
+ }
595
+
596
+ bool isSymInt() const {
597
+ return Tag::SymInt == tag;
598
+ }
599
+
600
+ c10::SymInt toSymInt() &&;
601
+ c10::SymInt toSymInt() const&;
602
+
603
+ IValue(const c10::SymFloat& i) {
604
+ if (i.is_symbolic()) {
605
+ tag = Tag::SymFloat;
606
+ payload.u.as_intrusive_ptr = i.toSymNodeImpl().release();
607
+ } else {
608
+ tag = Tag::Double;
609
+ payload.u.as_double = i.as_float_unchecked();
610
+ }
611
+ }
612
+
613
+ bool isSymFloat() const {
614
+ return Tag::SymFloat == tag;
615
+ }
616
+
617
+ c10::SymFloat toSymFloat() &&;
618
+ c10::SymFloat toSymFloat() const&;
619
+
620
+ IValue(const c10::SymBool& i) {
621
+ if (auto mi = i.maybe_as_bool()) {
622
+ tag = Tag::Bool;
623
+ payload.u.as_int = *mi;
624
+ } else {
625
+ tag = Tag::SymBool;
626
+ payload.u.as_intrusive_ptr = i.toSymNodeImpl().release();
627
+ }
628
+ }
629
+
630
+ bool isSymBool() const {
631
+ return Tag::SymBool == tag;
632
+ }
633
+
634
+ c10::SymBool toSymBool() &&;
635
+ c10::SymBool toSymBool() const&;
636
+
637
+ // allow you to pass literals (3, 4) without ambiguity
638
+ IValue(int32_t i) : IValue(static_cast<int64_t>(i)) {}
639
+
640
+ bool isInt() const {
641
+ return Tag::Int == tag;
642
+ }
643
+
644
+ int64_t toInt() const {
645
+ AT_ASSERT(isInt());
646
+ return payload.u.as_int;
647
+ }
648
+
649
+ // Bool
650
+ IValue(bool b) : tag(Tag::Bool) {
651
+ #if defined(__clang__) && defined(__x86_64__)
652
+ // Initializing entire payload stops valgrind's from reporting
653
+ // "jump or move depends on uninitialised value" in IValue copy constructor
654
+ // See https://github.com/pytorch/pytorch/issues/37117
655
+ payload.u.as_int = b;
656
+ #else
657
+ payload.u.as_bool = b;
658
+ #endif
659
+ }
660
+ bool isBool() const {
661
+ return Tag::Bool == tag;
662
+ }
663
+ bool toBool() const {
664
+ AT_ASSERT(isBool());
665
+ return payload.u.as_bool;
666
+ }
667
+
668
+ // IntList
669
+ bool isIntList() const;
670
+ bool isSymIntList() const;
671
+ c10::List<int64_t> toIntList() &&;
672
+ c10::List<int64_t> toIntList() const&;
673
+ std::vector<int64_t> toIntVector() const;
674
+ std::vector<c10::SymInt> toSymIntVector() const;
675
+ at::DimVector toDimVector() const;
676
+
677
+ // ConstantString
678
+ IValue(c10::intrusive_ptr<ivalue::ConstantString> v);
679
+ IValue(std::string v);
680
+ IValue(const char* v) : IValue(std::string(v)) {}
681
+ IValue(c10::string_view v) : IValue(std::string(v)){};
682
+ bool isString() const {
683
+ return Tag::String == tag;
684
+ }
685
+ c10::intrusive_ptr<ivalue::ConstantString> toString() &&;
686
+ c10::intrusive_ptr<ivalue::ConstantString> toString() const&;
687
+ const std::string& toStringRef() const;
688
+ c10::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
689
+ const;
690
+ c10::string_view toStringView() const;
691
+
692
+ // DoubleList
693
+ bool isDoubleList() const;
694
+ c10::List<double> toDoubleList() &&;
695
+ c10::List<double> toDoubleList() const&;
696
+ std::vector<double> toDoubleVector() const;
697
+
698
+ // ComplexDoubleList
699
+ bool isComplexDoubleList() const;
700
+ c10::List<c10::complex<double>> toComplexDoubleList() &&;
701
+ c10::List<c10::complex<double>> toComplexDoubleList() const&;
702
+ std::vector<c10::complex<double>> toComplexDoubleVector() const;
703
+
704
+ // BoolList
705
+ bool isBoolList() const;
706
+ c10::List<bool> toBoolList() &&;
707
+ c10::List<bool> toBoolList() const&;
708
+
709
+ // TensorList
710
+ bool isTensorList() const;
711
+ c10::List<at::Tensor> toTensorList() &&;
712
+ c10::List<at::Tensor> toTensorList() const&;
713
+ std::vector<at::Tensor> toTensorVector() const;
714
+
715
+ // OptionalTensorList
716
+ bool isOptionalTensorList() const;
717
+ c10::List<c10::optional<at::Tensor>> toOptionalTensorList() &&;
718
+ c10::List<c10::optional<at::Tensor>> toOptionalTensorList() const&;
719
+ std::vector<c10::optional<at::Tensor>> toOptionalTensorVector() const;
720
+
721
+ // GenericList
722
+ IValue(c10::List<IValue> v);
723
+ bool isList() const {
724
+ return Tag::GenericList == tag;
725
+ }
726
+ c10::List<IValue> toList() &&;
727
+ c10::List<IValue> toList() const&;
728
+ c10::ArrayRef<IValue> toListRef() const;
729
+
730
+ // Some template constructors of IValue calls another constructor recursively.
731
+ // This SFINAEs the called constructor exists.
732
+ template <class T>
733
+ using enable_if_ivalue_constructible =
734
+ std::enable_if_t<std::is_constructible<IValue, T>::value, std::nullptr_t>;
735
+
736
+ // The rule for lists is more complicated; the generic constructor is only
737
+ // acceptable if your element isn't SymInt. If you do have a SymInt element,
738
+ // then you must also, at construction time, check if you can decay the list
739
+ // into an int list (this is MANDATORY, as at a use site we may expect
740
+ // toIntList to work even if at the call site you had a SymIntArrayRef
741
+ // argument). In practice, only SymIntArrayRef is used this way, so we
742
+ // didn't bother making it work for the other constructors, we just make sure
743
+ // they're not selectable.
744
+ template <class T>
745
+ using enable_if_list_is_ivalue_constructible = std::enable_if_t<
746
+ std::is_constructible<IValue, T>::value &&
747
+ !std::is_same<T, c10::SymInt>::value,
748
+ std::nullptr_t>;
749
+
750
+ template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
751
+ IValue(c10::List<T>&& v);
752
+ template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
753
+ IValue(const c10::List<T>& v);
754
+ template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
755
+ IValue(at::ArrayRef<T> v);
756
+ template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
757
+ IValue(const std::vector<T>& v);
758
+ template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
759
+ IValue(std::vector<T>&& v);
760
+ template <class T, size_t N>
761
+ IValue(std::array<T, N> v);
762
+
763
+ // Manual constructors for lists of symints, which decay to int list if
764
+ // possible. To avoid ambiguous overload situations, we template them
765
+ // to prevent implicit conversions
766
+ template <class T>
767
+ using enable_if_symint =
768
+ std::enable_if_t<std::is_same<T, c10::SymInt>::value, std::nullptr_t>;
769
+
770
+ template <class T, enable_if_symint<T> = nullptr>
771
+ IValue(at::ArrayRef<T> v);
772
+ template <class T, enable_if_symint<T> = nullptr>
773
+ IValue(at::OptionalArrayRef<T> v);
774
+ template <class T, enable_if_symint<T> = nullptr>
775
+ IValue(const std::vector<T>& v);
776
+ template <class T, enable_if_symint<T> = nullptr>
777
+ IValue(std::vector<T>&& v);
778
+
779
+
780
+ template <class T>
781
+ using enable_if_ilist_is_ivalue_constructible = std::enable_if_t<
782
+ std::is_constructible<IValue, T>::value &&
783
+ std::is_constructible<IValue, typename IListRef<T>::boxed_type>::
784
+ value &&
785
+ !std::is_same<T, c10::SymInt>::value,
786
+ std::nullptr_t>;
787
+
788
+ template <class T, enable_if_ilist_is_ivalue_constructible<T> = nullptr>
789
+ IValue(c10::IListRef<T> v);
790
+
791
+ // GenericDict
792
+ IValue(c10::Dict<IValue, IValue> v);
793
+ bool isGenericDict() const {
794
+ return Tag::GenericDict == tag;
795
+ }
796
+ c10::Dict<IValue, IValue> toGenericDict() &&;
797
+ c10::Dict<IValue, IValue> toGenericDict() const&;
798
+
799
+ template <class Key, class Value>
800
+ IValue(c10::Dict<Key, Value> v);
801
+
802
+ template <class Key, class Value>
803
+ /// \cond
804
+ /// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN
805
+ C10_DEPRECATED_MESSAGE(
806
+ "IValues based on std::unordered_map<K, V> are slow and deprecated. Please use c10::Dict<K, V> instead.")
807
+ /// \endcond
808
+ IValue(std::unordered_map<Key, Value> v);
809
+
810
+ template <class T, enable_if_ivalue_constructible<T> = nullptr>
811
+ IValue(c10::optional<T> v);
812
+ template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
813
+ IValue(c10::OptionalArrayRef<T> v);
814
+ IValue(c10::nullopt_t);
815
+
816
+ // ClassType
817
+ IValue(c10::intrusive_ptr<ivalue::Object> v);
818
+ bool isObject() const {
819
+ return tag == Tag::Object;
820
+ }
821
+ c10::intrusive_ptr<ivalue::Object> toObject() &&;
822
+ c10::intrusive_ptr<ivalue::Object> toObject() const&;
823
+ ivalue::Object& toObjectRef() const;
824
+
825
+ torch::jit::Module toModule() const;
826
+ bool isModule() const;
827
+
828
+ // PyObject
829
+ IValue(c10::intrusive_ptr<ivalue::PyObjectHolder> v);
830
+ bool isPyObject() const {
831
+ return tag == Tag::PyObject;
832
+ }
833
+ c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() &&;
834
+ c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() const&;
835
+ PyObject* toPyObject() const;
836
+
837
+ // Enum
838
+ explicit IValue(c10::intrusive_ptr<ivalue::EnumHolder> v);
839
+ bool isEnum() const {
840
+ return tag == Tag::Enum;
841
+ }
842
+ c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() &&;
843
+ c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() const&;
844
+
845
+ // None
846
+ IValue() : tag(Tag::None) {}
847
+ bool isNone() const {
848
+ return Tag::None == tag;
849
+ }
850
+ std::string toNone() const {
851
+ AT_ASSERT(isNone());
852
+ return "None";
853
+ }
854
+
855
+ static IValue uninitialized() {
856
+ auto i = IValue();
857
+ i.tag = Tag::Uninitialized;
858
+ return i;
859
+ }
860
+
861
+ // Scalar, which gets encoded as either an Int, a Double or a ComplexDouble
862
+ IValue(const at::Scalar& s) : IValue() {
863
+ // NB: do the symbolic versions first, as isFloatingPoint is true
864
+ // for both SymFloat and double
865
+ if (s.isSymInt()) {
866
+ tag = Tag::SymInt;
867
+ payload.u.as_intrusive_ptr = s.toSymInt().toSymNode().release();
868
+ } else if (s.isSymFloat()) {
869
+ tag = Tag::SymFloat;
870
+ payload.u.as_intrusive_ptr = s.toSymFloat().toSymNodeImpl().release();
871
+ } else if (s.isSymBool()) {
872
+ tag = Tag::SymBool;
873
+ payload.u.as_intrusive_ptr = s.toSymBool().toSymNodeImpl().release();
874
+ } else if (s.isFloatingPoint()) {
875
+ tag = Tag::Double;
876
+ payload.u.as_double = s.toDouble();
877
+ } else if (s.isComplex()) {
878
+ *this = s.toComplexDouble();
879
+ } else if (s.isBoolean()) {
880
+ tag = Tag::Bool;
881
+ payload.u.as_bool = s.toBool();
882
+ } else {
883
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
884
+ s.isIntegral(false), "Unknown type in Scalar");
885
+ tag = Tag::Int;
886
+ payload.u.as_int = s.toLong();
887
+ }
888
+ }
889
+
890
+ bool isScalar() const {
891
+ return isDouble() || isInt() || isComplexDouble() || isBool() ||
892
+ isSymInt() || isSymFloat() || isSymBool();
893
+ }
894
+
895
+ at::Scalar toScalar() const {
896
+ if (isDouble())
897
+ return toDouble();
898
+ else if (isInt())
899
+ return toInt();
900
+ else if (isComplexDouble())
901
+ return toComplexDouble();
902
+ else if (isBool())
903
+ return toBool();
904
+ else if (isSymInt())
905
+ return toSymInt();
906
+ else if (isSymFloat())
907
+ return toSymFloat();
908
+ else if (isSymBool())
909
+ return toSymBool();
910
+ throw std::runtime_error("IValue is not a Scalar");
911
+ }
912
+
913
+ // Device
914
+ IValue(c10::Device d) : tag(Tag::Device) {
915
+ payload.u.as_device.type = d.type();
916
+ payload.u.as_device.index = d.index();
917
+ }
918
+ bool isDevice() const {
919
+ return Tag::Device == tag;
920
+ }
921
+ c10::Device toDevice() const {
922
+ AT_ASSERT(isDevice());
923
+ return c10::Device(payload.u.as_device.type, payload.u.as_device.index);
924
+ }
925
+
926
+ // Stream
927
+ IValue(c10::Stream s) : tag(Tag::Stream) {
928
+ auto v = c10::make_intrusive<ivalue::StreamData3Holder>(s.pack3());
929
+ payload.u.as_intrusive_ptr = v.release();
930
+ }
931
+ c10::Stream toStream() &&;
932
+ c10::Stream toStream() const&;
933
+ bool isStream() const {
934
+ return Tag::Stream == tag;
935
+ }
936
+
937
+ // ScalarType
938
+ IValue(ScalarType t)
939
+ : IValue(static_cast<std::underlying_type<ScalarType>::type>(t)) {}
940
+ at::ScalarType toScalarType() const {
941
+ return static_cast<at::ScalarType>(toInt());
942
+ }
943
+
944
+ // Layout
945
+ IValue(Layout l)
946
+ : IValue(static_cast<std::underlying_type<Layout>::type>(l)) {}
947
+ at::Layout toLayout() const {
948
+ return static_cast<at::Layout>(toInt());
949
+ }
950
+
951
+ // MemoryFormat
952
+ IValue(MemoryFormat m)
953
+ : IValue(static_cast<std::underlying_type<MemoryFormat>::type>(m)) {}
954
+ at::MemoryFormat toMemoryFormat() const {
955
+ return static_cast<at::MemoryFormat>(toInt());
956
+ }
957
+
958
+ // QScheme
959
+ IValue(at::QScheme qscheme) : tag(Tag::Int) {
960
+ payload.u.as_int = static_cast<int64_t>(qscheme);
961
+ }
962
+
963
+ at::QScheme toQScheme() const {
964
+ return static_cast<at::QScheme>(toInt());
965
+ }
966
+
967
+ // Dimname
968
+ IValue(at::Dimname dimname) : IValue(dimname.symbol().toQualString()) {}
969
+
970
+ at::Dimname toDimname() const {
971
+ return at::Dimname::fromSymbol(Symbol::fromQualString(toStringRef()));
972
+ }
973
+
974
+ // Generator
975
+ IValue(at::Generator g) : tag(Tag::Generator) {
976
+ payload.u.as_intrusive_ptr =
977
+ null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl());
978
+ }
979
+ bool isGenerator() const {
980
+ return Tag::Generator == tag;
981
+ }
982
+ at::Generator toGenerator() &&;
983
+ at::Generator toGenerator() const&;
984
+
985
+ // for debugging
986
+ std::string tagKind() const {
987
+ switch (tag) {
988
+ #define DEFINE_CASE(x) \
989
+ case Tag::x: \
990
+ return #x;
991
+ TORCH_FORALL_TAGS(DEFINE_CASE)
992
+ #undef DEFINE_CASE
993
+ }
994
+ return "InvalidTag(" + std::to_string(static_cast<int>(tag)) + ")";
995
+ }
996
+
997
+ // generic v.to<at::Tensor>() implementations
998
+ // that can be used in special functions like pop/push
999
+ // that use template meta-programming.
1000
+ // prefer the directly named methods when you can,
1001
+ // since they are simpler to understand
1002
+
1003
+ // Note: if you get linker errors saying one of these is missing,
1004
+ // change it to ... && = delete; and you will see better error messages for
1005
+ // why However, we cannot commit this because some compiler versions barf on
1006
+ // it.
1007
+ template <typename T>
1008
+ T to() &&;
1009
+ template <typename T>
1010
+ typename c10::detail::ivalue_to_const_ref_overload_return<T>::type to()
1011
+ const&;
1012
+
1013
+ // ToOptional: convert a IValue to the Optional obj that accepts both T and
1014
+ // None
1015
+ template <typename T>
1016
+ optional<T> toOptional();
1017
+ template <typename T>
1018
+ optional<T> toOptional() const;
1019
+
1020
+ /// @private [doxygen private]
1021
+ /// this is a shallow comparison of two IValues to test the object identity
1022
+ bool isSameIdentity(const IValue& rhs) const;
1023
+
1024
+ // Computes the "official" string representation of an IValue. This produces a
1025
+ // TorchScript expression that can be used to recreate an IValue with the same
1026
+ // value (e.g. when we are printing constants in the serializer).
1027
+ //
1028
+ // Callers can use `customFormatter` to override how `repr()` prints out an
1029
+ // IValue. This is useful if you have some other environment where you can
1030
+ // look up values, and you want to print a reference to that environment (like
1031
+ // the serializer's constant table).
1032
+ //
1033
+ // repr() is not necessarily defined on all objects!
1034
+ std::ostream& repr(
1035
+ std::ostream& stream,
1036
+ std::function<bool(std::ostream&, const IValue& v)> customFormatter)
1037
+ const;
1038
+
1039
+ // Computes an "informal" string representation of an IValue. This should be
1040
+ // used for debugging, or servicing `print()`-like functions.
1041
+ // This is different from `repr()` in that there is no expectation that we can
1042
+ // exactly reconstruct an IValue from the output; feel free to use a
1043
+ // concise/pretty form
1044
+ TORCH_API friend std::ostream& operator<<(std::ostream& out, const IValue& v);
1045
+
1046
+ bool isPtrType() const {
1047
+ if (isTensor()) {
1048
+ return payload.as_tensor.defined();
1049
+ }
1050
+ return isIntrusivePtrLegacyBehavior();
1051
+ }
1052
+
1053
+ /// @private [doxygen private]
1054
+ const void* internalToPointer() const {
1055
+ TORCH_INTERNAL_ASSERT(
1056
+ isPtrType(), "Can only call internalToPointer() for pointer types");
1057
+ if (isTensor()) {
1058
+ return payload.as_tensor.unsafeGetTensorImpl();
1059
+ } else {
1060
+ return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()
1061
+ ? payload.u.as_intrusive_ptr
1062
+ : nullptr;
1063
+ }
1064
+ }
1065
+
1066
+ template <typename T = c10::PlatformType>
1067
+ TypePtr type() const;
1068
+
1069
+ // Detect aliased tensors.
1070
+ struct HashAliasedIValue {
1071
+ size_t hashTensor(const at::Tensor& ten) const {
1072
+ if (ten.is_sparse()) {
1073
+ // COO sparse tensors have a "values" tensor and an "indices" tensor
1074
+ // so this will detect overlap of sparse tensors that share a values
1075
+ // tensor, but not sparse tensors that share an indices tensor.
1076
+ return hashTensor(ten._values());
1077
+ } else if (ten.is_sparse_csr()) {
1078
+ // Sparse CSR tensors have a "values" tensor plus row/column index
1080
+ // tensors, so this will detect overlap of CSR tensors that share a
1081
+ // values tensor, but not CSR tensors that share an index tensor.
1081
+ return hashTensor(ten.values());
1082
+ } else if (!ten.has_storage()) {
1083
+ // Opaque tensors such as the ones constructed by the MKL-DNN backend
1084
+ // don't have storage so we just use their TensorImpls.
1085
+ // TODO: Find way to expose alias info for opaque tensors.
1086
+ return reinterpret_cast<size_t>(ten.unsafeGetTensorImpl());
1087
+ } else {
1088
+ return reinterpret_cast<size_t>(ten.storage().unsafeGetStorageImpl());
1089
+ }
1090
+ }
1091
+ size_t operator()(const IValue& val) const {
1092
+ if (val.isTensor()) {
1093
+ return hashTensor(val.toTensor());
1094
+ }
1095
+ // If it is not a Tensor, then two mutable IValues alias each other only
1096
+ // if they are the same pointer.
1097
+ return val.payload.u.as_int;
1098
+ }
1099
+ };
1100
+
1101
+ struct CompAliasedIValues {
1102
+ bool operator()(const IValue& lhs, const IValue& rhs) const {
1103
+ return lhs.isAliasOf(rhs);
1104
+ }
1105
+ };
1106
+
1107
+ using HashAliasedIValues =
1108
+ std::unordered_set<IValue, HashAliasedIValue, CompAliasedIValues>;
1109
+ using HashAliasedIValueMap =
1110
+ std::unordered_map<IValue, IValue, HashAliasedIValue, CompAliasedIValues>;
1111
+
1112
+ // Chechs if this and rhs has a subvalues in common.
1113
+ // [t1,t2] and [t2, t3] returns true.
1114
+ bool overlaps(const IValue& rhs) const;
1115
+
1116
+ // Inserts all subvalues of this in subValues.
1117
+ void getSubValues(HashAliasedIValues& subValues) const;
1118
+
1119
+ // Apply visitor to every subvalue.
1120
+ // TODO: There are several places that recurse over IValue. This is fragile.
1121
+ // This visitor should be used to recurse over ivalues.
1122
+ void visit(const std::function<bool(const IValue&)>& visitor) const;
1123
+ IValue deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
1124
+ IValue deepcopy(
1125
+ HashAliasedIValueMap& memo,
1126
+ c10::optional<at::Device> device = c10::nullopt) const;
1127
+
1128
+ private:
1129
+ static c10::intrusive_ptr_target* null_to_undefined_tensor(
1130
+ c10::intrusive_ptr_target* p) {
1131
+ return p ? p
1132
+ : static_cast<c10::intrusive_ptr_target*>(
1133
+ c10::UndefinedTensorImpl::singleton());
1134
+ }
1135
+
1136
+ static bool ptrEqual(const IValue& lhs, const IValue& rhs);
1137
+ // NOTE: IValue tags are intentionally private. In the future we may encode
1138
+ // this value different (e.g. using NaN boxing), and this would make it more
1139
+ // costly to determine the tag for all types vs just determining if something
1140
+ // is a particular type. Instead we want clients to use the `isX` methods when
1141
+ // possible. If for perf. reasons you really, absolutely, must have a jump
1142
+ // table, then we can revisit this.
1143
+ enum class Tag : uint32_t {
1144
+ #define DEFINE_TAG(x) x,
1145
+ TORCH_FORALL_TAGS(DEFINE_TAG)
1146
+ #undef DEFINE_TAG
1147
+ };
1148
+
1149
+ #define COUNT_TAG(x) 1 +
1150
+ static constexpr auto kNumTags = TORCH_FORALL_TAGS(COUNT_TAG) 0;
1151
+ #undef COUNT_TAG
1152
+
1153
+ template <
1154
+ class T,
1155
+ class NullType = c10::detail::intrusive_target_default_null_type<T>>
1156
+ c10::intrusive_ptr<T, NullType> moveToIntrusivePtr();
1157
+ template <
1158
+ typename T,
1159
+ class NullType = c10::detail::intrusive_target_default_null_type<T>>
1160
+ c10::intrusive_ptr<T, NullType> toIntrusivePtr() const;
1161
+
1162
+ void destroy() {
1163
+ // We carefully construct this call to both 1) avoid UB by using
1164
+ // the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable
1165
+ // the compiler to generate the same code for each case. It is
1166
+ // surprisingly difficult to get this right.
1167
+ if (isTensor() || isIntrusivePtr()) {
1168
+ c10::intrusive_ptr_target* p = isTensor()
1169
+ ? payload.as_tensor.unsafeGetTensorImpl()
1170
+ : payload.u.as_intrusive_ptr;
1171
+ c10::intrusive_ptr<intrusive_ptr_target, c10::UndefinedTensorImpl>::
1172
+ reclaim(p);
1173
+ // No need to make this destructor call!
1174
+ // payload.as_tensor.~Tensor();
1175
+ }
1176
+ }
1177
+
1178
+ C10_ALWAYS_INLINE void moveFrom(IValue&& rhs) noexcept {
1179
+ if (rhs.isTensor()) {
1180
+ new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor));
1181
+ // As far as I can tell, omitting the usual explicit destructor call
1182
+ // is not UB in and of itself, and it's a slight perf win. The
1183
+ // destructor is a no-op, because the moved-from Tensor is
1184
+ // effectively an intrusive_ptr in the null state, so we don't need
1185
+ // the behavior for correctness reasons either. Leaving this
1186
+ // explanatory comment, including commented-out destructor call, to
1187
+ // make this abundantly clear.
1188
+ //
1189
+ // rhs.payload.as_tensor.~Tensor();
1190
+ } else {
1191
+ payload.u = rhs.payload.u;
1192
+ }
1193
+ tag = rhs.tag;
1194
+ rhs.clearToNone();
1195
+ }
1196
+
1197
+ void clearToNone() noexcept {
1198
+ payload.u.as_int = 0;
1199
+ tag = Tag::None;
1200
+ }
1201
+
1202
+ private:
1203
+ // This is the source of truth for isIntrusivePtr; edit results here
1204
+ // as needed and isIntrusivePtr will pick them up.
1205
+ // NOLINTBEGIN(bugprone-branch-clone)
1206
+ static constexpr bool isIntrusivePtrConstexpr(Tag tag) {
1207
+ switch (tag) {
1208
+ case Tag::None:
1209
+ return false;
1210
+ case Tag::Tensor:
1211
+ return false;
1212
+ case Tag::Storage:
1213
+ return true;
1214
+ case Tag::Generator:
1215
+ return true;
1216
+ case Tag::Double:
1217
+ return false;
1218
+ case Tag::ComplexDouble:
1219
+ return true;
1220
+ case Tag::Int:
1221
+ return false;
1222
+ case Tag::SymInt:
1223
+ return true;
1224
+ case Tag::SymFloat:
1225
+ return true;
1226
+ case Tag::SymBool:
1227
+ return true;
1228
+ case Tag::Bool:
1229
+ return false;
1230
+ case Tag::Tuple:
1231
+ return true;
1232
+ case Tag::String:
1233
+ return true;
1234
+ case Tag::Blob:
1235
+ return true;
1236
+ case Tag::GenericList:
1237
+ return true;
1238
+ case Tag::GenericDict:
1239
+ return true;
1240
+ case Tag::Future:
1241
+ return true;
1242
+ case Tag::Await:
1243
+ return true;
1244
+ case Tag::Device:
1245
+ return false;
1246
+ case Tag::Stream:
1247
+ return true;
1248
+ case Tag::Object:
1249
+ return true;
1250
+ case Tag::PyObject:
1251
+ return true;
1252
+ case Tag::Uninitialized:
1253
+ return false;
1254
+ case Tag::Capsule:
1255
+ return true;
1256
+ case Tag::RRef:
1257
+ return true;
1258
+ case Tag::Quantizer:
1259
+ return true;
1260
+ case Tag::Enum:
1261
+ return true;
1262
+ }
1263
+ return false;
1264
+ }
1265
+ // NOLINTEND(bugprone-branch-clone)
1266
+
1267
+ public:
1268
+ // Don't edit this just to add results for new tags; edit
1269
+ // isIntrusivePtrConstexpr above.
1270
+ bool isIntrusivePtr() const {
1271
+ // Implementation NOTE: the switch in isIntrusivePtrConstexpr
1272
+ // above is the previous production implementation of this
1273
+ // function. We observed that, at least on x86_64, the generated
1274
+ // instruction sequence was a similar bit vector test to what we
1275
+ // have manually implemented below, except that there was an extra
1276
+ // "bounds check" branch confirming, essentially, that `tag <
1277
+ // kNumTags` and providing a consistent result in that case. We
1278
+ // don't care about the result if tag is out of bounds, so we'd
1279
+ // like to eliminate that comparison and branch; manually
1280
+ // implementing this function as a bit test is the simplest way I
1281
+ // could find to accomplish that elimination.
1282
+ static constexpr uint32_t kTruthTableBitVector =
1283
+ #define TRUTH_TABLE_ENTRY(tag) \
1284
+ (uint32_t(isIntrusivePtrConstexpr(Tag::tag)) << uint32_t(Tag::tag)) |
1285
+ TORCH_FORALL_TAGS(TRUTH_TABLE_ENTRY)
1286
+ #undef TRUTH_TABLE_ENTRY
1287
+ 0;
1288
+
1289
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
1290
+ static_cast<uint32_t>(tag) < kNumTags,
1291
+ "unexpected tag ",
1292
+ static_cast<int>(tag));
1293
+ return kTruthTableBitVector & (1 << (uint32_t(tag) % 32));
1294
+ }
1295
+
1296
+ // Storage and Generator were treated specially when
1297
+ // is_intrusive_ptr was stored as explicit state. This getter
1298
+ // preserves the old behavior for use with WeakIValue for now.
1299
+ bool isIntrusivePtrLegacyBehavior() const {
1300
+ if (tag == Tag::Storage || tag == Tag::Generator) {
1301
+ return payload.u.as_intrusive_ptr !=
1302
+ c10::UndefinedTensorImpl::singleton();
1303
+ } else {
1304
+ return isIntrusivePtr();
1305
+ }
1306
+ }
1307
+
1308
+ union Payload {
1309
+ // [TriviallyCopyablePayload]
1310
+ // We use a nested union here so that we can make the copy easy
1311
+ // and efficient in the non-tensor (i.e., trivially copyable)
1312
+ // case. Specifically, we do not have to do a switch-on-tag to
1313
+ // figure out which union member to assign; we can just use
1314
+ // TriviallyCopyablePayload::operator=.
1315
+ union TriviallyCopyablePayload {
1316
+ TriviallyCopyablePayload() : as_int(0) {}
1317
+ int64_t as_int;
1318
+ double as_double;
1319
+ bool as_bool;
1320
+ // Invariant: never nullptr; null state is represented as
1321
+ // c10::UndefinedTensorImpl::singleton() for consistency of
1322
+ // representation with Tensor.
1323
+ c10::intrusive_ptr_target* as_intrusive_ptr;
1324
+ struct {
1325
+ c10::DeviceType type;
1326
+ DeviceIndex index;
1327
+ } as_device;
1328
+ } u;
1329
+ at::Tensor as_tensor;
1330
+ Payload() : u() {}
1331
+ ~Payload() {}
1332
+ };
1333
+
1334
+ IValue(const Payload& p, Tag t) : tag(t) {
1335
+ if (isTensor()) {
1336
+ new (&payload.as_tensor) at::Tensor(p.as_tensor);
1337
+ } else {
1338
+ payload.u = p.u;
1339
+ }
1340
+ }
1341
+
1342
+ template <typename T>
1343
+ struct TagType {};
1344
+
1345
+ friend MaybeOwnedTraits<IValue>;
1346
+
1347
+ Payload payload;
1348
+ Tag tag{IValue::Tag::None};
1349
+ friend struct WeakIValue;
1350
+ };
1351
+
1352
+ struct TORCH_API WeakIValue final {
1353
+ WeakIValue() = default;
1354
+
1355
+ WeakIValue(const WeakIValue& rhs)
1356
+ : payload(rhs.payload),
1357
+ tag(rhs.tag),
1358
+ is_intrusive_ptr(rhs.is_intrusive_ptr) {
1359
+ if (is_intrusive_ptr &&
1360
+ payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
1361
+ c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
1362
+ }
1363
+ }
1364
+ WeakIValue(const IValue& rhs)
1365
+ : tag(rhs.tag), is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) {
1366
+ if (rhs.isTensor()) {
1367
+ payload.as_intrusive_ptr = rhs.unsafeToTensorImpl();
1368
+ is_intrusive_ptr = true;
1369
+ } else {
1370
+ payload = rhs.payload.u;
1371
+ }
1372
+ if (is_intrusive_ptr) {
1373
+ if (payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
1374
+ c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
1375
+ }
1376
+ }
1377
+ }
1378
+ WeakIValue(WeakIValue&& rhs) noexcept : WeakIValue() {
1379
+ swap(rhs);
1380
+ }
1381
+ ~WeakIValue() {
1382
+ if (is_intrusive_ptr &&
1383
+ payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
1384
+ c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr);
1385
+ }
1386
+ }
1387
+ WeakIValue& operator=(WeakIValue&& rhs) & noexcept {
1388
+ WeakIValue(std::move(rhs)).swap(*this); // this also sets rhs to None
1389
+ return *this;
1390
+ }
1391
+ WeakIValue& operator=(WeakIValue const& rhs) & {
1392
+ WeakIValue(rhs).swap(*this);
1393
+ return *this;
1394
+ }
1395
+ void swap(WeakIValue& rhs) noexcept {
1396
+ std::swap(payload, rhs.payload);
1397
+ std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
1398
+ std::swap(tag, rhs.tag);
1399
+ }
1400
+
1401
+ bool isSameIdentity(const WeakIValue& rhs) const {
1402
+ return payload.as_int == rhs.payload.as_int && tag == rhs.tag &&
1403
+ is_intrusive_ptr == rhs.is_intrusive_ptr;
1404
+ }
1405
+
1406
+ IValue lock() const {
1407
+ if (!is_intrusive_ptr) {
1408
+ IValue::Payload newPayload;
1409
+ newPayload.u = payload;
1410
+ return IValue(newPayload, tag);
1411
+ }
1412
+ if (IValue::Tag::Tensor == tag) {
1413
+ auto temp =
1414
+ c10::weak_intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl>::
1415
+ reclaim(static_cast<at::TensorImpl*>(payload.as_intrusive_ptr));
1416
+ c10::intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl> ip(
1417
+ temp.lock());
1418
+ temp.release();
1419
+ if (!ip) {
1420
+ return IValue();
1421
+ } else {
1422
+ return IValue(at::Tensor(std::move(ip)));
1423
+ }
1424
+ } else {
1425
+ auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target>::reclaim(
1426
+ payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
1427
+ ? nullptr
1428
+ : payload.as_intrusive_ptr);
1429
+ IValue::Payload pl;
1430
+ pl.u.as_intrusive_ptr = temp.lock().release();
1431
+ temp.release();
1432
+ if (!pl.u.as_intrusive_ptr) {
1433
+ return IValue();
1434
+ } else {
1435
+ return IValue(pl, tag);
1436
+ }
1437
+ }
1438
+ }
1439
+
1440
+ size_t use_count() const noexcept {
1441
+ if (!is_intrusive_ptr) {
1442
+ return 1;
1443
+ }
1444
+ auto temp = c10::weak_intrusive_ptr<
1445
+ c10::intrusive_ptr_target,
1446
+ c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
1447
+ size_t result = temp.use_count();
1448
+ temp.release();
1449
+ return result;
1450
+ }
1451
+
1452
+ size_t weak_use_count() const noexcept {
1453
+ if (!is_intrusive_ptr) {
1454
+ return 1;
1455
+ }
1456
+ auto temp = c10::weak_intrusive_ptr<
1457
+ c10::intrusive_ptr_target,
1458
+ c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
1459
+ size_t result = temp.weak_use_count();
1460
+ temp.release();
1461
+ return result;
1462
+ }
1463
+ size_t hash() const {
1464
+ return payload.as_int;
1465
+ }
1466
+
1467
+ private:
1468
+ using Payload = IValue::Payload::TriviallyCopyablePayload;
1469
+ Payload payload;
1470
+ IValue::Tag tag{IValue::Tag::None};
1471
+ bool is_intrusive_ptr{false};
1472
+ };
1473
+
1474
+ // An owning pointer to a type. When the type is class type, it requires a pair
1475
+ // of shared_ptrs to the class type and its owning CU, so that the class type is
1476
+ // guaranteed to stay alive as long as we hold this object.
1477
+ struct TORCH_API StrongTypePtr {
1478
+ StrongTypePtr(std::shared_ptr<torch::jit::CompilationUnit> cu, TypePtr type);
1479
+
1480
+ std::shared_ptr<torch::jit::CompilationUnit> cu_;
1481
+ TypePtr type_;
1482
+ };
1483
+
1484
+ // [Constant Object Weak CompilationUnit Reference]
1485
+ // A non owning pointer to a type. When a class get inserted as a constant
1486
+ // into a graph, if we used a strong pointer we would have a circular reference
1487
+ // from Object -> CompilationUnit and CompilationUnit -> Graph (which owns the
1488
+ // Constant Object)
1489
+ struct TORCH_API WeakTypePtr {
1490
+ WeakTypePtr(std::weak_ptr<torch::jit::CompilationUnit> cu, TypePtr type);
1491
+
1492
+ std::weak_ptr<torch::jit::CompilationUnit> cu_;
1493
+ TypePtr type_;
1494
+ };
1495
+
1496
+ // internal build errors with std::variant :/
1497
+ struct WeakOrStrongCompilationUnit {
1498
+ explicit WeakOrStrongCompilationUnit(
1499
+ std::shared_ptr<torch::jit::CompilationUnit> shared_cu)
1500
+ : strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {}
1501
+
1502
+ explicit WeakOrStrongCompilationUnit(
1503
+ std::weak_ptr<torch::jit::CompilationUnit> weak_cu)
1504
+ : strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {}
1505
+
1506
+ std::shared_ptr<torch::jit::CompilationUnit> getStrongRefOrThrow() const {
1507
+ TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt);
1508
+ return *strong_ptr_;
1509
+ }
1510
+
1511
+ std::weak_ptr<torch::jit::CompilationUnit> getWeakRefOrThrow() const {
1512
+ TORCH_INTERNAL_ASSERT(weak_ptr_ != c10::nullopt);
1513
+ return *weak_ptr_;
1514
+ }
1515
+
1516
+ bool holdingStrongRef() const {
1517
+ return strong_ptr_ != c10::nullopt;
1518
+ }
1519
+
1520
+ bool holdingEmptyStrongRef() const {
1521
+ return holdingStrongRef() && *strong_ptr_ == nullptr;
1522
+ }
1523
+
1524
+ c10::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
1525
+ c10::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_;
1526
+ };
1527
+
1528
+ // An Object will hold a non-owning Compilation Unit reference if it is a
1529
+ // Constant in the graph and a Owning reference otherwise
1530
+ struct TORCH_API WeakOrStrongTypePtr {
1531
+ explicit WeakOrStrongTypePtr(WeakTypePtr weak)
1532
+ : cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))),
1533
+ type_(std::move(weak.type_)) {}
1534
+ explicit WeakOrStrongTypePtr(StrongTypePtr strong)
1535
+ : cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))),
1536
+ type_(std::move(strong.type_)) {}
1537
+ explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type)
1538
+ : cu_(std::move(cu)), type_(std::move(type)) {}
1539
+ WeakTypePtr asWeakTypePtr() const;
1540
+
1541
+ WeakOrStrongCompilationUnit cu_;
1542
+ TypePtr type_;
1543
+
1544
+ bool holds_strong_ref() const {
1545
+ return cu_.holdingStrongRef();
1546
+ }
1547
+
1548
+ bool holds_empty_strong_ref() const {
1549
+ return cu_.holdingEmptyStrongRef();
1550
+ }
1551
+ };
1552
+
1553
+ } // namespace c10
1554
+
1555
+ #include <ATen/core/ivalue_inl.h> // IWYU pragma: keep
venv/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h ADDED
@@ -0,0 +1,2545 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <condition_variable>
4
+ #include <memory>
5
+ #include <type_traits>
6
+ #include <utility>
7
+
8
+ #include <ATen/core/Dict.h>
9
+ #include <ATen/core/List.h>
10
+ #include <ATen/core/IListRef.h>
11
+ #include <ATen/core/functional.h>
12
+ #include <ATen/core/jit_type.h>
13
+ #include <ATen/core/qualified_name.h>
14
+ #include <ATen/core/rref_interface.h>
15
+ #include <ATen/core/symbol.h>
16
+ #include <c10/core/DeviceGuard.h>
17
+ #include <c10/core/Event.h>
18
+ #include <c10/core/Scalar.h>
19
+ #include <c10/core/Stream.h>
20
+ #include <c10/core/StreamGuard.h>
21
+ #include <c10/core/TensorImpl.h>
22
+ #include <c10/core/UndefinedTensorImpl.h>
23
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
24
+ #include <c10/util/FunctionRef.h>
25
+ #include <c10/util/Logging.h>
26
+ #include <c10/util/hash.h>
27
+ #include <c10/util/intrusive_ptr.h>
28
+ #include <c10/util/irange.h>
29
+
30
+ namespace torch {
31
+ namespace jit {
32
+ struct Function;
33
+ struct CompilationUnit;
34
+ } // namespace jit
35
+ TORCH_API bool isCustomClass(const c10::IValue& v);
36
+ } // namespace torch
37
+ namespace c10 {
38
+ struct IValue;
39
+ struct ClassType;
40
+ struct TupleType;
41
+ struct EnumType;
42
+ struct InferredType;
43
+
44
+ // For custom class __init__ registration, we need to pass in a function
45
+ // that looks like this: [](IValue x, args...)
46
+
47
+ // However, make_boxed_from_unboxed_functor.h automatically sets the input types
48
+ // of the function by introspecting the types of the functor (which is IValue in
49
+ // this case). However, we need the type it binds to be Foo.
50
+
51
+ // Instead, we pass in a lambda [](ivalue_holder<CurClass> x, args...) from
52
+ // which getTypePtr can recover the original class pointer.
53
+
54
+ template <typename TaggedCapsuleType>
55
+ struct tagged_capsule {
56
+ IValue ivalue;
57
+ };
58
+
59
+ template <class T, class NullType>
60
+ c10::intrusive_ptr<T, NullType> IValue::moveToIntrusivePtr() {
61
+ auto t = c10::intrusive_ptr<T, NullType>::reclaim(
62
+ payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
63
+ ? NullType::singleton()
64
+ : static_cast<T*>(payload.u.as_intrusive_ptr));
65
+ clearToNone();
66
+ return t;
67
+ }
68
+ template <typename T, class NullType>
69
+ c10::intrusive_ptr<T, NullType> IValue::toIntrusivePtr() const {
70
+ if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) {
71
+ return c10::intrusive_ptr<T, NullType>();
72
+ }
73
+ c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
74
+ return c10::intrusive_ptr<T, NullType>::reclaim(
75
+ static_cast<T*>(payload.u.as_intrusive_ptr));
76
+ }
77
+
78
+ template <class T, class U>
79
+ intrusive_ptr<T> static_intrusive_pointer_cast(intrusive_ptr<U> r) {
80
+ return intrusive_ptr<T>::reclaim(static_cast<T*>(r.release()));
81
+ }
82
+
83
+ template <class T, class U>
84
+ intrusive_ptr<T> dynamic_intrusive_pointer_cast(intrusive_ptr<U> r) {
85
+ return intrusive_ptr<T>::reclaim(dynamic_cast<T*>(r.release()));
86
+ }
87
+
88
+ inline c10::intrusive_ptr<ivalue::Future> IValue::toFuture() && {
89
+ AT_ASSERT(isFuture(), "Expected Future but got ", tagKind());
90
+ return moveToIntrusivePtr<ivalue::Future>();
91
+ }
92
+ inline c10::intrusive_ptr<ivalue::Future> IValue::toFuture() const& {
93
+ AT_ASSERT(isFuture(), "Expected Future but got ", tagKind());
94
+ return toIntrusivePtr<ivalue::Future>();
95
+ }
96
+ inline c10::intrusive_ptr<ivalue::Await> IValue::toAwait() && {
97
+ AT_ASSERT(isAwait(), "Expected Await but got ", tagKind());
98
+ return moveToIntrusivePtr<ivalue::Await>();
99
+ }
100
+ inline c10::intrusive_ptr<ivalue::Await> IValue::toAwait() const& {
101
+ AT_ASSERT(isAwait(), "Expected Await but got ", tagKind());
102
+ return toIntrusivePtr<ivalue::Await>();
103
+ }
104
+ inline c10::intrusive_ptr<c10::RRefInterface> IValue::toRRef() && {
105
+ AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind());
106
+ return moveToIntrusivePtr<c10::RRefInterface>();
107
+ }
108
+ inline c10::intrusive_ptr<c10::RRefInterface> IValue::toRRef() const& {
109
+ AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind());
110
+ return toIntrusivePtr<c10::RRefInterface>();
111
+ }
112
+ inline c10::intrusive_ptr<at::Quantizer> IValue::toQuantizer() && {
113
+ AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind());
114
+ return moveToIntrusivePtr<at::Quantizer>();
115
+ }
116
+ inline c10::intrusive_ptr<at::Quantizer> IValue::toQuantizer() const& {
117
+ AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind());
118
+ return toIntrusivePtr<at::Quantizer>();
119
+ }
120
+ inline c10::intrusive_ptr<ivalue::ConstantString> IValue::toString() && {
121
+ AT_ASSERT(isString(), "Expected String but got ", tagKind());
122
+ return moveToIntrusivePtr<ivalue::ConstantString>();
123
+ }
124
+ inline c10::intrusive_ptr<ivalue::ConstantString> IValue::toString() const& {
125
+ AT_ASSERT(isString(), "Expected String but got ", tagKind());
126
+ return toIntrusivePtr<ivalue::ConstantString>();
127
+ }
128
+ inline c10::intrusive_ptr<ivalue::Object> IValue::toObject() && {
129
+ AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
130
+ return moveToIntrusivePtr<ivalue::Object>();
131
+ }
132
+ inline c10::intrusive_ptr<ivalue::Object> IValue::toObject() const& {
133
+ AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
134
+ return toIntrusivePtr<ivalue::Object>();
135
+ }
136
+ inline c10::intrusive_ptr<ivalue::PyObjectHolder> IValue::
137
+ toPyObjectHolder() && {
138
+ TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind());
139
+ return moveToIntrusivePtr<ivalue::PyObjectHolder>();
140
+ }
141
+ inline c10::intrusive_ptr<ivalue::PyObjectHolder> IValue::toPyObjectHolder()
142
+ const& {
143
+ TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind());
144
+ return toIntrusivePtr<ivalue::PyObjectHolder>();
145
+ }
146
+ inline c10::intrusive_ptr<ivalue::EnumHolder> IValue::toEnumHolder() && {
147
+ TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind());
148
+ return moveToIntrusivePtr<ivalue::EnumHolder>();
149
+ }
150
+ inline c10::intrusive_ptr<ivalue::EnumHolder> IValue::toEnumHolder() const& {
151
+ TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind());
152
+ return toIntrusivePtr<ivalue::EnumHolder>();
153
+ }
154
+ inline c10::complex<double> IValue::toComplexDouble() const {
155
+ TORCH_INTERNAL_ASSERT(isComplexDouble(), "Expected ComplexDouble but got ", tagKind());
156
+ auto ptr = toIntrusivePtr<ivalue::ComplexHolder>();
157
+ return (*ptr).val;
158
+ }
159
+ inline at::Tensor IValue::toTensor() && {
160
+ if (C10_UNLIKELY(!isTensor())) {
161
+ reportToTensorTypeError();
162
+ }
163
+ auto result = std::move(payload.as_tensor);
164
+ // As far as I can tell, omitting the usual explicit destructor call
165
+ // is not UB in and of itself, and it's a slight perf win. The
166
+ // destructor is a no-op, because the moved-from Tensor is
167
+ // effectively an intrusive_ptr in the null state, so we don't need
168
+ // the behavior for correctness reasons either. Leaving this
169
+ // explanatory comment, including commented-out destructor call, to
170
+ // make this abundantly clear.
171
+ //
172
+ // payload.as_tensor.~Tensor();
173
+ clearToNone();
174
+ return result;
175
+ }
176
+ inline at::Tensor& IValue::toTensor() & {
177
+ if (C10_UNLIKELY(!isTensor())) {
178
+ reportToTensorTypeError();
179
+ }
180
+ return payload.as_tensor;
181
+ }
182
+ inline const at::Tensor& IValue::toTensor() const& {
183
+ if (C10_UNLIKELY(!isTensor())) {
184
+ reportToTensorTypeError();
185
+ }
186
+ return payload.as_tensor;
187
+ }
188
+ inline c10::Storage IValue::toStorage() && {
189
+ AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind());
190
+ return c10::Storage(
191
+ moveToIntrusivePtr<at::StorageImpl>());
192
+ }
193
+ inline c10::Storage IValue::toStorage() const& {
194
+ AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind());
195
+ return c10::Storage(toIntrusivePtr<at::StorageImpl>());
196
+ }
197
+ inline c10::Stream IValue::toStream() && {
198
+ AT_ASSERT(isStream(), "Expected Stream but got ", tagKind());
199
+ auto ptr = toIntrusivePtr<ivalue::StreamData3Holder>();
200
+ return c10::Stream::unpack3((*ptr).val.stream_id,
201
+ (*ptr).val.device_index,
202
+ (*ptr).val.device_type);
203
+ }
204
+ inline c10::Stream IValue::toStream() const& {
205
+ AT_ASSERT(isStream(), "Expected Stream but got ", tagKind());
206
+ auto ptr = toIntrusivePtr<ivalue::StreamData3Holder>();
207
+ return c10::Stream::unpack3((*ptr).val.stream_id,
208
+ (*ptr).val.device_index,
209
+ (*ptr).val.device_type);
210
+ }
211
+ inline c10::intrusive_ptr<caffe2::Blob> IValue::toBlob() && {
212
+ AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind());
213
+ return moveToIntrusivePtr<caffe2::Blob>();
214
+ }
215
+ inline c10::intrusive_ptr<caffe2::Blob> IValue::toBlob() const& {
216
+ AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind());
217
+ return toIntrusivePtr<caffe2::Blob>();
218
+ ;
219
+ }
220
+ inline c10::intrusive_ptr<torch::CustomClassHolder> IValue::toCapsule() && {
221
+ TORCH_INTERNAL_ASSERT(isCapsule());
222
+ return moveToIntrusivePtr<torch::CustomClassHolder>();
223
+ }
224
+ inline c10::intrusive_ptr<torch::CustomClassHolder> IValue::toCapsule() const& {
225
+ TORCH_INTERNAL_ASSERT(isCapsule());
226
+ return toIntrusivePtr<torch::CustomClassHolder>();
227
+ }
228
+ inline at::Generator IValue::toGenerator() && {
229
+ AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind());
230
+ return at::Generator(moveToIntrusivePtr<at::GeneratorImpl>());
231
+ }
232
+ inline at::Generator IValue::toGenerator() const& {
233
+ AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind());
234
+ return at::Generator(toIntrusivePtr<at::GeneratorImpl>());
235
+ }
236
+ inline c10::SymInt IValue::toSymInt() && {
237
+ AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind());
238
+ if (isSymInt()) {
239
+ return c10::SymInt(moveToIntrusivePtr<c10::SymNodeImpl>());
240
+ } else {
241
+ return c10::SymInt(payload.u.as_int);
242
+ }
243
+ }
244
+ inline c10::SymInt IValue::toSymInt() const& {
245
+ AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind());
246
+ if (isSymInt()) {
247
+ return c10::SymInt(toIntrusivePtr<c10::SymNodeImpl>());
248
+ } else {
249
+ return c10::SymInt(payload.u.as_int);
250
+ }
251
+ }
252
+ inline c10::SymFloat IValue::toSymFloat() && {
253
+ AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind());
254
+ if (isSymFloat()) {
255
+ return c10::SymFloat(moveToIntrusivePtr<c10::SymNodeImpl>());
256
+ } else {
257
+ return c10::SymFloat(payload.u.as_double);
258
+ }
259
+ }
260
+ inline c10::SymFloat IValue::toSymFloat() const& {
261
+ AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind());
262
+ if (isSymFloat()) {
263
+ return c10::SymFloat(toIntrusivePtr<c10::SymNodeImpl>());
264
+ } else {
265
+ return c10::SymFloat(payload.u.as_double);
266
+ }
267
+ }
268
+ inline c10::SymBool IValue::toSymBool() && {
269
+ AT_ASSERT(isSymBool() || isBool(), "Expected SymBool or boolean but got ", tagKind());
270
+ if (isSymBool()) {
271
+ return c10::SymBool(moveToIntrusivePtr<c10::SymNodeImpl>());
272
+ } else {
273
+ return c10::SymBool(payload.u.as_bool);
274
+ }
275
+ }
276
+
277
+ inline c10::SymBool IValue::toSymBool() const& {
278
+ AT_ASSERT(isSymBool() || isBool(), "Expected SymBool or boolean but got ", tagKind());
279
+ if (isSymBool()) {
280
+ return c10::SymBool(toIntrusivePtr<c10::SymNodeImpl>());
281
+ } else {
282
+ return c10::SymBool(payload.u.as_bool);
283
+ }
284
+ }
285
+
286
+ namespace ivalue {
287
+
288
+ void TORCH_API
289
+ checkCustomClassType(const ClassType* expected_type, const Type* actual_type);
290
+
291
+ template <typename T>
292
+ using Shared = c10::intrusive_ptr<T>;
293
+
294
+ // string
295
// Immutable, refcounted string payload for IValue. The string contents are
// fixed at construction (str_ is const), so handing out references/views is
// safe for the lifetime of the intrusive_ptr.
struct TORCH_API ConstantString final : c10::intrusive_ptr_target {
 private:
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const std::string str_;

 public:
  // NOTE: both constructors are intentionally non-explicit here (implicit
  // conversion from std::string / string_view is part of the interface).
  ConstantString(std::string str) : str_(std::move(str)) {}
  ConstantString(c10::string_view str) : str_(std::string(str)) {}
  // Factory functions returning a heap-allocated, refcounted instance;
  // defined out of line (not visible in this header chunk).
  static c10::intrusive_ptr<ConstantString> create(std::string str_);
  static c10::intrusive_ptr<ConstantString> create(c10::string_view str_);
  static c10::intrusive_ptr<ConstantString> create(const char* str_);

  // Borrowing accessors: references/views into str_, valid while this
  // object is alive.
  const std::string& string() const {
    return str_;
  }
  c10::string_view string_view() const {
    return str_;
  }

  // Implicit conversion so a ConstantString can be used wherever a
  // const std::string& is expected.
  operator const std::string&() const {
    return string();
  }
  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const ConstantString& v);
};
321
+
322
+ struct Future;
323
+
324
// Small-buffer-optimized element storage for ivalue::Tuple.
//
// Invariant: inlineSize_ is in [0, 3]. When inlineSize_ > 0, exactly
// elementsInline_[0..inlineSize_) are alive and elementsVector_ is NOT
// constructed; when inlineSize_ == 0, elementsVector_ is the active union
// member (possibly empty). Every constructor/assignment/destructor below
// must maintain this invariant by hand via placement new and explicit
// destructor calls, since the union has non-trivial members.
struct TORCH_API TupleElements {
 private:
  size_t inlineSize_;
  // We represent TupleElements this way to save doing a heap
  // allocation in the common (at least for unpickling) case where we
  // have only 3 elements. We have our own union instead of
  // c10::SmallVector<IValue> because c10::SmallVector<IValue> always
  // stores the begin/end/capacity pointers, which would be a waste of
  // space in our use case.
  union {
    std::vector<IValue> elementsVector_;
    // Don't want to declare a std::array because the convenient
    // iteration and size members are a footgun in this case -- the
    // actual size of the array may be smaller than 3!
    // NOLINTNEXTLINE(*c-arrays*)
    IValue elementsInline_[3];
  };

  // Destroys the live inline elements only. Caller is responsible for
  // knowing that the inline representation is active (inlineSize_ > 0),
  // and for resetting/replacing the active union member afterwards.
  void destroyInline() {
    for (const auto ii : c10::irange(inlineSize_)) {
      elementsInline_[ii].~IValue();
    }
  }
 public:

  // Contiguous in both representations, so raw pointers work as iterators.
  using iterator = IValue*;
  using const_iterator = const IValue*;

  // Empty: activates the vector member (inlineSize_ == 0 case).
  TupleElements() : inlineSize_(0) {
    new (&elementsVector_) std::vector<IValue>();
  }

  // Adopts an existing vector wholesale; always uses the vector
  // representation regardless of element count.
  explicit TupleElements(std::vector<IValue> elements)
  : inlineSize_(0), elementsVector_(std::move(elements)) {}

  // Copies from a view. Up to 3 elements go inline (note the deliberate
  // reverse-order switch with fallthrough so each case constructs one
  // element); 0 or more than 3 elements use the vector.
  explicit TupleElements(c10::ArrayRef<IValue> elements)
  : inlineSize_(elements.size() <= 3 ? elements.size() : 0) {
    switch (inlineSize_) {
      case 3:
        new (&elementsInline_[2]) IValue(elements[2]);
        [[fallthrough]];
      case 2:
        new (&elementsInline_[1]) IValue(elements[1]);
        [[fallthrough]];
      case 1:
        new (&elementsInline_[0]) IValue(elements[0]);
        break;
      case 0:
        new (&elementsVector_) std::vector<IValue>(elements.begin(), elements.end());
        break;
    }
  }

  // One-, two-, and three-element constructors: always inline, no heap
  // allocation.
  explicit TupleElements(IValue&& e1)
  : inlineSize_(1) {
    new (&elementsInline_[0]) IValue(std::move(e1));
  }

  explicit TupleElements(IValue&& e1, IValue&& e2)
  : inlineSize_(2) {
    new (&elementsInline_[0]) IValue(std::move(e1));
    new (&elementsInline_[1]) IValue(std::move(e2));
  }

  explicit TupleElements(IValue&& e1, IValue&& e2, IValue&& e3)
  : inlineSize_(3) {
    new (&elementsInline_[0]) IValue(std::move(e1));
    new (&elementsInline_[1]) IValue(std::move(e2));
    new (&elementsInline_[2]) IValue(std::move(e3));
  }

  // Must destroy whichever union member is active by hand.
  ~TupleElements() {
    if (inlineSize_) {
      destroyInline();
    } else {
      elementsVector_.~vector();
    }
  }

  // It would be nice to make this noncopyable to prevent people from
  // writing code like `auto output =
  // forward(...).toTupleRef().elements()` (which does refcount bumps on
  // each element, unlike the more efficient but verbose
  // ```
  // auto outputIntrusivePtr = forward(...).toTuple();
  // const auto& output = outputIntrusivePtr->elements();
  // ```
  // ), but there is simply an overwhelming amount of code that does
  // it the inefficient way.
  // See also operator std::vector below.
  TupleElements(const TupleElements& rhs)
  : inlineSize_(rhs.inlineSize_) {
    if (rhs.inlineSize_) {
      for (const auto ii : c10::irange(inlineSize_)) {
        new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]);
      }
    } else {
      new (&elementsVector_) std::vector<IValue>(rhs.elementsVector_);
    }
  }

  // Copy assignment handles all four (lhs representation x rhs
  // representation) combinations:
  //  inline <- inline : assign the overlap, then construct extras or
  //                     destroy surplus;
  //  inline <- vector : destroy inline elements, construct the vector;
  //  vector <- inline : destroy the vector, construct inline elements;
  //  vector <- vector : plain vector assignment.
  // inlineSize_ is updated last, after storage matches rhs's shape.
  TupleElements& operator=(const TupleElements& rhs) {
    if (inlineSize_) {
      if (rhs.inlineSize_) {
        for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) {
          elementsInline_[ii] = rhs.elementsInline_[ii];
        }
        if (rhs.inlineSize_ > inlineSize_) {
          for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) {
            new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]);
          }
        } else {
          for (const auto ii : c10::irange(rhs.inlineSize_, inlineSize_)) {
            elementsInline_[ii].~IValue();
          }
        }
      } else {
        destroyInline();
        new (&elementsVector_) std::vector<IValue>(rhs.elementsVector_);
      }
    } else {
      if (rhs.inlineSize_) {
        elementsVector_.~vector();
        for (const auto ii : c10::irange(rhs.inlineSize_)) {
          new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]);
        }
      } else {
        elementsVector_ = rhs.elementsVector_;
      }
    }
    inlineSize_ = rhs.inlineSize_;
    return *this;
  }

  // Move construction: element-wise move for inline storage, vector move
  // otherwise. rhs keeps its representation (its elements are left in a
  // moved-from state).
  TupleElements(TupleElements&& rhs) noexcept
  : inlineSize_(rhs.inlineSize_) {
    if (inlineSize_) {
      for (const auto ii : c10::irange(inlineSize_)) {
        new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii]));
      }
    } else {
      new (&elementsVector_) std::vector<IValue>(std::move(rhs.elementsVector_));
    }
  }

  // Move assignment: same four-case structure as copy assignment, with
  // element-wise moves instead of copies.
  TupleElements& operator=(TupleElements&& rhs) noexcept {
    if (inlineSize_) {
      if (rhs.inlineSize_) {
        for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) {
          elementsInline_[ii] = std::move(rhs.elementsInline_[ii]);
        }
        if (rhs.inlineSize_ > inlineSize_) {
          for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) {
            new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii]));
          }
        } else {
          for (const auto ii : c10::irange(rhs.inlineSize_, inlineSize_)) {
            elementsInline_[ii].~IValue();
          }
        }
      } else {
        destroyInline();
        new (&elementsVector_) std::vector<IValue>(std::move(rhs.elementsVector_));
      }
    } else {
      if (rhs.inlineSize_) {
        elementsVector_.~vector();
        for (const auto ii : c10::irange(rhs.inlineSize_)) {
          new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii]));
        }
      } else {
        elementsVector_ = std::move(rhs.elementsVector_);
      }
    }
    inlineSize_ = rhs.inlineSize_;
    return *this;
  }

  // Non-owning view over whichever representation is active. Valid only
  // while this object is alive and unmodified.
  C10_NODISCARD c10::ArrayRef<IValue> asArrayRef() const {
    if (inlineSize_) {
      return c10::ArrayRef<IValue>(elementsInline_, inlineSize_);
    } else {
      return elementsVector_;
    }
  }

  // Mimic implicit conversion from std::vector to ArrayRef.
  operator c10::ArrayRef<IValue>() const {
    return asArrayRef();
  }

  // Hash of the element sequence (delegates to the ArrayRef hasher).
  static size_t hash(const TupleElements& v) {
    return c10::hash<c10::ArrayRef<IValue>>()(v.asArrayRef());
  }

  // Replaces all elements, switching to the vector representation if the
  // inline one was active.
  void setContents(std::vector<IValue>&& contents) {
    if (inlineSize_) {
      destroyInline();
      new (&elementsVector_) std::vector<IValue>(std::move(contents));
      inlineSize_ = 0;
    } else {
      elementsVector_ = std::move(contents);
    }
  }

  // Inline storage is never empty (inlineSize_ >= 1 when active).
  C10_NODISCARD bool empty() const {
    return inlineSize_ ? false : elementsVector_.empty();
  }

  C10_NODISCARD size_t size() const {
    return inlineSize_ ? inlineSize_ : elementsVector_.size();
  }

  // Unchecked element access (both overloads): idx must be < size().
  C10_NODISCARD IValue& operator[](size_t idx) {
    if (inlineSize_) {
      return elementsInline_[idx];
    } else {
      return elementsVector_[idx];
    }
  }

  C10_NODISCARD const IValue& operator[](size_t idx) const {
    if (inlineSize_) {
      return elementsInline_[idx];
    } else {
      return elementsVector_[idx];
    }
  }

  // Checked access. NOTE: the non-const overload relies on
  // std::vector::at for the vector case, while the const overload below
  // additionally does its own TORCH_CHECK — asymmetric but both throw on
  // out-of-range.
  C10_NODISCARD IValue& at(size_t idx) {
    if (inlineSize_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3);
      TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_);
      return elementsInline_[idx];
    } else {
      return elementsVector_.at(idx);
    }
  }

  C10_NODISCARD const IValue& at(size_t idx) const {
    if (inlineSize_) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3);
      TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_);
      return elementsInline_[idx];
    } else {
      TORCH_CHECK(idx < elementsVector_.size(), "TupleElements: invalid index Index = ", idx, "; Length = ", elementsVector_.size());
      return elementsVector_.at(idx);
    }
  }

  // Raw-pointer iteration over the active representation.
  C10_NODISCARD iterator begin() {
    if (inlineSize_) {
      return elementsInline_;
    } else {
      return elementsVector_.data();
    }
  }

  C10_NODISCARD iterator end() {
    if (inlineSize_) {
      return elementsInline_ + inlineSize_;
    } else {
      return elementsVector_.data() + elementsVector_.size();
    }
  }

  C10_NODISCARD const_iterator begin() const {
    if (inlineSize_) {
      return elementsInline_;
    } else {
      return elementsVector_.data();
    }
  }

  C10_NODISCARD const_iterator end() const {
    if (inlineSize_) {
      return elementsInline_ + inlineSize_;
    } else {
      return elementsVector_.data() + elementsVector_.size();
    }
  }

  C10_NODISCARD const_iterator cbegin() const {
    return begin();
  }

  C10_NODISCARD const_iterator cend() const {
    return end();
  }

  // Copying materialization into a vector.
  C10_NODISCARD std::vector<IValue> vec() const & {
    return asArrayRef().vec();
  }

  // Precondition (not checked): !empty().
  C10_NODISCARD IValue& back() {
    return *(end() - 1);
  }

  C10_NODISCARD const IValue& back() const {
    return *(end() - 1);
  }

  // Moving materialization: elements are moved out one by one, so this
  // object's elements are left in a moved-from state.
  C10_NODISCARD std::vector<IValue> vec() && {
    std::vector<IValue> result;
    result.reserve(size());
    for (auto&& iv : *this) {
      result.push_back(std::move(iv));
    }
    return result;
  }

  // More compatibility shims for the overwhelming amount of code that
  // likes to copy tuple elements into a vector; see comment above the
  // copy constructor.
  operator std::vector<IValue>() const & {
    return vec();
  }

  operator std::vector<IValue>() && {
    return vec();
  }
};
646
+
647
// Primary template is intentionally empty: only the specializations below
// (TupleType, c10::DynamicType) provide create()/fallback(). Using an
// unspecialized T in Tuple::type<T>() is a compile error by design.
template <typename T>
struct TupleTypeFactory {};
649
+
650
// Factory used by Tuple::type<TupleType>(): builds an unnamed TupleType from
// element types; fallback() (defined out of line) handles the case where the
// cached type is not a TupleType.
template <>
struct TORCH_API TupleTypeFactory<TupleType> {
  static TupleTypePtr create(std::vector<TypePtr> types) {
    return TupleType::create(std::move(types));
  }
  static TupleTypePtr fallback(const Type& type);
};
657
+
658
// DynamicType counterpart; both members are defined out of line (presumably
// in the DynamicType translation unit — not visible in this header chunk).
template <>
struct TORCH_API TupleTypeFactory<c10::DynamicType> {
  static DynamicTypePtr create(const std::vector<TypePtr>& elemTypes);
  static DynamicTypePtr fallback(const Type&);
};
663
+
664
// Refcounted tuple value. Elements live in a TupleElements (inline storage
// for <= 3 elements); the static type is either supplied at construction
// (named tuples) or computed lazily from the elements on first type() call.
// Instances are only created through the static factories, so they are
// always held by c10::intrusive_ptr (constructors are private).
struct TORCH_API Tuple : c10::intrusive_ptr_target {
 private:
  TupleElements elements_;
  mutable c10::TypePtr type_; // lazily computed for unnamed tuples

 public:
  // named tuples have additional type information, so we
  // directly create them tagged
  static c10::intrusive_ptr<Tuple> createNamed(
      std::vector<IValue> elements_,
      c10::TypePtr type_) {
    return c10::make_intrusive<Tuple>(std::move(elements_), std::move(type_));
  }

  static c10::intrusive_ptr<Tuple> createNamed(
      TupleElements elements_,
      std::shared_ptr<TupleType> type_) {
    return c10::make_intrusive<Tuple>(std::move(elements_), std::move(type_));
  }

  static c10::intrusive_ptr<Tuple> createNamed(
      std::initializer_list<IValue> elements_,
      std::shared_ptr<TupleType> type_) {
    return createNamed(TupleElements(c10::ArrayRef<IValue>(elements_)), std::move(type_));
  }

  // MSVC apparently can't disambiguate the other two overloads of
  // create when passed an initializer_list without this.
  static c10::intrusive_ptr<Tuple> create(std::initializer_list<IValue> elements_) {
    return create(c10::ArrayRef<IValue>(elements_));
  }

  // Unnamed-tuple factories; the type is computed lazily on first type().
  static c10::intrusive_ptr<Tuple> create(std::vector<IValue> elements_) {
    return c10::make_intrusive<Tuple>(std::move(elements_));
  }

  static c10::intrusive_ptr<Tuple> create(TupleElements elements_) {
    return c10::make_intrusive<Tuple>(std::move(elements_));
  }

  static c10::intrusive_ptr<Tuple> create(c10::ArrayRef<IValue> elements_) {
    return create(TupleElements(elements_));
  }

  // Fixed-arity factories hit TupleElements' inline storage directly.
  static c10::intrusive_ptr<Tuple> create(IValue e1) {
    return c10::make_intrusive<Tuple>(std::move(e1));
  }

  static c10::intrusive_ptr<Tuple> create(IValue e1, IValue e2) {
    return c10::make_intrusive<Tuple>(std::move(e1), std::move(e2));
  }

  static c10::intrusive_ptr<Tuple> create(IValue e1, IValue e2, IValue e3) {
    return c10::make_intrusive<Tuple>(std::move(e1), std::move(e2), std::move(e3));
  }

 private:
  // Workaround inability to use `>` operator in template argument list.
  // NOTE: not referenced by the code visible in this chunk.
  template <typename... Args>
  static constexpr bool hasMoreThanThreeArgs() {
    return sizeof...(Args) > 3;
  }

 public:
  // Variadic fallback factory. The switch is over a compile-time constant:
  // 1-3 args recurse into the fixed-arity IValue overloads above, anything
  // else goes through a std::vector. (With >3 args the `case` branch
  // re-enters this same template with IValue arguments and then takes the
  // default branch at runtime.)
  template <typename... Args>
  static c10::intrusive_ptr<Tuple> create(Args&&... elements_) {
    switch (sizeof...(Args)) {
      case 1:
      case 2:
      case 3:
        return create(IValue(std::forward<Args>(elements_))...);
      default:
        return create(
            std::vector<IValue>{IValue(std::forward<Args>(elements_))...});
    }
  }

  // Again, it would be nice to make this noncopyable, but there's a
  // lot of extant code that copies Tuples.
  // Tuple(const Tuple& rhs) = delete;

  // Borrowing accessor; prefer this over the rvalue overload when the
  // Tuple stays alive.
  const TupleElements& elements() const& {
    return elements_;
  }

  // Consuming accessor: moves the elements out of an expiring Tuple.
  TupleElements elements() && {
    return std::move(elements_);
  }

  // NOTE: setters replace elements without invalidating the cached type_;
  // callers are presumably responsible for keeping them consistent.
  void setElements(std::vector<IValue>&& elements) {
    elements_.setContents(std::move(elements));
  }

  void setElements(TupleElements&& elements) {
    elements_ = std::move(elements);
  }

  // "unsafe": no bounds check and no type_ revalidation.
  void unsafeSetElement(size_t idx, const IValue& element) {
    elements_[idx] = element;
  }

  void unsafeSetElement(size_t idx, IValue&& element) {
    elements_[idx] = std::move(element);
  }

  size_t size() const {
    return elements_.size();
  }

  // Returns the tuple's static type as T (TupleType by default), computing
  // and caching it from the element types on first use. If the cached type
  // is not a T, delegates to TupleTypeFactory<T>::fallback.
  template <typename T = c10::TupleType>
  std::shared_ptr<T> type() const {
    if (!type_) {
      type_ = TupleTypeFactory<T>::create(fmap(elements(), [&](const IValue& v) {
        return v.type<typename T::ElementType>();
      }));
    }
    if (auto t = type_->cast<T>()) {
      return t;
    }
    return TupleTypeFactory<T>::fallback(*type_);
  }

  // Hash of the element sequence only (type_ does not participate).
  static size_t hash(const Tuple& t) {
    return c10::get_hash(t.elements());
  }

  TORCH_API friend bool operator==(
      const ivalue::Tuple& lhs,
      const ivalue::Tuple& rhs);

 private:
  // NOTE: If we try to avoid the overloads without
  // `std::shared_ptr<TupleType> type` by defaulting it to nullptr, we
  // end up having to call (part of) the shared_ptr destructor for
  // `type` even though we should know statically it won't do
  // anything.
  explicit Tuple(std::vector<IValue> elements)
  : elements_(std::move(elements)){}

  explicit Tuple(std::vector<IValue> elements, c10::TypePtr type)
  : elements_(std::move(elements)), type_(std::move(type)) {}

  explicit Tuple(TupleElements&& elements)
  : elements_(std::move(elements)) {}

  explicit Tuple(TupleElements&& elements, std::shared_ptr<TupleType> type)
  : elements_(std::move(elements)), type_(std::move(type)) {}

  explicit Tuple(IValue&& e1)
  : elements_(std::move(e1)) {}

  explicit Tuple(IValue&& e1, std::shared_ptr<TupleType> type)
  : elements_(std::move(e1)), type_(std::move(type)) {}

  explicit Tuple(IValue&& e1, IValue&& e2)
  : elements_(std::move(e1), std::move(e2)) {}

  explicit Tuple(IValue&& e1, IValue&& e2, std::shared_ptr<TupleType> type)
  : elements_(std::move(e1), std::move(e2)), type_(std::move(type)) {}

  explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3)
  : elements_(std::move(e1), std::move(e2), std::move(e3)) {}

  explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3, std::shared_ptr<TupleType> type)
  : elements_(std::move(e1), std::move(e2), std::move(e3)), type_(std::move(type)) {}

  // make_intrusive needs access to the private constructors.
  friend class c10::intrusive_ptr<Tuple>;
};
832
+
833
+ struct Object;
834
+ struct PyObjectHolder;
835
+ struct EnumHolder;
836
+ } // namespace ivalue
837
+
838
+ // Future
839
+ struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target {
840
+ private:
841
+ // Keep this private in order to force users to go through make_intrusive and
842
+ // thus prevent creating a Future that's not held by an intrusive_ptr.
843
+ explicit Future(TypePtr type, std::vector<c10::Device> devices={})
844
+ : type_(std::move(type)),
845
+ impl_(getTypeOfDevices(devices)),
846
+ devices_(sortAndDeduplicateDevices(impl_, std::move(devices))) {}
847
+
848
+ friend c10::intrusive_ptr<Future>;
849
+
850
+ struct FutureCallback {
851
+ std::function<void(Future&)> callback;
852
+ bool uses_future; // whether the Future& passed in is actually used
853
+
854
+ template <typename T>
855
+ FutureCallback(T callback, bool uses_future)
856
+ : callback(std::move(callback)), uses_future(uses_future) {}
857
+ };
858
+
859
+ public:
860
+ Future(const Future&) = delete;
861
+ Future(Future&&) = delete;
862
+ Future& operator=(const Future&) = delete;
863
+ Future& operator=(Future&&) = delete;
864
+
865
+ struct TORCH_API FutureError final : public std::exception {
866
+ explicit FutureError(std::string&& error_msg_)
867
+ : error_msg(std::move(error_msg_)) {}
868
+
869
+ FutureError() = default;
870
+
871
+ const char* what() const noexcept override {
872
+ return error_msg.c_str();
873
+ }
874
+
875
+ std::string error_msg;
876
+ };
877
+
878
+ /**
879
+ * Wait on the future until it completes.
880
+ */
881
+ void wait() {
882
+ std::unique_lock<std::mutex> lock(mutex_);
883
+ finished_cv_.wait(lock, [&]() -> bool { return completed_; });
884
+ synchronizeWithCurrentStreams();
885
+ }
886
+
887
+ /**
888
+ * Wait on the future until it completes and throw an
889
+ * exception if an error exists.
890
+ */
891
+ void waitAndThrow() {
892
+ wait();
893
+
894
+ if (eptr_) {
895
+ std::rethrow_exception(eptr_);
896
+ }
897
+ }
898
+
899
+ /**
900
+ * Explicitly mark the future as completed with the output value. Optionally,
901
+ * the storages for all tensors in IValue can be passed as well. The DataPtrs
902
+ * of these storages are used to synchronize CUDA streams. If storages isn't
903
+ * given we will attempt to extract it from the value, if we need to (this
904
+ * happens if a non-empty set of devices was given to the constructor). Thus
905
+ * one only needs to provide storages when 1) they cannot be extracted through
906
+ * IValue::getSubValues() or through pickling in case of Python object; or
907
+ * when 2) customized storage extraction is more efficient.
908
+ */
909
+ using WeakStorage = c10::weak_intrusive_ptr<c10::StorageImpl>;
910
+ void markCompleted(
911
+ IValue value,
912
+ c10::optional<std::vector<WeakStorage>> storages = c10::nullopt) {
913
+ // Start by performing all steps that can throw, before setting any field.
914
+ // Do this before even acquiring the mutex, because extractStorages might
915
+ // acquire the GIL, which could lead to a lock inversion with our mutex.
916
+ // See https://github.com/pytorch/pytorch/issues/58239.
917
+ std::vector<WeakStorage> actualStorages;
918
+ std::vector<c10::Device> usedDevices;
919
+ try {
920
+ // FIXME We should always extract DataPtrs, in order to catch the case of
921
+ // users using CUDA values but forgetting to set devices, which currently
922
+ // leads to a silent synchronization/correctness issue. However, as this
923
+ // might worsen perf in CPU-only cases, we should only do so after careful
924
+ // benchmarks.
925
+ if (impl_.type() != c10::kCPU) {
926
+ actualStorages =
927
+ storages.has_value() ? std::move(*storages) : extractStorages(value);
928
+ usedDevices = getDevicesOfStorages(impl_, actualStorages);
929
+ ensureIsSubsetOfDevices(usedDevices, devices_);
930
+ }
931
+ } catch (const std::exception&) {
932
+ setError(std::current_exception());
933
+ return;
934
+ }
935
+
936
+ std::unique_lock<std::mutex> lock(mutex_);
937
+ TORCH_CHECK(
938
+ !completed(),
939
+ "Attempting to mark a completed Future as complete again. Note that "
940
+ "a Future can only be marked completed once.");
941
+
942
+ // Only set value_ and completed_ flag once all checks and preparation steps
943
+ // have returned successfully to allow for proper error propagation.
944
+ value_ = std::move(value);
945
+ completed_ = true;
946
+
947
+ currentDevice_ = impl_.getDevice();
948
+ storages_ = std::move(actualStorages);
949
+ for (const c10::Device& device : usedDevices) {
950
+ c10::Event event(impl_.type());
951
+ event.record(impl_.getStream(device));
952
+ events_.push_back(std::move(event));
953
+ }
954
+
955
+ std::vector<FutureCallback> cbs;
956
+ cbs.swap(callbacks_);
957
+ lock.unlock();
958
+
959
+ finished_cv_.notify_all();
960
+ for (auto& callback : cbs) {
961
+ invokeCallback(std::move(callback.callback), callback.uses_future);
962
+ }
963
+ }
964
+
965
+ void markCompleted() {
966
+ markCompleted(IValue{});
967
+ }
968
+
969
+ void setError(std::exception_ptr eptr) {
970
+ std::unique_lock<std::mutex> lock(mutex_);
971
+ setErrorInternal(std::move(eptr), lock);
972
+ }
973
+
974
+ void setErrorIfNeeded(std::exception_ptr eptr) {
975
+ std::unique_lock<std::mutex> lock(mutex_);
976
+ if (completed_) {
977
+ // This should be rare and shouldn't cause log spew. Its important to
978
+ // log errors and thats why we have this log here.
979
+ std::string msg = c10::str(
980
+ "Skipping setting following error on the Future since "
981
+ "it is already marked completed (this is not necessarily "
982
+ "an error):\n",
983
+ tryRetrieveErrorMessageInternal(std::move(eptr)));
984
+ if (eptr_) {
985
+ msg += c10::str(
986
+ ", \nOriginal exception:\n",
987
+ tryRetrieveErrorMessageInternal(eptr_));
988
+ }
989
+ LOG(INFO) << msg;
990
+ return;
991
+ } else {
992
+ setErrorInternal(std::move(eptr), lock);
993
+ }
994
+ }
995
+
996
+ // Get the result of the current future.
997
+ IValue value() {
998
+ std::unique_lock<std::mutex> lock(mutex_);
999
+ AT_ASSERT(completed());
1000
+ if (eptr_) {
1001
+ std::rethrow_exception(eptr_);
1002
+ }
1003
+ return value_;
1004
+ }
1005
+
1006
+ // This accessor should only be used if we know that the future is
1007
+ // completed() with no error.
1008
+ const IValue& constValue() const {
1009
+ std::unique_lock<std::mutex> lock(mutex_);
1010
+ AT_ASSERT(completed());
1011
+ TORCH_INTERNAL_ASSERT(
1012
+ !eptr_,
1013
+ "value() accessor should only be used when future is not completed with ",
1014
+ "an error, but future had the following error: ",
1015
+ tryRetrieveErrorMessageInternal(eptr_)
1016
+ );
1017
+ return value_;
1018
+ }
1019
+
1020
+ // This accessor should only be used if we know that the future is
1021
+ // completed() with no error.
1022
+ const std::vector<WeakStorage>& storages() const {
1023
+ std::unique_lock<std::mutex> lock(mutex_);
1024
+ AT_ASSERT(completed());
1025
+ AT_ASSERT(!eptr_);
1026
+ return storages_;
1027
+ }
1028
+
1029
+ /**
1030
+ * Add a callback to the future.
1031
+ * The callbacks will be executed once the future completes.
1032
+ * If the future has already completed,
1033
+ * this function will execute the callback immediately.
1034
+ */
1035
+ template <typename T>
1036
+ void addCallback(T callback, bool uses_future = true) {
1037
+ #if __cpp_lib_is_invocable >= 201703
1038
+ static_assert(
1039
+ std::is_invocable_r<void, T, Future&>::value,
1040
+ "The callback must have signature void(Future&)");
1041
+ #endif
1042
+
1043
+ std::unique_lock<std::mutex> lock(mutex_);
1044
+ if (completed()) {
1045
+ lock.unlock();
1046
+ invokeCallback(std::move(callback), uses_future);
1047
+ return;
1048
+ }
1049
+ callbacks_.emplace_back(std::move(callback), uses_future);
1050
+ }
1051
+
1052
+ /**
1053
+ * Add a callback to the future, and return another Future to hold the return
1054
+ * value of the callback. This is necessary when the callback provider needs
1055
+ * to know for sure when the callback has finished.
1056
+ */
1057
+ template <typename T>
1058
+ c10::intrusive_ptr<Future> then(T callback, TypePtr type) {
1059
+ using IValueWithStorages = std::tuple<IValue, std::vector<WeakStorage>>;
1060
+ #if __cpp_lib_is_invocable >= 201703
1061
+ static_assert(
1062
+ std::disjunction<
1063
+ std::is_invocable_r<IValue, T, Future&>,
1064
+ std::is_invocable_r<IValueWithStorages, T, Future&>>::value,
1065
+ "The callback must have signature IValue(Future&) or "
1066
+ "std::tuple<IValue, std::vector<Storage>>(Future&)");
1067
+ #endif
1068
+ auto childFut = createInstance(::std::move(type));
1069
+ addCallback([childFut,
1070
+ cb = std::move(callback)](Future& parentFut) mutable {
1071
+ try {
1072
+ if constexpr (::std::is_convertible_v<typename c10::invoke_result_t<T &&, Future&>, IValueWithStorages>) {
1073
+ auto [ivalue, storages] = cb(parentFut);
1074
+ childFut->markCompleted(::std::move(ivalue), ::std::move(storages));
1075
+ } else {
1076
+ childFut->markCompleted(cb(parentFut));
1077
+ }
1078
+ } catch (std::exception&) {
1079
+ childFut->setError(std::current_exception());
1080
+ }
1081
+ });
1082
+ return childFut;
1083
+ }
1084
+
1085
+ template <typename T>
1086
+ c10::intrusive_ptr<Future> thenAsync(T callback, TypePtr type) {
1087
+ #if __cpp_lib_is_invocable >= 201703
1088
+ static_assert(
1089
+ std::is_invocable_r<c10::intrusive_ptr<Future>, T, Future&>::value,
1090
+ "The callback must have signature c10::intrusive_ptr<Future>(Future&)");
1091
+ #endif
1092
+ auto childFut = createInstance(std::move(type));
1093
+ addCallback(
1094
+ [childFut, cb = std::move(callback)](Future& parentFut) mutable {
1095
+ c10::intrusive_ptr<Future> intermediateFut;
1096
+ try {
1097
+ intermediateFut = cb(parentFut);
1098
+ } catch (std::exception&) {
1099
+ childFut->setError(std::current_exception());
1100
+ return;
1101
+ }
1102
+ intermediateFut->addCallback(
1103
+ [childFut = std::move(childFut)](Future& intermediateFut) {
1104
+ if (intermediateFut.hasError()) {
1105
+ childFut->setError(intermediateFut.exception_ptr());
1106
+ } else {
1107
+ childFut->markCompleted(
1108
+ intermediateFut.value(), intermediateFut.storages());
1109
+ }
1110
+ });
1111
+ });
1112
+ return childFut;
1113
+ }
1114
+
1115
+ // Tries to retrieve the error message from std::exception_ptr.
1116
+ std::string tryRetrieveErrorMessage() const {
1117
+ TORCH_CHECK(hasError(), "No error present on the future.");
1118
+ std::unique_lock<std::mutex> lock(mutex_);
1119
+ return tryRetrieveErrorMessageInternal(eptr_);
1120
+ }
1121
+
1122
+ // Check if the current future has completed
1123
+ bool completed() const {
1124
+ return completed_;
1125
+ }
1126
+
1127
+ bool hasValue() const {
1128
+ std::unique_lock<std::mutex> lock(mutex_);
1129
+ return completed_ && !eptr_;
1130
+ }
1131
+
1132
+ bool hasError() const {
1133
+ std::unique_lock<std::mutex> lock(mutex_);
1134
+ return eptr_ ? true : false;
1135
+ }
1136
+
1137
+ std::exception_ptr exception_ptr() const {
1138
+ std::unique_lock<std::mutex> lock(mutex_);
1139
+ return eptr_;
1140
+ }
1141
+
1142
+ TORCH_API friend std::ostream& operator<<(
1143
+ std::ostream& out,
1144
+ const Future& v);
1145
+
1146
+ const TypePtr& elementType() const {
1147
+ return type_;
1148
+ }
1149
+
1150
+ const std::vector<c10::Device>& devices() const {
1151
+ return devices_;
1152
+ }
1153
+
1154
+ // This method should be used when one intends to manually create a child
1155
+ // future, for example when implementing a customized version of then().
1156
+ c10::intrusive_ptr<Future> createInstance(at::TypePtr type) {
1157
+ return c10::make_intrusive<Future>(std::move(type), devices_);
1158
+ }
1159
+
1160
+ private:
1161
+
1162
+ // This method should always be used when invoking a callback (regardless of
1163
+ // how/when that happens) as it will ensure that the proper "environment" is
1164
+ // set up before running the callback, as in, it will set up the CUDA streams,
1165
+ // synchronize them with the value, and so on (if needed).
1166
+ template<typename T>
1167
+ void invokeCallback(T callback, bool uses_future) {
1168
+ #if __cpp_lib_is_invocable >= 201703
1169
+ static_assert(
1170
+ std::is_invocable_r<void, T, Future&>::value,
1171
+ "The callback must have signature void(Future&)");
1172
+ #endif
1173
+
1174
+ // The synchronization performed below shouldn't be needed when the future
1175
+ // is not used by the callback.
1176
+ if (uses_future) {
1177
+ c10::OptionalDeviceGuard deviceGuard(currentDevice_);
1178
+
1179
+ std::vector<c10::Stream> streams;
1180
+ streams.reserve(devices_.size());
1181
+ for (const c10::Device& device : devices_) {
1182
+ streams.push_back(impl_.getStreamFromGlobalPool(device));
1183
+ }
1184
+ c10::MultiStreamGuard streamGuard(streams);
1185
+ synchronizeWithCurrentStreams();
1186
+ callback(*this);
1187
+ } else {
1188
+ callback(*this);
1189
+ }
1190
+ }
1191
+
1192
  // This method should be called before this future's value is used, as it
  // ensures that the CUDA streams that are "current" at the callsite properly
  // synchronize with the value.
  void synchronizeWithCurrentStreams() {
    // Make the current stream of each event's device wait on that event.
    for (c10::Event& event : events_) {
      event.block(impl_.getStream(event.device()));
    }

    for (const WeakStorage& weak_storage : storages_) {
      c10::intrusive_ptr<c10::StorageImpl> storage = weak_storage.lock();
      if (!storage) {
        // The storage was deallocated in the meantime; nothing to record.
        continue;
      }
      if (!storage->device().is_cpu()) {
        // Record the data pointer on the current stream of its device so the
        // allocator doesn't recycle it while that stream still uses it.
        impl_.recordDataPtrOnStream(
            storage->data_ptr(), impl_.getStream(storage->device()));
      }
    }
  }
1211
+
1212
  // Marks this future as completed with an error. `lock` must hold `mutex_`
  // on entry; it is released before waking waiters and running callbacks so
  // the callbacks never execute under the future's lock.
  void setErrorInternal(
      std::exception_ptr eptr,
      std::unique_lock<std::mutex>& lock) {
    TORCH_CHECK(
        !eptr_,
        "Error already set on this Future: ",
        tryRetrieveErrorMessageInternal(eptr_),
        ", trying to set error: ",
        tryRetrieveErrorMessageInternal(eptr));
    TORCH_INTERNAL_ASSERT(!completed(), "Future is already marked completed");
    completed_ = true;
    eptr_ = std::move(eptr);

    // Steal the callback list so each callback runs outside the lock.
    std::vector<FutureCallback> cbs;
    cbs.swap(callbacks_);
    lock.unlock();

    finished_cv_.notify_all();
    for (auto& callback : cbs) {
      invokeCallback(std::move(callback.callback), callback.uses_future);
    }
  }
1234
+
1235
  // Tries to retrieve the error message from std::exception_ptr.
  // Falls back to a generic string for payloads not derived from
  // std::exception.
  std::string tryRetrieveErrorMessageInternal(std::exception_ptr eptr) const {
    try {
      std::rethrow_exception(std::move(eptr));
    } catch (const std::exception& e) {
      return e.what();
    } catch (...) {
      return "Unknown Exception Type";
    }
  }
1245
+
1246
+ // Defined in ivalue.cpp.
1247
+ static std::vector<WeakStorage> extractStorages(
1248
+ const at::IValue& value);
1249
+
1250
  // Collects the distinct non-CPU devices (sorted by index) on which the
  // given storages reside. All non-CPU storages must live on devices of
  // `impl`'s type, otherwise a ValueError-style check fires.
  static std::vector<c10::Device> getDevicesOfStorages(
      const c10::impl::VirtualGuardImpl& impl,
      const std::vector<WeakStorage>& storages) {
    c10::DeviceIndex deviceCount = impl.deviceCount();
    // Mark usage per device index, then compact into a device list below.
    std::vector<bool> isDeviceUsed(deviceCount, false);
    for (const WeakStorage& weak_storage : storages) {
      c10::intrusive_ptr<c10::StorageImpl> storage = weak_storage.lock();
      if (!storage) {
        // Storage was already deallocated; skip it.
        continue;
      }
      c10::Device device = storage->device();
      if (!device.is_cpu()) {
        TORCH_CHECK_VALUE(
            device.type() == impl.type(),
            "Expected all data ptrs to be on a device of type ",
            impl.type(),
            ", got one on device ",
            device);
        isDeviceUsed[device.index()] = true;
      }
    }
    std::vector<c10::Device> devices;
    for (c10::DeviceIndex idx = 0; idx < deviceCount; idx++) {
      if (isDeviceUsed[idx]) {
        devices.emplace_back(impl.type(), idx);
      }
    }
    return devices;
  }
1279
+
1280
+ static std::string formatSetOfDevices(
1281
+ const std::vector<c10::Device>& devices) {
1282
+ if (devices.empty()) {
1283
+ return "(none)";
1284
+ }
1285
+ std::ostringstream oss;
1286
+ oss << devices[0];
1287
+ for (const auto idx : c10::irange(1, devices.size())) {
1288
+ if (idx == devices.size() - 1) {
1289
+ oss << " and ";
1290
+ } else {
1291
+ oss << ", ";
1292
+ }
1293
+ oss << devices[idx];
1294
+ }
1295
+ return oss.str();
1296
+ }
1297
+
1298
+ static c10::DeviceType getTypeOfDevices(
1299
+ const std::vector<c10::Device>& devices) {
1300
+ if (devices.empty()) {
1301
+ return c10::kCPU;
1302
+ }
1303
+ c10::DeviceType deviceType = devices[0].type();
1304
+ for (const auto idx : c10::irange(1, devices.size())) {
1305
+ TORCH_CHECK_VALUE(
1306
+ devices[idx].type() == deviceType,
1307
+ "Expected all devices to be of the same type, but got a mismatch between ",
1308
+ devices[0],
1309
+ " and ",
1310
+ devices[idx]);
1311
+ }
1312
+ return deviceType;
1313
+ }
1314
+
1315
  // We need devices to be sorted in order to use ensureIsSubsetOfDevices.
  // Sorts by device index, verifies every device carries an explicit index,
  // and removes duplicate indices in-place via compaction.
  static std::vector<c10::Device> sortAndDeduplicateDevices(
      const c10::impl::VirtualGuardImpl& /*impl*/,
      std::vector<c10::Device> devices) {
    std::sort(
      devices.begin(), devices.end(),
      [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); });
    // Deduplicate by compacting.
    size_t targetIdx = 0;
    for (const auto sourceIdx : c10::irange(devices.size())) {
      // Validate every element, including ones that turn out to be duplicates.
      TORCH_CHECK_VALUE(
          devices[sourceIdx].has_index(),
          "Expected devices to have indices, got ", devices[sourceIdx]);
      if (targetIdx > 0 && devices[targetIdx - 1].index() == devices[sourceIdx].index()) {
        // It's a duplicate, skip it.
        continue;
      }
      if (sourceIdx != targetIdx) {
        devices[targetIdx] = devices[sourceIdx];
      }
      targetIdx++;
    }
    // If there were duplicates there's now a gap at the end: trim it. Resizing
    // requires the item type to be default-constructible (which c10::Device is
    // not) because in principle it could be required to create new items. Since
    // we know we'll shrink the vector, we provide a custom dummy value instead.
    devices.resize(targetIdx, c10::Device(c10::kCPU));
    return devices;
  }
1344
+
1345
  // Verifies that every device index in `subset` also appears in `superset`;
  // on violation raises a ValueError-style check listing the excess devices.
  static void ensureIsSubsetOfDevices(
      const std::vector<c10::Device>& subset,
      const std::vector<c10::Device>& superset) {
    // We assume the devices in both vectors have the same consistent type, and
    // their indices are unique and sorted.
    std::vector<c10::Device> excessDevices;
    std::set_difference(
        subset.begin(),
        subset.end(),
        superset.begin(),
        superset.end(),
        std::back_inserter(excessDevices),
        [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); });
    TORCH_CHECK_VALUE(
        excessDevices.empty(),
        "The result contained tensors residing on device(s) ",
        formatSetOfDevices(excessDevices),
        " which are not among the expected device(s) ",
        formatSetOfDevices(superset));
  }
1365
+
1366
+ mutable std::mutex mutex_;
1367
+ std::atomic_bool completed_ = {false}; // is this future complete
1368
+ std::condition_variable finished_cv_;
1369
+
1370
+ IValue value_; // when finished the value
1371
+ TypePtr type_;
1372
+ std::vector<FutureCallback> callbacks_;
1373
+ std::exception_ptr eptr_;
1374
+
1375
+ // An upcast pointer to a virtual class which allows us to manipulate events,
1376
+ // streams, ... in a generic way, without an explicit dependency on CUDA.
1377
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
1378
+ const c10::impl::VirtualGuardImpl impl_;
1379
+
1380
+ // The device that was current when markCompleted was called, which we'll
1381
+ // restore when invoking callbacks. It's optional because we'll only store it
1382
+ // if the future completes successfully.
1383
+ optional<c10::Device> currentDevice_;
1384
+
1385
+ // The events that correspond to the completion of the async I/O kernels. They
1386
+ // are recorded on the appropriate streams when the future is marked completed
1387
+ // and can then be queried/waited/blocked on. There is one event for each
1388
+ // distinct device on which the value's tensors reside.
1389
+ std::vector<c10::Event> events_;
1390
+
1391
+ // A cached version of the storages extracted from the value when the future
1392
+ // is first marked completed.
1393
+ std::vector<WeakStorage> storages_;
1394
+
1395
+ // The bounding set of devices that this future, and any of its children, is
1396
+ // allowed to use. This is a superset of the set of devices used by the events
1397
+ // above. We need this to know what streams (for which devices) to set as
1398
+ // current when invoking a callback, thus allowing the callback to use devices
1399
+ // that the parent future didn't use. This field is set to the value provided
1400
+ // in the constructor and will be "inherited" by all child futures.
1401
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
1402
+ const std::vector<c10::Device> devices_;
1403
+ };
1404
+
1405
// Lazily-evaluated value holder: the value is either produced by calling the
// stored thunk `fn_` on first wait(), or injected directly via markCompleted().
struct C10_EXPORT ivalue::Await final : c10::intrusive_ptr_target {
 private:
  explicit Await(TypePtr elType, std::function<IValue()> fn)
      : elType_(std::move(elType)), type_(AwaitType::create(elType_)), fn_(std::move(fn)) {}

  explicit Await(TypePtr elType) : elType_(std::move(elType)), type_(AwaitType::create(elType_)) { }

  // Constructors are private; instances are created through intrusive_ptr.
  friend c10::intrusive_ptr<Await>;

 public:
  Await(const Await&) = delete;
  Await(Await&&) = delete;
  Await& operator=(const Await&) = delete;
  Await& operator=(Await&&) = delete;

  // Runs fn_ on the first call, caches the result, and drops the stored args
  // (they were only kept alive for fn_). Subsequent calls return the cache.
  IValue wait() {
    if (!completed_) {
      TORCH_CHECK(fn_, "Incompleted Await: fn can't be None");
      value_ = fn_();
      completed_ = true;
      args_ = {};
    }
    return value_;
  }

  // Returns the cached value; requires the Await to be completed already.
  IValue value() {
    TORCH_CHECK(completed_, "Await must be completed");
    return value_;
  }

  void setFn(std::function<IValue()> fn) {
    fn_ = std::move(fn);
  }

  bool completed() {
    return completed_;
  }

  // Completes the Await with an externally computed value; fn_ is bypassed.
  void markCompleted(IValue value) {
    value_ = std::move(value);
    completed_ = true;
  }

  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const Await& v);

  // Type of the wrapped value.
  const TypePtr& elementType() const {
    return elType_;
  }

  // The Await type wrapping elType_.
  const TypePtr& type() const {
    return type_;
  }

  void setArgs(std::vector<IValue> args) {
    args_ = std::move(args);
  }

  std::vector<IValue>& args() {
    return args_;
  }

 private:
  TypePtr elType_;                // type of the wrapped value
  TypePtr type_;                  // AwaitType::create(elType_)
  std::vector<IValue> args_;      // kept alive until fn_ runs (cleared after)
  std::function<IValue()> fn_;    // thunk producing the value
  IValue value_;                  // cached result, valid when completed_
  bool completed_{};              // whether value_ is valid
};
1476
+
1477
+ // Input is a list of Futures with the same target type.
1478
+ // Output is a Future to the List of completed Futures.
1479
+ TORCH_API intrusive_ptr<ivalue::Future> collectAll(
1480
+ const c10::List<c10::intrusive_ptr<ivalue::Future>>& srcs);
1481
+ // Input is a List of Futures with the same target type.
1482
+ // Output is a Future that will be updated with a seen value.
1483
+ TORCH_API intrusive_ptr<ivalue::Future> collectAny(
1484
+ const c10::List<c10::intrusive_ptr<ivalue::Future>>& srcs);
1485
+
1486
// User-defined object: the runtime representation of a TorchScript class
// instance. Attributes live in a flat slot vector; the class type describes
// the slot layout.
struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target {
 public:
  // In general, class types hold a shared_ptr to its owning CompilationUnit,
  // so that its type and methods do not get deallocated while the class exists.
  // However, the CompilationUnit holds ownership of the type's graphs, so
  // inserting a constant object into a Graph would create a reference cycle if
  // that constant object held a shared_ptr to its CU. For these objects we
  // instantiate them with non-owning references to its CU
  Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) {
    slots_.resize(numSlots);
  }

  Object(StrongTypePtr type, size_t numSlots)
      : type_(WeakOrStrongTypePtr(std::move(type))) {
    slots_.resize(numSlots);
  }

  static c10::intrusive_ptr<Object> create(
      WeakOrStrongTypePtr type,
      size_t numSlots) {
    return c10::make_intrusive<Object>(std::move(type), numSlots);
  }

  static c10::intrusive_ptr<Object> create(
      StrongTypePtr type,
      size_t numSlots) {
    return c10::make_intrusive<Object>(std::move(type), numSlots);
  }

  // Out-of-line overload taking a ClassTypePtr.
  static c10::intrusive_ptr<Object> create(ClassTypePtr classType, size_t numSlots);

  /**
   * Slot API.
   *
   * Attributes are stored as a simple vector so that lookups are fast at
   * runtime. A "slot" is just an index into that vector, which can be computed
   * statically if you have access to the class type. Use this API if you are
   * writing compiler stuff.
   */
  void setSlot(size_t slot, IValue v) {
    if (slot >= slots_.size()) {
      // for module types, it is possible that the members of the class have
      // expanded after the object was created. In this case, we expand
      // the slots to the right size
      resizeObject(slot);
    }
    slots_[slot] = std::move(v);
  }

  const IValue& getSlot(size_t slot) const {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(slot < slots_.size());
    // NOTE: This lookup is fairly hot, so we use unchecked access to the
    // vector. Errors should still be detectable with ASan.
    return slots_[slot];
  }

  // Removes a slot without updating the type; see unsafeRemoveAttr below for
  // the consistency caveats.
  void unsafeRemoveSlot(size_t slot) {
    TORCH_CHECK(slot < slots_.size());
    slots_.erase(slots_.begin() + static_cast<std::ptrdiff_t>(slot));
  }

  /**
   * Attribute API.
   *
   * Wrappers around the slot stuff so that users can access attributes
   * directly. Use this API if you are a user.
   *
   * Note: Unlike in Python, TorchScript must make a distinction between
   * attributes (which are IValues) and methods (which are Methods). If you
   * want a method, use `obj.type()->getMethod()`
   */
  IValue getAttr(const std::string& name) const;
  void setAttr(const std::string& name, IValue v);
  // Remove attribute by name, caller is responsible for
  // the safety of this operation
  // We didn't remove the attribute in the type because the type
  // might be shared by multiple objects.
  // Therefore after removing attribute, the object is in an inconsistent
  // state where it has more attribute types in its Type than
  // the attribute slots it has, user needs to make sure the object
  // has consistent by removing the attribute in type as well
  void unsafeRemoveAttr(const std::string& name);

  std::string name() const;

  const std::vector<IValue>& slots() const {
    return slots_;
  }
  std::shared_ptr<ClassType> type() const;

  // Returns the owning CompilationUnit, promoting a weak reference if needed.
  std::shared_ptr<torch::jit::CompilationUnit> compilation_unit() {
    if (type_.holds_strong_ref()) {
      return type_.cu_.getStrongRefOrThrow();
    } else {
      auto weak_ptr = type_.cu_.getWeakRefOrThrow();
      return std::shared_ptr<torch::jit::CompilationUnit>(weak_ptr);
    }
  }

  c10::intrusive_ptr<Object> copy_to_weak_compilation_ref() const;

  // Downgrades this object's CU reference to non-owning in place.
  void unsafe_make_weak_compilation_ref() {
    type_ = WeakOrStrongTypePtr(type_.asWeakTypePtr());
  }

  c10::intrusive_ptr<Object> copy() const;

  c10::intrusive_ptr<Object> deepcopy(
      c10::optional<at::Device> device = c10::nullopt) const;

  c10::intrusive_ptr<Object> deepcopy(
      IValue::HashAliasedIValueMap& memo,
      c10::optional<at::Device> device = c10::nullopt) const;

  bool is_weak_compilation_ref() const {
    return !type_.holds_strong_ref();
  }

  bool is_empty_strong_compilation_ref() const {
    return type_.holds_empty_strong_ref();
  }

 private:
  void resizeObject(size_t slot);
  WeakOrStrongTypePtr type_;
  std::vector<IValue> slots_;
};
1614
+
1615
// Virtual ivalue PyObjectHolder that holds a py::object; we make this virtual
// because the py::object and refcounting logic should happen in
// libtorch_python. See the concrete implementation in python_ivalue.h.
struct ivalue::PyObjectHolder : c10::intrusive_ptr_target {
 public:
  // Raw pointer to the underlying Python object.
  virtual PyObject* getPyObject() = 0;
  // Best-effort inference of the TorchScript type of the held object.
  virtual c10::InferredType tryToInferType() = 0;
  // Converts the held Python object to an IValue of the given type. N is an
  // optional expected length hint — presumably for fixed-size containers;
  // see the concrete implementation for its exact semantics.
  virtual IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt) = 0;
  virtual std::string toStr() = 0;
  virtual std::vector<at::Tensor> extractTensors() = 0;

  ~PyObjectHolder() override = default;
};
1628
+
1629
// Runtime representation of a TorchScript enum member: the enum type, the
// member's name, and its underlying value.
struct ivalue::EnumHolder : c10::intrusive_ptr_target {
 public:
  EnumHolder(std::shared_ptr<EnumType> type, std::string name, IValue value)
      : type_(std::move(type)),
        name_(std::move(name)),
        value_(std::move(value)) {}

  // `is` comparison, implemented in terms of operator==.
  bool is(const ivalue::EnumHolder& rhs) {
    return *this == rhs;
  }

  friend bool operator==(
      const ivalue::EnumHolder& lhs,
      const ivalue::EnumHolder& rhs);

  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const ivalue::EnumHolder& v);

  // Fully qualified name of the enum class.
  TORCH_API const std::string& qualifiedClassName() const;

  const std::string& unqualifiedClassName() const;

  // Name of this enum member.
  const std::string& name() const {
    return name_;
  }

  // Underlying value of this enum member.
  const IValue& value() const {
    return value_;
  }

  std::shared_ptr<EnumType> type() const {
    return type_;
  }

 private:
  std::shared_ptr<EnumType> type_;
  std::string name_;
  IValue value_;
};
1669
+
1670
+ #undef TORCH_FORALL_TAGS
1671
+
1672
namespace detail {

// On platforms where `unsigned long` is the same type as uint32_t or uint64_t,
// a DEFINE_TO specialization for it would collide with the uint32_t/uint64_t
// ones. This dummy type stands in for `unsigned long` in that case so the
// specialization set stays well-formed either way.
struct _guarded_unsigned_long_unique_dummy final {
  _guarded_unsigned_long_unique_dummy(int64_t){};
};
using _guarded_unsigned_long = std::conditional_t<
    std::is_same<unsigned long, uint32_t>::value ||
        std::is_same<unsigned long, uint64_t>::value,
    _guarded_unsigned_long_unique_dummy,
    unsigned long>;

} // namespace detail
1684
+
1685
// Returns a reference to the held Object without touching its refcount; the
// IValue must outlive the returned reference.
inline ivalue::Object& IValue::toObjectRef() const {
  AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), "Attempted to create null reference");
  return *static_cast<c10::ivalue::Object*>(payload.u.as_intrusive_ptr);
}
1690
+
1691
+ // note: when adding a DEFINE_TO case here you should also add a
1692
+ // toX method to IValue. These named methods are much more discoverable
1693
+ // than the to templated function.
1694
+
1695
// Defines both IValue::to<T> overloads in terms of the named accessor: the
// rvalue overload moves the value out, while the const-lvalue overload
// returns whatever ivalue_to_const_ref_overload_return<T> selects (a copy or
// a const reference).
#define DEFINE_TO(T, method_name)                          \
  template <>                                              \
  inline T IValue::to<T>()&& {                             \
    return static_cast<T>(std::move(*this).method_name()); \
  }                                                        \
  template <>                                              \
  inline c10::detail::ivalue_to_const_ref_overload_return<T>::type IValue::to<T>() const& { \
    typedef c10::detail::ivalue_to_const_ref_overload_return<T>::type return_type;          \
    return static_cast<return_type>(this->method_name());                                   \
  }
1705
+
1706
+ DEFINE_TO(at::Tensor, toTensor)
1707
+ DEFINE_TO(at::Storage, toStorage)
1708
+ DEFINE_TO(c10::Stream, toStream)
1709
+ DEFINE_TO(float, toDouble)
1710
+ DEFINE_TO(double, toDouble)
1711
+ DEFINE_TO(c10::complex<double>, toComplexDouble)
1712
+ DEFINE_TO(unsigned char, toInt)
1713
+ DEFINE_TO(signed char, toInt)
1714
+ DEFINE_TO(unsigned short, toInt)
1715
+ DEFINE_TO(short, toInt)
1716
+ DEFINE_TO(int, toInt)
1717
+ DEFINE_TO(uint32_t, toInt)
1718
+ DEFINE_TO(uint64_t, toInt)
1719
+ DEFINE_TO(detail::_guarded_unsigned_long, toInt)
1720
+ DEFINE_TO(int64_t, toInt)
1721
+ DEFINE_TO(bool, toBool)
1722
+ DEFINE_TO(c10::intrusive_ptr<caffe2::Blob>, toBlob);
1723
+ DEFINE_TO(c10::intrusive_ptr<ivalue::ConstantString>, toString)
1724
+ DEFINE_TO(c10::intrusive_ptr<ivalue::Object>, toObject)
1725
+ DEFINE_TO(at::Scalar, toScalar)
1726
+ DEFINE_TO(c10::List<int64_t>, toIntList)
1727
+ DEFINE_TO(c10::List<double>, toDoubleList)
1728
+ DEFINE_TO(c10::List<c10::complex<double>>, toComplexDoubleList)
1729
+ DEFINE_TO(c10::List<bool>, toBoolList)
1730
+ DEFINE_TO(c10::List<at::Tensor>, toTensorList)
1731
+ DEFINE_TO(c10::impl::GenericList, toList)
1732
+ DEFINE_TO(c10::impl::GenericDict, toGenericDict)
1733
+ DEFINE_TO(c10::intrusive_ptr<ivalue::Tuple>, toTuple)
1734
+ DEFINE_TO(std::string, toStringRef)
1735
+ DEFINE_TO(c10::string_view, toStringView)
1736
+ DEFINE_TO(c10::intrusive_ptr<ivalue::Future>, toFuture)
1737
+ DEFINE_TO(c10::intrusive_ptr<ivalue::Await>, toAwait)
1738
+ DEFINE_TO(c10::intrusive_ptr<c10::RRefInterface>, toRRef)
1739
+ DEFINE_TO(c10::intrusive_ptr<at::Quantizer>, toQuantizer)
1740
+ DEFINE_TO(IValue, toIValue)
1741
+ DEFINE_TO(c10::Device, toDevice)
1742
+ DEFINE_TO(at::ScalarType, toScalarType)
1743
+ DEFINE_TO(at::Layout, toLayout)
1744
+ DEFINE_TO(at::MemoryFormat, toMemoryFormat)
1745
+ DEFINE_TO(at::QScheme, toQScheme)
1746
+ DEFINE_TO(at::Dimname, toDimname)
1747
+ DEFINE_TO(at::Generator, toGenerator)
1748
+ DEFINE_TO(c10::SymInt, toSymInt)
1749
+ DEFINE_TO(c10::SymFloat, toSymFloat)
1750
+ DEFINE_TO(c10::SymBool, toSymBool)
1751
+
1752
// Empty tag type used to select generic_to overloads by the desired return
// type (overloading on return type is otherwise impossible).
template <class T>
struct _fake_type {};
1754
+
1755
+ // generic_to<T> converts an IValue from a generic list or generic dict
1756
+ // to a concrete list/dict type like List<T>, Dict<...> or optional<T>.
1757
+ // Note that in the case of lists, this only works for IValue-based lists,
1758
+ // i.e. not for int64_t, double, ...
1759
+ // generic_to<T> is an implementation detail of IValue::to<T> and not
1760
+ // supposed to be called directly.
1761
+ // The _fake_type<T> parameter allows us to overload
1762
+ // based on the return type.
1763
// Deep-copies a boxed list into a std::vector<Elem>.
template <class Elem>
// TODO this is deprecated but we don't throw a warning because a lot of ops in
// native_functions.yaml still return std::vector.
// C10_DEPRECATED_MESSAGE("IValues based on std::vector<T> are potentially slow
// and deprecated. Please use torch::List<T> instead.")
std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>>) {
  // We need to do a deep copy of the vector because there might be other
  // references to this same IValue that also use the list. We can't just
  // move the elements out.
  auto list = std::move(ivalue).to<List<Elem>>();
  std::vector<Elem> result;
  result.reserve(list.size());
  for (Elem v : list) {
    result.push_back(std::move(v));
  }
  return result;
}
1780
+
1781
// Unboxes a custom class: T must derive from torch::CustomClassHolder. The
// rvalue and const-lvalue overloads have intentionally identical bodies; the
// ref-qualifier only affects overload resolution.
template <typename T>
c10::intrusive_ptr<T> IValue::toCustomClass() && {
  static_assert(
      std::is_base_of<torch::CustomClassHolder, T>::value == true,
      "toCustomClass requires that template parameter T must inherit "
      "from torch::CustomClassHolder");
  auto obj = toObject();
  // A custom class is represented as an Object with a single capsule slot.
  TORCH_CHECK(
      obj->slots().size() == 1,
      "Tried to cast IValue to custom class but it did "
      "not contain a custom class!");
  const auto* expected_type = c10::getCustomClassType<c10::intrusive_ptr<T>>().get();
  ivalue::checkCustomClassType(expected_type, type().get());
  auto userObj =
      c10::static_intrusive_pointer_cast<T>(obj->getSlot(0).toCapsule());
  return userObj;
}

template <typename T>
c10::intrusive_ptr<T> IValue::toCustomClass() const& {
  static_assert(
      std::is_base_of<torch::CustomClassHolder, T>::value == true,
      "toCustomClass requires that template parameter T must inherit "
      "from torch::CustomClassHolder");
  auto obj = toObject();
  TORCH_CHECK(
      obj->slots().size() == 1,
      "Tried to cast IValue to custom class but it did "
      "not contain a custom class!");
  const auto* expected_type = c10::getCustomClassType<c10::intrusive_ptr<T>>().get();
  ivalue::checkCustomClassType(expected_type, type().get());
  auto userObj =
      c10::static_intrusive_pointer_cast<T>(obj->getSlot(0).toCapsule());
  return userObj;
}
1816
+
1817
// Unboxes a custom class: T is expected to be an intrusive_ptr-like type
// whose element_type names the holder class.
template <typename T>
T generic_to(IValue ivalue, _fake_type<T>) {
  using ElemType = typename std::remove_pointer<T>::type::element_type;
  return std::move(ivalue).toCustomClass<ElemType>();
}

// Wraps the IValue in a tagged capsule without any conversion.
template <typename T>
tagged_capsule<T> generic_to(IValue ivalue, _fake_type<tagged_capsule<T>>) {
  return tagged_capsule<T>{std::move(ivalue)};
}

// Rewraps a generic list as a typed c10::List view (no element copies).
template <typename Elem>
c10::List<Elem> generic_to(IValue ivalue, _fake_type<c10::List<Elem>>) {
  return impl::toTypedList<Elem>(std::move(ivalue).toList());
}
1832
+
1833
+ template <typename T>
1834
+ static T createVectorLikeFromList(const c10::detail::ListImpl* impl) {
1835
+ T result;
1836
+ result.reserve(impl->list.size());
1837
+ for (const auto & i : impl->list) {
1838
+ result.push_back(i.to<typename T::value_type>());
1839
+ }
1840
+ return result;
1841
+ }
1842
+
1843
// Deep-copies a boxed list into a std::vector<T>.
template <typename T>
static std::vector<T> createVectorFromList(const c10::detail::ListImpl* impl) {
  return createVectorLikeFromList<std::vector<T>>(impl);
}

// Deep-copies a typed c10::List into a std::vector<T>.
template <typename T>
std::vector<T> createVectorFromList(const c10::List<T>& impl) {
  std::vector<T> result;
  result.reserve(impl.size());
  for (size_t i = 0, N = impl.size(); i < N; ++i) {
    result.push_back(impl[i]);
  }
  return result;
}
1857
+
1858
// None maps to an empty OptionalArray; otherwise the list is deep-copied.
template <typename T>
OptionalArray<T> generic_to(IValue ivalue, _fake_type<OptionalArray<T>>) {
  if (ivalue.isNone()) {
    return {};
  }
  return createVectorFromList<T>(
    std::move(ivalue).to<c10::List<T>>()
  );
}

namespace detail {
// Unpacks a boxed list into a fixed-size std::array, checking the length.
template <typename Elem, size_t... I>
std::array<Elem, sizeof...(I)> generic_to_array(
    IValue ivalue,
    _fake_type<std::array<Elem, sizeof...(I)>>,
    std::index_sequence<I...>) {
  // We need to do a deep copy of the array because there might be other
  // references to this same IValue that also use the list. We can't just
  // move the elements out.
  auto list = std::move(ivalue).to<List<Elem>>();
  TORCH_CHECK(
      list.size() == sizeof...(I),
      "Tried to convert a List with ",
      list.size(),
      " elements to a fixed-size array of size ",
      sizeof...(I));
  return {list[I]...};
}
} // namespace detail

template <typename Elem, size_t N>
std::array<Elem, N> generic_to(
    IValue ivalue,
    _fake_type<std::array<Elem, N>> ft) {
  return detail::generic_to_array(ivalue, ft, std::make_index_sequence<N>());
}

// Rewraps a generic (untyped) dict as a typed c10::Dict view.
template <typename Key, typename Value>
c10::Dict<Key, Value> generic_to(
    IValue ivalue,
    _fake_type<c10::Dict<Key, Value>>) {
  return impl::toTypedDict<Key, Value>(std::move(ivalue).toGenericDict());
}
1901
+
1902
// Deep-copies a generic dict into std::unordered_map, unboxing each key and
// value. Deprecated in favor of c10::Dict (see message below).
template <typename K, typename V>
C10_DEPRECATED_MESSAGE(
    "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")
std::unordered_map<K, V> generic_to(
    IValue ivalue,
    _fake_type<std::unordered_map<K, V>>) {
  std::unordered_map<K, V> specialized_dict;

  for (const auto& item : std::move(ivalue).toGenericDict()) {
    specialized_dict[item.key().template to<K>()] = item.value().template to<V>();
  }

  return specialized_dict;
}

// None maps to nullopt; any other value is unboxed into T.
template <typename T>
c10::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) {
  if (ivalue.isNone()) {
    return c10::nullopt;
  }
  return std::move(ivalue).to<T>();
}
1924
+
1925
namespace detail {
// Expands the tuple elements, unboxing each one into the corresponding
// std::tuple element type.
template <typename Tuple, std::size_t... INDEX>
Tuple generic_to_tuple_impl(
    const ivalue::TupleElements& t,
    std::index_sequence<INDEX...>) {
  return std::make_tuple(
      t[INDEX].to<typename std::tuple_element<INDEX, Tuple>::type>()...);
}
} // namespace detail

// Unboxes a TorchScript tuple into std::tuple<Args...>. Enabled only when no
// Args is an lvalue reference and every Args is itself constructible into an
// IValue; the element count must match exactly.
template <
    typename... Args,
    typename Indices = std::make_index_sequence<sizeof...(Args)>,
    std::enable_if_t<
        !std::disjunction<
            std::is_lvalue_reference<Args>...,
            std::negation<std::is_constructible<IValue, Args>>...>::value,
        std::nullptr_t> = nullptr>
std::tuple<Args...> generic_to(const IValue& ivalue, _fake_type<std::tuple<Args...>>) {
  const auto& vals = ivalue.toTupleRef().elements();
  TORCH_CHECK(vals.size() == sizeof...(Args));
  return detail::generic_to_tuple_impl<std::tuple<Args...>>(vals, Indices{});
}
1948
+
1949
// Fallback IValue::to<T>() for the types handled by generic_to (typed lists,
// dicts, optionals, tuples, custom classes, ...); the DEFINE_TO
// specializations above take precedence for their types.
template <typename T>
inline T IValue::to() && {
  return generic_to(std::move(*this), _fake_type<T>{});
}

template <>
inline c10::optional<c10::string_view> IValue::to() && {
  // In the default implementation, the IValue is destroyed with std::move.
  // But if the unboxed type is optional<string_view> we cannot destroy
  // the IValue, so this specialization converts from an lvalue instead.
  return generic_to(*this, _fake_type<c10::optional<c10::string_view>>{});
}

template <typename T>
inline typename c10::detail::ivalue_to_const_ref_overload_return<T>::type IValue::to() const& {
  return generic_to(*this, _fake_type<T>{});
}
1966
+
1967
// List accessors come in an rvalue flavor (which steals the underlying
// ListImpl) and a const-lvalue flavor (which retains it); the to*Vector
// variants deep-copy the elements into an owning container.
inline c10::List<int64_t> IValue::toIntList() && {
  AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
  return c10::List<int64_t>(moveToIntrusivePtr<c10::detail::ListImpl>());
}
inline c10::List<int64_t> IValue::toIntList() const& {
  AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
  return c10::List<int64_t>(toIntrusivePtr<c10::detail::ListImpl>());
}
inline std::vector<int64_t> IValue::toIntVector() const {
  AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
      "called toIntVector on null intrusive_ptr IValue");
  return createVectorFromList<int64_t>(
      static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
// Also accepts a plain IntList; each element is unboxed via to<SymInt>().
inline std::vector<c10::SymInt> IValue::toSymIntVector() const {
  AT_ASSERT(isSymIntList() || isIntList(), "Expected SymIntList or IntList but got ", tagKind());
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
      "called toSymIntVector on null intrusive_ptr IValue");
  return createVectorFromList<c10::SymInt>(
      static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
inline at::DimVector IValue::toDimVector() const {
  AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind());
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
      "called toDimVector on null intrusive_ptr IValue");
  return createVectorLikeFromList<at::DimVector>(
      static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
1999
// Double and complex-double list accessors; same move/retain/deep-copy
// pattern as the int-list accessors above.
inline c10::List<double> IValue::toDoubleList() && {
  AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
  return c10::List<double>(moveToIntrusivePtr<c10::detail::ListImpl>());
}
inline c10::List<double> IValue::toDoubleList() const& {
  AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
  return c10::List<double>(toIntrusivePtr<c10::detail::ListImpl>());
}
inline std::vector<double> IValue::toDoubleVector() const {
  AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind());
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
      "called toDoubleVector on null intrusive_ptr IValue");
  return createVectorFromList<double>(
      static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
inline c10::List<c10::complex<double>> IValue::toComplexDoubleList() && {
  AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind());
  return c10::List<c10::complex<double>>(moveToIntrusivePtr<c10::detail::ListImpl>());
}
inline c10::List<c10::complex<double>> IValue::toComplexDoubleList() const& {
  AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind());
  return c10::List<c10::complex<double>>(toIntrusivePtr<c10::detail::ListImpl>());
}
inline std::vector<c10::complex<double>> IValue::toComplexDoubleVector() const {
  AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind());
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
      "called toComplexDoubleVector on null intrusive_ptr IValue");
  return createVectorFromList<c10::complex<double>>(
      static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
2031
+ inline c10::List<bool> IValue::toBoolList() && {
2032
+ AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind());
2033
+ return c10::List<bool>(moveToIntrusivePtr<c10::detail::ListImpl>());
2034
+ }
2035
+ inline c10::List<bool> IValue::toBoolList() const& {
2036
+ AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind());
2037
+ return c10::List<bool>(toIntrusivePtr<c10::detail::ListImpl>());
2038
+ }
2039
+ inline c10::List<at::Tensor> IValue::toTensorList() && {
2040
+ AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
2041
+ return c10::List<at::Tensor>(moveToIntrusivePtr<c10::detail::ListImpl>());
2042
+ }
2043
+ inline c10::List<at::Tensor> IValue::toTensorList() const& {
2044
+ AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
2045
+ return c10::List<at::Tensor>(toIntrusivePtr<c10::detail::ListImpl>());
2046
+ }
2047
+ inline std::vector<at::Tensor> IValue::toTensorVector() const {
2048
+ AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind());
2049
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2050
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2051
+ "called toTensorVector on null intrusive_ptr IValue");
2052
+ return createVectorFromList<at::Tensor>(
2053
+ static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
2054
+ }
2055
+ inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() && {
2056
+ AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
2057
+ return c10::List<c10::optional<at::Tensor>>(moveToIntrusivePtr<c10::detail::ListImpl>());
2058
+ }
2059
+ inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() const& {
2060
+ AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
2061
+ return c10::List<c10::optional<at::Tensor>>(toIntrusivePtr<c10::detail::ListImpl>());
2062
+ }
2063
+ inline std::vector<c10::optional<at::Tensor>> IValue::toOptionalTensorVector() const {
2064
+ AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
2065
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2066
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2067
+ "called toOptionalTensorVector on null intrusive_ptr IValue");
2068
+ return createVectorFromList<c10::optional<at::Tensor>>(
2069
+ static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
2070
+ }
2071
+ inline c10::List<IValue> IValue::toList() && {
2072
+ AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
2073
+ return c10::List<IValue>(moveToIntrusivePtr<c10::detail::ListImpl>());
2074
+ }
2075
+ inline c10::List<IValue> IValue::toList() const& {
2076
+ AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
2077
+ return c10::List<IValue>(toIntrusivePtr<c10::detail::ListImpl>());
2078
+ }
2079
+ inline c10::ArrayRef<IValue> IValue::toListRef() const {
2080
+ AT_ASSERT(isList(), "Expected GenericList but got ", tagKind());
2081
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2082
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2083
+ "called toListRef on null intrusive_ptr IValue");
2084
+ return static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr)
2085
+ ->list;
2086
+ }
2087
+ inline c10::Dict<IValue, IValue> IValue::toGenericDict() && {
2088
+ AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind());
2089
+ return c10::Dict<IValue, IValue>(moveToIntrusivePtr<c10::detail::DictImpl>());
2090
+ }
2091
+ inline c10::Dict<IValue, IValue> IValue::toGenericDict() const& {
2092
+ AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind());
2093
+ return c10::Dict<IValue, IValue>(toIntrusivePtr<c10::detail::DictImpl>());
2094
+ }
2095
+ inline c10::intrusive_ptr<ivalue::Tuple> IValue::toTuple() && {
2096
+ AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind());
2097
+ return moveToIntrusivePtr<ivalue::Tuple>();
2098
+ }
2099
+ inline c10::intrusive_ptr<ivalue::Tuple> IValue::toTuple() const& {
2100
+ AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind());
2101
+ return toIntrusivePtr<ivalue::Tuple>();
2102
+ }
2103
+ inline ivalue::Tuple& IValue::toTupleRef() const {
2104
+ AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind());
2105
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2106
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2107
+ "called toTupleRef on null intrusive_ptr IValue");
2108
+ return *static_cast<c10::ivalue::Tuple*>(
2109
+ payload.u.as_intrusive_ptr);
2110
+ }
2111
+
2112
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::Tuple> v)
2113
+ : tag(Tag::Tuple) {
2114
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2115
+ }
2116
+ template <
2117
+ typename... Args,
2118
+ std::enable_if_t<
2119
+ !std::disjunction<
2120
+ std::is_lvalue_reference<Args>...,
2121
+ std::negation<std::is_constructible<IValue, Args>>...>::value,
2122
+ std::nullptr_t>>
2123
+ inline IValue::IValue(const std::tuple<Args...>& t)
2124
+ : IValue(c10::guts::apply(c10::ivalue::Tuple::create<const Args&...>, t)) {
2125
+ }
2126
+
2127
+ template <
2128
+ typename... Args,
2129
+ std::enable_if_t<
2130
+ !std::disjunction<
2131
+ std::is_lvalue_reference<Args>...,
2132
+ std::negation<std::is_constructible<IValue, Args>>...>::value,
2133
+ std::nullptr_t>>
2134
+ inline IValue::IValue(std::tuple<Args...>&& t)
2135
+ : IValue(c10::guts::apply(c10::ivalue::Tuple::create<Args&&...>, std::move(t))) {
2136
+ }
2137
+
2138
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::ConstantString> v)
2139
+ : tag(Tag::String) {
2140
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2141
+ }
2142
+ inline IValue::IValue(std::string v)
2143
+ : IValue(ivalue::ConstantString::create(std::move(v))) {}
2144
+
2145
+ inline IValue::IValue(c10::impl::GenericList v)
2146
+ : tag(Tag::GenericList) {
2147
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release());
2148
+ }
2149
+
2150
+ template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
2151
+ inline IValue::IValue(c10::List<T>&& v) : IValue(impl::toList<T>(std::move(v))) {}
2152
+ template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
2153
+ inline IValue::IValue(const c10::List<T>& v) : IValue(impl::toList<T>(v)) {}
2154
+ template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
2155
+ inline IValue::IValue(at::ArrayRef<T> v) : IValue(c10::List<T>()) {
2156
+ auto list = to<c10::List<T>>();
2157
+ list.reserve(v.size());
2158
+ for (const auto& e : v) {
2159
+ list.push_back(e);
2160
+ }
2161
+ }
2162
+ template <class T, IValue::enable_if_symint<T>>
2163
+ inline IValue::IValue(at::ArrayRef<T> v) : IValue() {
2164
+ auto vi = c10::asIntArrayRefSlowOpt(v);
2165
+ if (vi.has_value()) {
2166
+ // This list is entirely integers; ensure it is typed as
2167
+ // an IntList so toIntList works
2168
+ *this = IValue(*vi);
2169
+ } else {
2170
+ // This list has SymInts; type it as a SymInt
2171
+ *this = IValue(impl::toList<c10::SymInt>(c10::List<c10::SymInt>()));
2172
+ auto list = to<c10::List<c10::SymInt>>();
2173
+ list.reserve(v.size());
2174
+ for (const auto& e : v) {
2175
+ list.push_back(e);
2176
+ }
2177
+ }
2178
+ }
2179
+ template <class T, IValue::enable_if_symint<T>>
2180
+ inline IValue::IValue(at::OptionalArrayRef<T> mb_v) : IValue() {
2181
+ if (!mb_v.has_value()) return;
2182
+ *this = IValue(*mb_v);
2183
+ }
2184
+ template <class T, IValue::enable_if_symint<T>>
2185
+ inline IValue::IValue(const std::vector<T>& v) : IValue() {
2186
+ *this = IValue(at::ArrayRef<T>(v));
2187
+ }
2188
+ template <class T, IValue::enable_if_symint<T>>
2189
+ inline IValue::IValue(std::vector<T>&& v) : IValue() {
2190
+ auto vi = c10::asIntArrayRefSlowOpt(v);
2191
+ if (vi.has_value()) {
2192
+ // This list is entirely integers; ensure it is typed as
2193
+ // an IntList so toIntList works
2194
+ *this = IValue(*vi);
2195
+ } else {
2196
+ // This list has SymInts; type it as a SymInt
2197
+ *this = IValue(impl::toList<c10::SymInt>(c10::List<c10::SymInt>()));
2198
+ auto list = to<c10::List<c10::SymInt>>();
2199
+ list.reserve(v.size());
2200
+ for (auto& e : v) {
2201
+ list.push_back(std::move(e));
2202
+ }
2203
+ }
2204
+ }
2205
+ template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
2206
+ inline IValue::IValue(const std::vector<T>& v) : IValue(c10::List<T>()) {
2207
+ auto list = to<c10::List<T>>();
2208
+ list.reserve(v.size());
2209
+ for (const auto& e : v) {
2210
+ list.push_back(e);
2211
+ }
2212
+ }
2213
+
2214
+ template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
2215
+ inline IValue::IValue(std::vector<T>&& v) : IValue(c10::List<T>()) {
2216
+ auto list = to<c10::List<T>>();
2217
+ list.reserve(v.size());
2218
+ if constexpr (std::is_same_v<T, bool>) {
2219
+ for (auto e : v) {
2220
+ list.push_back(e);
2221
+ }
2222
+ } else {
2223
+ for (auto& e : v) {
2224
+ list.push_back(std::move(e));
2225
+ }
2226
+ }
2227
+ }
2228
+
2229
+ template <class T, IValue::enable_if_list_is_ivalue_constructible<T>>
2230
+ inline IValue::IValue(c10::OptionalArrayRef<T> v) : IValue() {
2231
+ if (v.has_value()) {
2232
+ *this = IValue(std::move(*v));
2233
+ }
2234
+ }
2235
+
2236
+ template <class T, size_t N>
2237
+ inline IValue::IValue(std::array<T, N> v) : IValue(c10::List<T>()) {
2238
+ auto list = to<c10::List<T>>();
2239
+ list.reserve(v.size());
2240
+ for (auto& e : v) {
2241
+ list.push_back(std::move(e));
2242
+ }
2243
+ }
2244
+
2245
+ template <class T, IValue::enable_if_ilist_is_ivalue_constructible<T>>
2246
+ inline IValue::IValue(c10::IListRef<T> v) : IValue() {
2247
+ constexpr bool boxed_type_constructs_ivalue =
2248
+ std::is_constructible<IValue, typename c10::IListRef<T>::boxed_type>::value;
2249
+ // First, we try to use the boxed value.
2250
+ // If we fail (either it's not in the boxed state, or its boxed type
2251
+ // can not construct an IValue), we fallback to copying the list.
2252
+ if (boxed_type_constructs_ivalue && v.isBoxed()) {
2253
+ *this = IValue(impl::toList(v.toBoxed()));
2254
+ } else {
2255
+ c10::List<T> list;
2256
+ list.reserve(v.size());
2257
+ for (const auto& t : v) {
2258
+ list.push_back(t);
2259
+ }
2260
+ *this = IValue(impl::toList(std::move(list)));
2261
+ }
2262
+ }
2263
+
2264
+ inline IValue::IValue(c10::impl::GenericDict v)
2265
+ : tag(Tag::GenericDict) {
2266
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release());
2267
+ }
2268
+ template <class Key, class Value>
2269
+ inline IValue::IValue(c10::Dict<Key, Value> v)
2270
+ : IValue(impl::toGenericDict(std::move(v))) {}
2271
+
2272
+ template <class Key, class Value>
2273
+ inline IValue::IValue(std::unordered_map<Key, Value> v)
2274
+ : IValue(Dict<Key, Value>()) {
2275
+ auto dict = to<c10::Dict<Key, Value>>();
2276
+ dict.reserve(v.size());
2277
+ for (auto& e : v) {
2278
+ dict.insert(std::move(e.first), std::move(e.second));
2279
+ }
2280
+ }
2281
+
2282
+ template <class T, IValue::enable_if_ivalue_constructible<T>>
2283
+ inline IValue::IValue(c10::optional<T> v) : IValue() {
2284
+ if (v.has_value()) {
2285
+ *this = IValue(std::move(*v));
2286
+ }
2287
+ }
2288
+
2289
+ inline IValue::IValue(c10::nullopt_t) : IValue() {}
2290
+
2291
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::Object> v)
2292
+ : tag(Tag::Object) {
2293
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2294
+ }
2295
+
2296
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::PyObjectHolder> v)
2297
+ : tag(Tag::PyObject) {
2298
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2299
+ }
2300
+
2301
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::EnumHolder> v)
2302
+ : tag(Tag::Enum) {
2303
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2304
+ }
2305
+
2306
+ inline IValue IValue::make_capsule(
2307
+ intrusive_ptr<torch::CustomClassHolder> blob) {
2308
+ IValue iv;
2309
+ iv.tag = Tag::Capsule;
2310
+ iv.payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
2311
+ return iv;
2312
+ }
2313
+
2314
+ template <
2315
+ typename T,
2316
+ std::enable_if_t<std::is_base_of<torch::CustomClassHolder, T>::value, int>>
2317
+ IValue::IValue(c10::intrusive_ptr<T> custom_class) : tag(Tag::Object) {
2318
+ auto classType = []() {
2319
+ try {
2320
+ return c10::getCustomClassType<c10::intrusive_ptr<T>>();
2321
+ } catch (const c10::Error&) {
2322
+ throw c10::Error(
2323
+ "Trying to instantiate a class that isn't a registered custom class: " +
2324
+ std::string(c10::util::get_fully_qualified_type_name<T>()),
2325
+ "");
2326
+ }
2327
+ }();
2328
+ auto ivalue_obj = c10::ivalue::Object::create(std::move(classType), /* numSlots */1);
2329
+ ivalue_obj->setSlot(0, IValue::make_capsule(std::move(custom_class)));
2330
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(ivalue_obj.release());
2331
+
2332
+ }
2333
+
2334
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::Future> v)
2335
+ : tag(Tag::Future) {
2336
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2337
+ }
2338
+
2339
+ inline IValue::IValue(c10::intrusive_ptr<ivalue::Await> v)
2340
+ : tag(Tag::Await) {
2341
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2342
+ }
2343
+
2344
+ inline IValue::IValue(c10::intrusive_ptr<c10::RRefInterface> v)
2345
+ : tag(Tag::RRef) {
2346
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2347
+ }
2348
+
2349
+ inline IValue::IValue(c10::intrusive_ptr<at::Quantizer> v)
2350
+ : tag(Tag::Quantizer) {
2351
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release());
2352
+ }
2353
+
2354
+ template <typename T>
2355
+ inline IValue::IValue(c10::complex<T> c)
2356
+ : tag(Tag::ComplexDouble) {
2357
+ auto v = c10::make_intrusive<ivalue::ComplexHolder>(c);
2358
+ payload.u.as_intrusive_ptr = v.release();
2359
+ }
2360
+
2361
+ inline const std::string& IValue::toStringRef() const {
2362
+ AT_ASSERT(isString(), "Expected String but got ", tagKind());
2363
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2364
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2365
+ "called toStringRef on null intrusive_ptr IValue");
2366
+ return static_cast<const c10::ivalue::ConstantString*>(
2367
+ payload.u.as_intrusive_ptr)
2368
+ ->string();
2369
+ }
2370
+ inline c10::optional<std::reference_wrapper<const std::string>> IValue::
2371
+ toOptionalStringRef() const {
2372
+ if (isNone()) {
2373
+ return c10::nullopt;
2374
+ }
2375
+ AT_ASSERT(isString(), "Expected optional<string> but got ", tagKind());
2376
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2377
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2378
+ "called toOptionalStringRef on null intrusive_ptr IValue");
2379
+ return std::reference_wrapper<const std::string>(
2380
+ static_cast<const c10::ivalue::ConstantString*>(payload.u.as_intrusive_ptr)
2381
+ ->string());
2382
+ }
2383
+
2384
+ inline c10::string_view IValue::toStringView() const {
2385
+ AT_ASSERT(isString(), "Expected String but got ", tagKind());
2386
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
2387
+ payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
2388
+ "called toStringView on null intrusive_ptr IValue");
2389
+ return static_cast<const c10::ivalue::ConstantString*>(
2390
+ payload.u.as_intrusive_ptr)
2391
+ ->string_view();
2392
+ }
2393
+
2394
+ inline PyObject* IValue::toPyObject() const {
2395
+ return toPyObjectHolder()->getPyObject();
2396
+ }
2397
+
2398
+ template <typename T>
2399
+ inline optional<T> IValue::toOptional() {
2400
+ if (this->isNone()) {
2401
+ return nullopt;
2402
+ }
2403
+ return this->to<T>();
2404
+ }
2405
+
2406
+ template <typename T>
2407
+ inline optional<T> IValue::toOptional() const {
2408
+ if (this->isNone()) {
2409
+ return nullopt;
2410
+ }
2411
+ return this->to<T>();
2412
+ }
2413
+
2414
+ inline bool IValue::isCustomClass() const {
2415
+ return torch::isCustomClass(*this);
2416
+ }
2417
+
2418
+ inline bool IValue::isSameIdentity(const IValue& rhs) const {
2419
+ // We choose to not use memcmp for payload check due to potential random
2420
+ // padding characters on union type
2421
+
2422
+ // Semantics:
2423
+ // 1. Immutable primitive values of the same type (Int, Double, None, Bool,
2424
+ // Str) return value equality
2425
+ // 2. If it is a tensor type, we need to take undefined tensor into account
2426
+ // 3. Undefined_tensor is None and vice versa should be true
2427
+ // 4. If it is a reference type (i.e. isIntrusivePtr()), then is True when
2428
+ // the pointed-to object is the same.
2429
+ // 5. False for all other comparisons.
2430
+ if (this->isNone() && rhs.isNone()) {
2431
+ return true;
2432
+ } else if (this->isBool() && rhs.isBool()) {
2433
+ // for bool type, do equality check
2434
+ return this->toBool() == rhs.toBool();
2435
+ } else if (this->isTensor() && rhs.isTensor()) {
2436
+ return this->payload.as_tensor.is_same(rhs.payload.as_tensor);
2437
+ } else if (this->isTensor() && rhs.isNone()) {
2438
+ // special case: undefined tensor and None are the same identity
2439
+ return !this->payload.as_tensor.defined();
2440
+ } else if (this->isNone() && rhs.isTensor()) {
2441
+ // special case: undefined tensor and None are the same identity
2442
+ return !rhs.payload.as_tensor.defined();
2443
+ } else if (this->isInt() && rhs.isInt()) {
2444
+ return this->toInt() == rhs.toInt();
2445
+ } else if (this->isDouble() && rhs.isDouble()) {
2446
+ return this->toDouble() == rhs.toDouble();
2447
+ } else if (this->isString() && rhs.isString()) {
2448
+ return this->toStringRef() == rhs.toStringRef();
2449
+ } else {
2450
+ // for objects holding in IValue, do shallow compare on pointer address to
2451
+ // testify the identity
2452
+ return this->isIntrusivePtr() && rhs.isIntrusivePtr() &&
2453
+ this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr;
2454
+ }
2455
+ }
2456
+
2457
+ namespace ivalue {
2458
+ namespace detail {
2459
+
2460
+ template <typename T>
2461
+ IValue from_(T&& x, std::true_type) {
2462
+ return IValue(std::forward<T>(x));
2463
+ }
2464
+ template <typename T>
2465
+ IValue from_(c10::intrusive_ptr<T> x, std::false_type) {
2466
+ return IValue(std::move(x));
2467
+ }
2468
+ template <typename T>
2469
+ IValue from_(T&& /*x*/, std::false_type) {
2470
+ static_assert(
2471
+ guts::false_t<T>::value,
2472
+ "You are calling from with a type that it doesn't support, and isn't a potential custom class (ie: is an intrusive_ptr)");
2473
+ return IValue();
2474
+ }
2475
+ } // namespace detail
2476
+
2477
+ template <typename T>
2478
+ IValue from(T&& x) {
2479
+ return detail::from_(
2480
+ std::forward<T>(x), typename std::is_constructible<IValue, T>::type{});
2481
+ }
2482
+
2483
+ } // namespace ivalue
2484
+
2485
+
2486
+ template <>
2487
+ struct MaybeOwnedTraits<IValue> {
2488
+ using owned_type = IValue;
2489
+ using borrow_type = IValue;
2490
+
2491
+ static borrow_type createBorrow(const owned_type& from) {
2492
+ if (!from.isPtrType()) {
2493
+ return from;
2494
+ }
2495
+ if (from.isTensor()) {
2496
+ return IValue(MaybeOwnedTraits<at::Tensor>::createBorrow(from.toTensor()));
2497
+ } else {
2498
+ return IValue(from.payload, from.tag);
2499
+ }
2500
+ }
2501
+
2502
+ static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
2503
+ lhs.clearToNone();
2504
+ if (!rhs.isPtrType()) {
2505
+ lhs = rhs;
2506
+ } else if (rhs.isTensor()) {
2507
+ lhs = IValue(MaybeOwnedTraits<at::Tensor>::createBorrow(rhs.toTensor()));
2508
+ } else {
2509
+ lhs = IValue(rhs.payload, rhs.tag);
2510
+ }
2511
+ }
2512
+
2513
+ static void destroyBorrow(borrow_type& toDestroy) {
2514
+ toDestroy.clearToNone();
2515
+ }
2516
+
2517
+ static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
2518
+ return borrow;
2519
+ }
2520
+
2521
+ static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
2522
+ return &borrow;
2523
+ }
2524
+
2525
+ static bool debugBorrowIsValid(const borrow_type&) {
2526
+ return true;
2527
+ }
2528
+ };
2529
+
2530
+ template <>
2531
+ struct IValue::TagType<c10::Type> {
2532
+ static TORCH_API c10::TypePtr get(const IValue&);
2533
+ };
2534
+
2535
+ template <>
2536
+ struct IValue::TagType<c10::DynamicType> {
2537
+ static TORCH_API c10::TypePtr get(const IValue&);
2538
+ };
2539
+
2540
+ template <typename T>
2541
+ TypePtr IValue::type() const {
2542
+ return IValue::TagType<T>::get(*this);
2543
+ }
2544
+
2545
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <memory>
4
+ #include <type_traits>
5
+
6
+ #include <c10/util/Exception.h>
7
+ #include <c10/util/MaybeOwned.h>
8
+
9
+ namespace c10 {
10
+
11
+ // Compatibility wrapper around a raw pointer so that existing code
12
+ // written to deal with a shared_ptr can keep working.
13
+ template <typename T>
14
+ class SingletonTypePtr {
15
+ public:
16
+ /* implicit */ SingletonTypePtr(T* p) : repr_(p) {}
17
+
18
+ // We need this to satisfy Pybind11, but it shouldn't be hit.
19
+ explicit SingletonTypePtr(std::shared_ptr<T>) { TORCH_CHECK(false); }
20
+
21
+ using element_type = typename std::shared_ptr<T>::element_type;
22
+
23
+ template <typename U = T, std::enable_if_t<!std::is_same<std::remove_const_t<U>, void>::value, bool> = true>
24
+ T& operator*() const {
25
+ return *repr_;
26
+ }
27
+
28
+ T* get() const {
29
+ return repr_;
30
+ }
31
+
32
+ T* operator->() const {
33
+ return repr_;
34
+ }
35
+
36
+ operator bool() const {
37
+ return repr_ != nullptr;
38
+ }
39
+
40
+ private:
41
+ T* repr_{nullptr};
42
+ };
43
+
44
+ template <typename T, typename U>
45
+ bool operator==(SingletonTypePtr<T> lhs, SingletonTypePtr<U> rhs) {
46
+ return (void*)lhs.get() == (void*)rhs.get();
47
+ }
48
+
49
+ template <typename T, typename U>
50
+ bool operator!=(SingletonTypePtr<T> lhs, SingletonTypePtr<U> rhs) {
51
+ return !(lhs == rhs);
52
+ }
53
+
54
+ } // namespace c10
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <string>
4
+
5
+ #include <ATen/cuda/CUDAContext.h>
6
+ #include <ATen/cuda/Exceptions.h>
7
+
8
+ #include <ATen/cudnn/cudnn-wrapper.h>
9
+ #include <ATen/cudnn/Utils.h>
10
+ #include <ATen/core/Tensor.h>
11
+ #include <ATen/TensorUtils.h>
12
+ #include <ATen/cuda/ATenCUDAGeneral.h>
13
+ #include <cuda.h>
14
+
15
+ #ifndef AT_PER_OPERATOR_HEADERS
16
+ #include <ATen/Functions.h>
17
+ #else
18
+ #include <ATen/ops/empty.h>
19
+ #endif
20
+
21
+ #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8907
22
+ #define USE_CUDNN_RNN_V8_API
23
+ #endif
24
+
25
+ namespace at { namespace native {
26
+
27
+ std::string cudnnTypeToString(cudnnDataType_t dtype);
28
+
29
+ // TODO: Add constructors for all of the descriptors
30
+
31
+ inline int dataSize(cudnnDataType_t dataType)
32
+ {
33
+ switch (dataType) {
34
+ #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8200
35
+ case CUDNN_DATA_BFLOAT16:
36
+ #endif
37
+ case CUDNN_DATA_HALF: return 2;
38
+ case CUDNN_DATA_FLOAT: return 4;
39
+ default: return 8;
40
+ }
41
+ }
42
+
43
+ // The stride for a size-1 dimensions is not uniquely determined; in
44
+ // fact, it can be anything you want, because the fact that the
45
+ // tensor is size 1 at this dimension means that you will never actually
46
+ // try advancing your pointer by this stride.
47
+ //
48
+ // However, CuDNN has a much more stringent requirement on strides:
49
+ // if you are passing a contiguous input, it better be the case
50
+ // that the stride for dim i is the product of the sizes of dims
51
+ // i+1 to the end. This stride is indeed uniquely determined. This
52
+ // function modifies 'stride' in place so this invariant holds.
53
+ template <typename T>
54
+ static inline void fixSizeOneDimStride(int dim, const T *size, T *stride, bool nhwc) {
55
+ int64_t z = 1;
56
+ int index = 0;
57
+ std::vector<int> permutation(dim);
58
+
59
+ if (nhwc) {
60
+ permutation[index++] = 1;
61
+ }
62
+ for (int d = dim-1; d > 1; d--) {
63
+ permutation[index++] = d;
64
+ }
65
+ if (!nhwc) {
66
+ permutation[index++] = 1;
67
+ }
68
+ permutation[index++] = 0;
69
+ for (int d : permutation) {
70
+ if (size[d] == 1) {
71
+ stride[d] = z;
72
+ } else {
73
+ z *= size[d];
74
+ }
75
+ }
76
+ }
77
+
78
+ template <typename T, cudnnStatus_t (*dtor)(T*)>
79
+ struct DescriptorDeleter {
80
+ void operator()(T* x) {
81
+ if (x != nullptr) {
82
+ AT_CUDNN_CHECK(dtor(x));
83
+ }
84
+ }
85
+ };
86
+
87
+ // A generic class for wrapping cuDNN descriptor types. All you need
88
+ // is to give the underlying type the Descriptor_t points to (usually,
89
+ // if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct),
90
+ // the constructor and the destructor. Subclasses are responsible
91
+ // for defining a set() function to actually set the descriptor.
92
+ //
93
+ // Descriptors default construct to a nullptr, and have a descriptor
94
+ // initialized the first time you call set() or any other initializing
95
+ // function.
96
+ template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
97
+ class TORCH_CUDA_CPP_API Descriptor {
98
+ public:
99
+ // TODO: Figure out why const-correctness doesn't work here
100
+
101
+ // Use desc() to access the underlying descriptor pointer in
102
+ // a read-only fashion. Most client code should use this.
103
+ // If the descriptor was never initialized, this will return
104
+ // nullptr.
105
+ T* desc() const { return desc_.get(); }
106
+ T* desc() { return desc_.get(); }
107
+
108
+ // Use mut_desc() to access the underlying descriptor pointer
109
+ // if you intend to modify what it points to (e.g., using
110
+ // cudnnSetFooDescriptor). This will ensure that the descriptor
111
+ // is initialized. Code in this file will use this function.
112
+ T* mut_desc() { init(); return desc_.get(); }
113
+ protected:
114
+ void init() {
115
+ if (desc_ == nullptr) {
116
+ T* raw_desc;
117
+ AT_CUDNN_CHECK(ctor(&raw_desc));
118
+ desc_.reset(raw_desc);
119
+ }
120
+ }
121
+ private:
122
+ std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
123
+ };
124
+
125
+ class TORCH_CUDA_CPP_API RNNDataDescriptor : public Descriptor<
126
+ cudnnRNNDataStruct,
127
+ &cudnnCreateRNNDataDescriptor,
128
+ &cudnnDestroyRNNDataDescriptor> {
129
+ public:
130
+ void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
131
+ private:
132
+ void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
133
+ AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, NULL));
134
+ }
135
+ };
136
+
137
+ class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor<
138
+ cudnnTensorStruct,
139
+ &cudnnCreateTensorDescriptor,
140
+ &cudnnDestroyTensorDescriptor> {
141
+ public:
142
+ TensorDescriptor() = default;
143
+ explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
144
+ set(t, pad);
145
+ }
146
+
147
+ // Note [CuDNN broadcast padding]
148
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
149
+ // pad specifies the minimum dimensionality of the tensor descriptor
150
+ // we produce (it doesn't have anything to do with, e.g., convolution
151
+ // padding). If 't' is lower-dimensional than 'pad', the remaining
152
+ // dimensions (on the right) are padded with ones. This doesn't
153
+ // affect the underlying data layout. This is particularly useful for
154
+ // dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is
155
+ // done in two steps: first, the client code is expected to pad out
156
+ // (the dimensions) input tensors to be the same dimension as the
157
+ // target broadcast, and then second, CuDNN takes of actually
158
+ // broadcasting size 1 dimensions.
159
+
160
+ void set(const at::Tensor &t, size_t pad = 0);
161
+ void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0);
162
+ void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
163
+
164
+ void print();
165
+
166
+ private:
167
+ void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc);
168
+
169
+ void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) {
170
+ fixSizeOneDimStride<int>(dim, size, stride, nhwc);
171
+ AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, stride));
172
+ }
173
+ };
174
+
175
+ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
176
+
177
+ class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor<
178
+ cudnnFilterStruct,
179
+ &cudnnCreateFilterDescriptor,
180
+ &cudnnDestroyFilterDescriptor> {
181
+ public:
182
+ void set(const at::Tensor &t, int64_t pad = 0) {
183
+ set(t, at::MemoryFormat::Contiguous, pad);
184
+ }
185
+
186
+ void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
187
+
188
+ void print();
189
+ private:
190
+ void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) {
191
+ AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size));
192
+ }
193
+ };
194
+
195
+ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d);
196
+
197
+ struct TORCH_CUDA_CPP_API ConvolutionDescriptor
198
+ : public Descriptor<
199
+ cudnnConvolutionStruct,
200
+ &cudnnCreateConvolutionDescriptor,
201
+ &cudnnDestroyConvolutionDescriptor> {
202
+ void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) {
203
+ cudnnDataType_t mathType = dataType;
204
+ if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT;
205
+ AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale,
206
+ CUDNN_CROSS_CORRELATION, mathType));
207
+ AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups));
208
+ // See Note [behavior of cudnnFind and cudnnGet]
209
+ AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH));
210
+ if(dataType == CUDNN_DATA_HALF) {
211
+ AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH));
212
+ } else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) {
213
+ AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH));
214
+ }
215
+ }
216
+ };
217
+
218
+ struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
219
+ : public Descriptor<
220
+ cudnnSpatialTransformerStruct,
221
+ &cudnnCreateSpatialTransformerDescriptor,
222
+ &cudnnDestroySpatialTransformerDescriptor> {
223
+ void set(cudnnDataType_t dataType, int dim, int* size) {
224
+ AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size));
225
+ }
226
+ };
227
+
228
+ struct TORCH_CUDA_CPP_API DropoutDescriptor
229
+ : public Descriptor<
230
+ cudnnDropoutStruct,
231
+ &cudnnCreateDropoutDescriptor,
232
+ &cudnnDestroyDropoutDescriptor> {
233
+ at::Tensor state;
234
+
235
+ // Initialize a dropout descriptor's RNG state.
236
+ // WARNING: This function is very expensive, avoid calling this function!
237
+ void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) {
238
+ TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
239
+ size_t state_size;
240
+ AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size));
241
+ AT_ASSERT(options.device().type() == kCUDA);
242
+ AT_ASSERT(options.dtype() == kByte);
243
+ state = at::empty({static_cast<int64_t>(state_size)}, options);
244
+ AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed));
245
+ }
246
+
247
+ // Restore a dropout descriptor given a dropout probability and existing RNG state.
248
+ void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
249
+ TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
250
+ state = state_;
251
+ void *state_ptr = state.data_ptr();
252
+ size_t state_size = state.size(0);
253
+ // NB: The seed doesn't actually matter, so we give a dummy value
254
+ AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */));
255
+ }
256
+
257
+ // Restore a dropout descriptor corresponding to no dropout
258
+ void set_no_dropout(cudnnHandle_t handle) {
259
+ // NB: seed doesn't matter when dropout = 0, because no random number
260
+ // initialization actually takes place when there is no dropout.
261
+ // NB: Empirically, cudnnSetDropoutDescriptor is cheap when
262
+ // dropout == 0
263
+ AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */));
264
+ }
265
+ };
266
+
267
// RAII wrapper around a cudnnRNNDescriptor_t. Retains the DropoutDescriptor
// it was configured with as a member, so the dropout state's lifetime covers
// the RNN descriptor's use.
struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor<
                              cudnnRNNStruct,
                              &cudnnCreateRNNDescriptor,
                              &cudnnDestroyRNNDescriptor> {
  DropoutDescriptor dropout_desc_;
  // Configure the RNN descriptor. Two compile-time paths:
  //  - legacy cudnnSetRNNDescriptor_v6 path (pre-v8 RNN API)
  //  - cudnnSetRNNDescriptor_v8 path when USE_CUDNN_RNN_V8_API is defined,
  //    which additionally needs the input size and whether sequences are
  //    packed (padded I/O disabled).
  void set(cudnnHandle_t handle,
#ifdef USE_CUDNN_RNN_V8_API
      int input_size,
      bool packed,
#endif
      int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc,
      cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional,
      cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) {
    // Take ownership of the dropout descriptor (and its RNG state tensor).
    dropout_desc_ = std::move(dropout_desc);
#ifndef USE_CUDNN_RNN_V8_API
    AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
        handle,
        mut_desc(),
        hidden_size,
        num_layers,
        dropout_desc_.desc(),
        input_mode,
        bidirectional,
        mode,
        algo,
        datatype));
    // LSTM recurrent projection, only when requested (proj_size != 0).
    if (proj_size != 0) {
      AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers(
          handle,
          /*rnnDesc=*/mut_desc(),
          /*recProjSize=*/proj_size,
          /*outProjSize=*/0));
    }
    // Math-type selection only applies on devices with compute capability
    // >= 7 (prop->major >= 7).
    cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
    if (prop->major >= 7) {
      if (input_type == CUDNN_DATA_HALF) {
        // fp16 inputs: enable Tensor Core math.
        cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH);
      }
      else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) {
        // fp32 with TF32 disallowed: force plain FMA math.
        cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH);
      }
      else {
        // Technically, as the default it's not necessary to explicitly
        // set this.
        cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH);
      }
    }
#else
    // v8 API: math type is chosen up front and passed into the single
    // cudnnSetRNNDescriptor_v8 call.
    cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
    auto math_type = CUDNN_DEFAULT_MATH;
    if (prop->major >= 7) {
      if (input_type == CUDNN_DATA_HALF) {
        math_type = CUDNN_TENSOR_OP_MATH;
      } else if (!allow_tf32) {
        math_type = CUDNN_FMA_MATH;
      }
    }
    AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v8(
        mut_desc(),
        algo,
        mode,
        CUDNN_RNN_DOUBLE_BIAS,
        bidirectional,
        input_mode,
        input_type,
        datatype,
        math_type,
        input_size,
        hidden_size,
        proj_size ? proj_size : hidden_size,
        num_layers,
        dropout_desc_.desc(),
        packed ? CUDNN_RNN_PADDED_IO_DISABLED : CUDNN_RNN_PADDED_IO_ENABLED));
#endif
  }
};
343
+
344
+ struct TORCH_CUDA_CPP_API CTCLossDescriptor
345
+ : public Descriptor<
346
+ cudnnCTCLossStruct,
347
+ &cudnnCreateCTCLossDescriptor,
348
+ &cudnnDestroyCTCLossDescriptor> {
349
+ void set(cudnnDataType_t datatype) {
350
+ AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype));
351
+ }
352
+ void setEx(
353
+ cudnnDataType_t datatype,
354
+ cudnnLossNormalizationMode_t normMode,
355
+ cudnnNanPropagation_t gradMode) {
356
+ AT_CUDNN_CHECK(
357
+ cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode));
358
+ }
359
+ };
360
+
361
+ struct TORCH_CUDA_CPP_API ActivationDescriptor
362
+ : public Descriptor<
363
+ cudnnActivationStruct,
364
+ &cudnnCreateActivationDescriptor,
365
+ &cudnnDestroyActivationDescriptor> {
366
+ void set(cudnnActivationMode_t mode) {
367
+ AT_ASSERT(
368
+ mode == CUDNN_ACTIVATION_RELU,
369
+ "TODO: support more cuDNN activation modes");
370
+ AT_CUDNN_CHECK(cudnnSetActivationDescriptor(
371
+ mut_desc(),
372
+ mode,
373
+ cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
374
+ std::numeric_limits<double>::max()));
375
+ }
376
+ };
377
+
378
+ union Constant
379
+ {
380
+ float f;
381
+ double d;
382
+ Constant(cudnnDataType_t dataType, double value) {
383
+ if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) {
384
+ f = static_cast<float>(value);
385
+ } else {
386
+ d = value;
387
+ }
388
+ }
389
+ };
390
+
391
+ }} // namespace
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Exceptions.h ADDED
File without changes
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cudnn/cudnn-wrapper.h>
4
+ #include <ATen/cuda/ATenCUDAGeneral.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ TORCH_CUDA_CPP_API cudnnHandle_t getCudnnHandle();
9
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ #pragma once
2
+ #include <ATen/cudnn/Handle.h>
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cudnn/cudnn-wrapper.h>
4
+ #include <ATen/Tensor.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ TORCH_CUDA_CPP_API cudnnDataType_t
9
+ getCudnnDataTypeFromScalarType(const at::ScalarType dtype);
10
+ cudnnDataType_t getCudnnDataType(const at::Tensor& tensor);
11
+
12
+ int64_t cudnn_version();
13
+
14
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/cuda/Exceptions.h>
5
+ #include <ATen/cudnn/cudnn-wrapper.h>
6
+ #include <ATen/cudnn/Handle.h>
7
+
8
+ namespace at { namespace native {
9
+
10
+ // cuDNN has a buggy check for tensor being contiguous (that is, it does
11
+ // not ignore stride for dimension that is equal to 0). This function
12
+ // makes tensors which have zero stride contiguous, by setting the
13
+ // strides to 1 as cuDNN likes.
14
+ inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
15
+ for (auto s : t.strides()) {
16
+ if (s == 0) return t.contiguous();
17
+ }
18
+ return t;
19
+ }
20
+
21
+ }}
venv/lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cudnn.h>
4
+
5
+ #define STRINGIFY(x) #x
6
+ #define STRING(x) STRINGIFY(x)
7
+
8
+ #if CUDNN_MAJOR < 6
9
+ #pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0")
10
+ #pragma message "We strongly encourage you to move to 6.0 and above."
11
+ #pragma message "This message is intended to annoy you enough to update."
12
+ #endif
13
+
14
+ #undef STRINGIFY
15
+ #undef STRING
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale);
20
+ TORCH_API void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out);
21
+ TORCH_API void _amp_foreach_non_finite_check_and_unscale_cpu_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale);
22
+ TORCH_API void _amp_foreach_non_finite_check_and_unscale_cuda_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale);
23
+ } // namespace native
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
// Generated convenience wrappers: each forwards its arguments unchanged to
// the dispatcher entry at::_ops::_convert_indices_from_csr_to_coo*.
// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
inline at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) {
    return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
}

// Out variant, `out` leading (PyTorch "out" convention).
// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) {
    return at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices, col_indices, out_int32, transpose, out);
}
// Out variant, `out` trailing (schema argument order).
// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
    return at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices, col_indices, out_int32, transpose, out);
}
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _dim_arange(const at::Tensor & like, int64_t dim);
20
+ } // namespace native
21
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_native.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API void _foreach_tanh_out(at::TensorList self, at::TensorList out);
20
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_tanh_slow(at::TensorList self);
21
+ TORCH_API void foreach_tensor_tanh_slow_(at::TensorList self);
22
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_tanh_cuda(at::TensorList self);
23
+ TORCH_API void foreach_tensor_tanh_cuda_(at::TensorList self);
24
+ } // namespace native
25
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
// Generated dispatcher record for aten::_nested_view_from_jagged_copy
// (functional overload): carries the schema string plus typed call /
// redispatch entry points.
struct TORCH_API _nested_view_from_jagged_copy {
  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_view_from_jagged_copy")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx);
};

// Generated dispatcher record for the "out" overload of the same operator.
struct TORCH_API _nested_view_from_jagged_copy_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_view_from_jagged_copy")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx, at::Tensor & out);
};
38
+
39
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_available_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API bool _nnpack_available();
20
+ } // namespace native
21
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_mm_cuda(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, const c10::optional<at::Tensor> & scale_a={}, const c10::optional<at::Tensor> & scale_b={}, const c10::optional<at::Tensor> & scale_result={}, bool use_fast_accum=false);
20
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _scaled_mm_out_cuda(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax);
21
+ } // namespace native
22
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward.h ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_slow_conv2d_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
26
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) {
27
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias);
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) {
32
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias);
33
+ }
34
+ }
35
+
36
+ // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
37
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
38
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
42
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
43
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias);
44
+ }
45
+ }
46
+
47
+ // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
48
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
49
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
50
+ }
51
+ namespace symint {
52
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
53
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
54
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
55
+ }
56
+ }
57
+
58
+ // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
59
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
60
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
61
+ }
62
+ namespace symint {
63
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
64
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
65
+ return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
66
+ }
67
+ }
68
+
69
+ // aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
70
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
71
+ return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask);
72
+ }
73
+ namespace symint {
74
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
75
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
76
+ return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask);
77
+ }
78
+ }
79
+
80
+ // aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
81
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
82
+ return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
83
+ }
84
+ namespace symint {
85
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
86
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
87
+ return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
88
+ }
89
+ }
90
+
91
+ // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
92
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
93
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2);
94
+ }
95
+ namespace symint {
96
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
97
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
98
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2);
99
+ }
100
+ }
101
+
102
+ // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
103
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
104
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2);
105
+ }
106
+ namespace symint {
107
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
108
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
109
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2);
110
+ }
111
+ }
112
+
113
+ // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
114
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
115
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
116
+ }
117
+ namespace symint {
118
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
119
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
120
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
121
+ }
122
+ }
123
+
124
+ // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
125
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
126
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
127
+ }
128
+ namespace symint {
129
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
130
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
131
+ return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
132
+ }
133
+ }
134
+
135
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_sparse_semi_structured_linear_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
26
+ inline at::Tensor _sparse_semi_structured_linear(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional<at::Tensor> & bias={}, c10::optional<c10::string_view> activation=c10::nullopt, c10::optional<at::ScalarType> out_dtype=c10::nullopt) {
27
+ return at::_ops::_sparse_semi_structured_linear::call(input, weight, meta, bias, activation, out_dtype);
28
+ }
29
+
30
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_test_optional_intlist_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
26
+ inline at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
27
+ return at::_ops::_test_optional_intlist::call(values, addends);
28
+ }
29
+
30
+ // aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _test_optional_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
32
+ return at::_ops::_test_optional_intlist_out::call(values, addends, out);
33
+ }
34
+ // aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _test_optional_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
36
+ return at::_ops::_test_optional_intlist_out::call(values, addends, out);
37
+ }
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_native.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _to_sparse_bsr_out(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
20
+ TORCH_API at::Tensor dense_to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt);
21
+ TORCH_API at::Tensor coo_to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt);
22
+ TORCH_API at::Tensor sparse_compressed_to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt);
23
+ } // namespace native
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_unsafe_view_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
26
+ inline at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
27
+ return at::_ops::_unsafe_view::call(self, c10::fromIntArrayRefSlow(size));
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
32
+ return at::_ops::_unsafe_view::call(self, c10::fromIntArrayRefSlow(size));
33
+ }
34
+ }
35
+
36
+ // aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
37
+ inline at::Tensor _unsafe_view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
38
+ return at::_ops::_unsafe_view::call(self, size);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ at::Tensor _unsafe_view(const at::Tensor & self, c10::SymIntArrayRef size) {
43
+ return at::_ops::_unsafe_view::call(self, size);
44
+ }
45
+ }
46
+
47
+ // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
48
+ inline at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
49
+ return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
50
+ }
51
+ namespace symint {
52
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
53
+ at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
54
+ return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
55
+ }
56
+ }
57
+
58
+ // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
59
+ inline at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
60
+ return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
61
+ }
62
+ namespace symint {
63
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
64
+ at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
65
+ return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
66
+ }
67
+ }
68
+
69
+ // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
70
+ inline at::Tensor & _unsafe_view_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
71
+ return at::_ops::_unsafe_view_out::call(self, size, out);
72
+ }
73
+ namespace symint {
74
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
75
+ at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
76
+ return at::_ops::_unsafe_view_out::call(self, size, out);
77
+ }
78
+ }
79
+
80
+ // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
81
+ inline at::Tensor & _unsafe_view_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
82
+ return at::_ops::_unsafe_view_out::call(self, size, out);
83
+ }
84
+ namespace symint {
85
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
86
+ at::Tensor & _unsafe_view_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
87
+ return at::_ops::_unsafe_view_out::call(self, size, out);
88
+ }
89
+ }
90
+
91
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
21
+ TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
22
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
23
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
24
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
25
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
26
+
27
+ } // namespace meta
28
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API abs {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API abs_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API abs_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/absolute_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API absolute {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API absolute_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API absolute_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor atan2(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API at::Tensor & atan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
22
+ TORCH_API at::Tensor & atan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
23
+ TORCH_API at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other);
24
+
25
+ } // namespace meta
26
+ } // namespace at