applied-ai-018 commited on
Commit
179ee59
·
verified ·
1 Parent(s): 74ee0e3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step20/zero/18.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/18.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/18.input_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step20/zero/23.post_attention_layernorm.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h +445 -0
  7. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h +425 -0
  8. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h +277 -0
  9. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h +111 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h +47 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h +15 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h +186 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h +114 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h +0 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h +59 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h +953 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h +25 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h +98 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h +736 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h +226 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h +45 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h +113 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h +406 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h +22 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h +80 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h +40 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h +53 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h +28 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/wrap_outputs.h +155 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp +32 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp +67 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +271 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp +353 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp +77 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp +180 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/intra_node_comm.hpp +121 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h +34 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h +61 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h +100 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h +158 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h +114 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h +144 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h +28 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h +47 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h +59 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h +238 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h +72 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h +52 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h +298 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h +150 -0
ckpts/universal/global_step20/zero/18.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bf67ee9de3e9a365f0b5dd2d62346c04f5bc8478372f083e5f43cfda7e88ee1
3
+ size 9372
ckpts/universal/global_step20/zero/18.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a05841f7b201086703b3692b033146486a1a777398e619e698428a1368047331
3
+ size 9387
ckpts/universal/global_step20/zero/18.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c1ab797cd65eea281261d164318c9bbd434394fe28cb719f216ea66a49101f9
3
+ size 9293
ckpts/universal/global_step20/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:539e3c1bea482bacd5ab0e6177e9c45ba71f0639fec27dbe5eab772b96b399e2
3
+ size 9387
ckpts/universal/global_step20/zero/23.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab7caf72f6597487410090f38a56899ff4c9e75f11e8832cbb3cf76dc2f31a2c
3
+ size 9293
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h ADDED
@@ -0,0 +1,445 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+
5
+ #include <ATen/core/boxing/KernelFunction.h>
6
+ #include <ATen/core/dispatch/Dispatcher.h>
7
+
8
+ #include <torch/csrc/autograd/edge.h>
9
+ #include <torch/csrc/autograd/function.h>
10
+ #include <torch/csrc/autograd/functions/basic_ops.h>
11
+ #include <torch/csrc/autograd/functions/tensor.h>
12
+ #include <torch/csrc/autograd/grad_mode.h>
13
+ #include <torch/csrc/autograd/saved_variable.h>
14
+ #include <torch/csrc/autograd/variable.h>
15
+
16
+ #include <torch/csrc/autograd/functions/utils.h>
17
+ #include <torch/csrc/autograd/jit_decomp_interface.h>
18
+ #include <torch/csrc/utils/variadic.h>
19
+
20
+ #include <cstddef>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <utility>
24
+ #include <vector>
25
+
26
+ #ifdef _MSC_VER
27
+ #ifdef Type
28
+ #undef Type
29
+ #endif
30
+ #endif
31
+
32
+ namespace torch {
33
+ namespace autograd {
34
+ enum class can_mutate_inplace_result {
35
+ success,
36
+ non_default_backward_view,
37
+ view_of_leaf,
38
+ is_leaf,
39
+ };
40
+
41
+ // The requires_grad argument is used to know if the inplace operation needs
42
+ // gradient to be setup for it.
43
+ // In particular, we can have tensor.requires_grad() != requires_grad when
44
+ // writing a Tensor that requires gradients inplace into a Tensor that does not
45
+ // require gradients: a = torch.rand(2) b = torch.rand(2, requires_grad=True)
46
+ // a.copy_(b)
47
+ inline can_mutate_inplace_result can_mutate_inplace(
48
+ const at::Tensor& tensor,
49
+ bool requires_grad) {
50
+ if (!requires_grad || !GradMode::is_enabled()) {
51
+ return can_mutate_inplace_result::success;
52
+ }
53
+ auto diff_view_meta = impl::get_view_autograd_meta(tensor);
54
+ if (diff_view_meta && diff_view_meta->has_bw_view()) {
55
+ if (diff_view_meta->get_creation_meta() != CreationMeta::DEFAULT) {
56
+ return can_mutate_inplace_result::non_default_backward_view;
57
+ }
58
+ if (tensor.requires_grad() && tensor._base().is_leaf()) {
59
+ return can_mutate_inplace_result::view_of_leaf;
60
+ }
61
+ }
62
+ if (tensor.requires_grad() && tensor.is_leaf()) {
63
+ return can_mutate_inplace_result::is_leaf;
64
+ }
65
+ return can_mutate_inplace_result::success;
66
+ }
67
+
68
+ inline void check_inplace(const at::Tensor& tensor, bool requires_grad) {
69
+ switch (can_mutate_inplace(tensor, requires_grad)) {
70
+ case can_mutate_inplace_result::success:
71
+ return;
72
+ case can_mutate_inplace_result::non_default_backward_view: {
73
+ return handle_view_on_rebase(impl::get_view_autograd_meta(tensor));
74
+ }
75
+ case can_mutate_inplace_result::view_of_leaf:
76
+ TORCH_CHECK(
77
+ false,
78
+ "a view of a leaf Variable that requires grad is being used in an in-place operation.");
79
+ break;
80
+
81
+ case can_mutate_inplace_result::is_leaf:
82
+ TORCH_CHECK(
83
+ false,
84
+ "a leaf Variable that requires grad is being used in an in-place operation.");
85
+ break;
86
+ }
87
+ TORCH_INTERNAL_ASSERT(false);
88
+ }
89
+
90
+ inline void check_inplace(at::ITensorListRef tensors, bool requires_grad) {
91
+ for (const auto& tensor : tensors) {
92
+ check_inplace(tensor, requires_grad);
93
+ }
94
+ }
95
+
96
+ inline void throw_error_out_requires_grad(const char* name) {
97
+ AT_ERROR(
98
+ name,
99
+ "(): functions with out=... arguments don't support automatic differentiation, "
100
+ "but one of the arguments requires grad.");
101
+ }
102
+
103
+ inline void throw_error_for_complex_autograd(
104
+ const at::Tensor& tensor,
105
+ const char* name) {
106
+ if (tensor.requires_grad()) {
107
+ TORCH_CHECK(
108
+ !tensor.is_complex(),
109
+ name,
110
+ " does not support automatic differentiation for outputs with complex dtype.");
111
+ }
112
+ }
113
+
114
+ inline void throw_error_if_base_and_tensor_are_same(
115
+ const at::Tensor& base,
116
+ const at::Tensor& tensor) {
117
+ TORCH_CHECK(
118
+ base.unsafeGetTensorImpl() != tensor.unsafeGetTensorImpl(),
119
+ "View operation returned a tensor that is the same as the input base tensor. This "
120
+ "is no longer allowed; you must explicitly create a new tensor (e.g., using .detach()). "
121
+ "As a user, you could have made a mistake implementing __torch_dispatch__ or a Python "
122
+ "operator decomposition or meta registration; if that's not the case, please "
123
+ "report a bug to PyTorch or the backend you are using.");
124
+ }
125
+
126
+ inline void throw_error_for_complex_autograd(
127
+ at::ITensorListRef tensorlist,
128
+ const char* name) {
129
+ for (const auto& tensor : tensorlist) {
130
+ throw_error_for_complex_autograd(tensor, name);
131
+ }
132
+ }
133
+
134
+ // TODO: Blegh, bare references
135
+
136
+ inline void rebase_history(const Variable& var, std::shared_ptr<Node> grad_fn) {
137
+ if (grad_fn && var.defined()) {
138
+ grad_fn->add_input_metadata(var);
139
+ impl::rebase_history(var, {std::move(grad_fn), 0});
140
+ }
141
+ }
142
+
143
+ inline void rebase_history(
144
+ const std::vector<Variable>& vars,
145
+ const std::shared_ptr<Node>& grad_fn) {
146
+ if (grad_fn) {
147
+ for (auto& var : vars) {
148
+ if (var.defined()) {
149
+ auto output_nr = grad_fn->add_input_metadata(var);
150
+ impl::rebase_history(var, {grad_fn, output_nr});
151
+ } else {
152
+ grad_fn->add_input_metadata(Node::undefined_input());
153
+ }
154
+ }
155
+ }
156
+ }
157
+
158
+ inline void increment_version(const at::Tensor& t) {
159
+ impl::bump_version(t);
160
+ }
161
+
162
+ struct Flatten : IterArgs<Flatten> {
163
+ Flatten(variable_list& out) : out(out) {}
164
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
165
+ variable_list& out;
166
+ void operator()(const at::Tensor& x) {
167
+ out.emplace_back(x);
168
+ }
169
+ void operator()(const c10::optional<at::Tensor>& x) {
170
+ if (x.has_value())
171
+ out.emplace_back(x.value());
172
+ }
173
+ void operator()(at::ArrayRef<at::Tensor> xs) {
174
+ out.insert(out.end(), xs.begin(), xs.end());
175
+ }
176
+ };
177
+
178
+ template <typename... Args>
179
+ inline variable_list flatten_tensor_args(Args&&... args) {
180
+ variable_list out;
181
+ out.reserve(count_tensors(std::forward<Args>(args)...));
182
+ Flatten(out).apply(std::forward<Args>(args)...);
183
+ return out; // RVO
184
+ }
185
+
186
+ // See NOTE [ Autograd View Variables ] for details.
187
+ inline at::Tensor as_view(
188
+ const at::Tensor& base,
189
+ const at::Tensor& tensor,
190
+ bool is_bw_differentiable,
191
+ bool is_fw_differentiable,
192
+ std::unique_ptr<ViewFunc> view_func = nullptr,
193
+ std::function<at::Tensor(const at::Tensor&)> rev_view_func = nullptr,
194
+ CreationMeta creation_meta = CreationMeta::DEFAULT,
195
+ bool allow_tensor_metadata_change = true) {
196
+ // Note [View of inference tensor]
197
+ // For inference tensor this code can only be hit outside InferenceMode
198
+ // since ADInplaceOrView is in the default_included_set.
199
+ // If Inplace and View were separate dispatch keys we can just put Inplace
200
+ // in the default_included_set, so that view ops on inference tensor doesn't
201
+ // have to go through as_view even outside InferenceMode.
202
+ if (base.is_inference())
203
+ return tensor;
204
+
205
+ auto diff_view_meta = torch::autograd::impl::get_view_autograd_meta(base);
206
+
207
+ // To speed up the most common case, we specially handle when both the forward
208
+ // and backward view infos are the same, and so a single shared ViewInfo can
209
+ // be used for both of them.
210
+ if ((!diff_view_meta || diff_view_meta->shared_view_info()) &&
211
+ is_bw_differentiable && is_fw_differentiable) {
212
+ throw_error_if_base_and_tensor_are_same(base, tensor);
213
+ if (diff_view_meta) {
214
+ creation_meta = propagate_creation_meta(
215
+ diff_view_meta->get_creation_meta(), creation_meta);
216
+ return make_variable_differentiable_view(
217
+ tensor,
218
+ diff_view_meta->get_backward_view().chain(
219
+ base, tensor, std::move(view_func), std::move(rev_view_func)),
220
+ c10::nullopt,
221
+ /*shared_view_info*/ true,
222
+ creation_meta,
223
+ allow_tensor_metadata_change);
224
+ } else {
225
+ return make_variable_differentiable_view(
226
+ tensor,
227
+ ViewInfo(base, std::move(view_func), std::move(rev_view_func)),
228
+ c10::nullopt,
229
+ /*shared_view_info*/ true,
230
+ creation_meta,
231
+ allow_tensor_metadata_change);
232
+ }
233
+ }
234
+
235
+ // If they cannot be shared, create the required view infos
236
+ c10::optional<ViewInfo> new_bw_info;
237
+ c10::optional<ViewInfo> new_fw_info;
238
+
239
+ if (is_bw_differentiable) {
240
+ auto bw_view_func = view_func ? view_func->clone_and_set() : nullptr;
241
+ if (diff_view_meta && diff_view_meta->has_bw_view()) {
242
+ const auto& base_bw_info = diff_view_meta->get_backward_view();
243
+ new_bw_info = base_bw_info.chain(
244
+ base, tensor, std::move(bw_view_func), rev_view_func);
245
+ } else {
246
+ new_bw_info = ViewInfo(base, std::move(bw_view_func), rev_view_func);
247
+ }
248
+ } else {
249
+ TORCH_CHECK(
250
+ creation_meta == CreationMeta::DEFAULT,
251
+ "Non-backward differentiable views must have creation_meta=CreationMeta::DEFAULT");
252
+ }
253
+
254
+ if (is_fw_differentiable) {
255
+ // Check if base is a forward differentiable view
256
+ if (diff_view_meta && diff_view_meta->has_fw_view()) {
257
+ const auto& base_fw_info = diff_view_meta->get_forward_view();
258
+ new_fw_info = base_fw_info.chain(
259
+ base, tensor, std::move(view_func), std::move(rev_view_func));
260
+ } else {
261
+ new_fw_info =
262
+ ViewInfo(base, std::move(view_func), std::move(rev_view_func));
263
+ }
264
+ }
265
+
266
+ if (is_fw_differentiable || is_bw_differentiable) {
267
+ if (diff_view_meta && diff_view_meta->has_bw_view()) {
268
+ creation_meta = propagate_creation_meta(
269
+ diff_view_meta->get_creation_meta(), creation_meta);
270
+ }
271
+ throw_error_if_base_and_tensor_are_same(base, tensor);
272
+ return make_variable_differentiable_view(
273
+ tensor,
274
+ std::move(new_bw_info),
275
+ std::move(new_fw_info),
276
+ /*shared_view_info*/ false,
277
+ creation_meta,
278
+ allow_tensor_metadata_change);
279
+ } else {
280
+ return make_variable_non_differentiable_view(
281
+ base, tensor, allow_tensor_metadata_change);
282
+ }
283
+ }
284
+
285
+ inline void check_no_requires_grad(
286
+ const at::Tensor& tensor,
287
+ const char* name,
288
+ const char* fn_name = "",
289
+ bool check_grad_mode = true) {
290
+ TORCH_CHECK(
291
+ !(tensor.defined() && tensor.requires_grad()) ||
292
+ !(check_grad_mode && GradMode::is_enabled()),
293
+ "The function '",
294
+ fn_name,
295
+ "' is not differentiable with respect to argument '",
296
+ name,
297
+ "'. This input cannot have requires_grad True.");
298
+ }
299
+
300
+ inline void check_no_requires_grad(
301
+ const c10::optional<at::Tensor>& tensor,
302
+ const char* name,
303
+ const char* fn_name = "") {
304
+ if (tensor.has_value()) {
305
+ check_no_requires_grad(*tensor, name, fn_name);
306
+ }
307
+ }
308
+
309
+ inline void check_no_requires_grad(
310
+ at::ITensorListRef tensors,
311
+ const char* name,
312
+ const char* fn_name = "") {
313
+ // GradMode check is expensive, so check it only once for TensorLists
314
+ if (!GradMode::is_enabled()) {
315
+ return;
316
+ }
317
+ for (auto& tensor : tensors) {
318
+ check_no_requires_grad(tensor, name, fn_name, /*check_grad_mode*/ false);
319
+ }
320
+ }
321
+
322
+ inline void check_no_requires_grad(
323
+ const c10::List<c10::optional<at::Tensor>>& tensors,
324
+ const char* name,
325
+ const char* fn_name = "") {
326
+ // GradMode check is expensive, so check it only once for TensorLists
327
+ if (!GradMode::is_enabled()) {
328
+ return;
329
+ }
330
+ for (c10::optional<at::Tensor> tensor : tensors) {
331
+ if (tensor.has_value()) {
332
+ check_no_requires_grad(*tensor, name, fn_name, /*check_grad_mode*/ false);
333
+ }
334
+ }
335
+ }
336
+
337
+ // Assumed that saved tensor lists are never inplace outputs
338
+ inline std::vector<SavedVariable> make_saved_variable_list(
339
+ at::ITensorListRef tensors,
340
+ const bool is_output = false) {
341
+ return fmap(tensors, [&is_output](const at::Tensor& tensor) -> SavedVariable {
342
+ return SavedVariable{tensor, is_output /* is output */};
343
+ });
344
+ }
345
+
346
+ // Assumed that saved tensor lists are never inplace outputs
347
+ inline std::vector<SavedVariable> make_saved_variable_list(
348
+ const c10::List<c10::optional<at::Tensor>>& tensors,
349
+ const bool is_output = false) {
350
+ return fmap(
351
+ tensors,
352
+ [&is_output](const c10::optional<at::Tensor>& tensor) -> SavedVariable {
353
+ if (tensor.has_value()) {
354
+ return SavedVariable{*tensor, is_output /* is output */};
355
+ } else {
356
+ return SavedVariable{at::Tensor(), is_output /* is output */};
357
+ }
358
+ });
359
+ }
360
+
361
+ inline std::vector<std::vector<int64_t>> to_args_sizes(
362
+ at::ITensorListRef tensors) {
363
+ std::vector<std::vector<int64_t>> args_sizes(tensors.size());
364
+ size_t i = 0;
365
+ for (const auto& t : tensors) {
366
+ args_sizes[i++] = t.sizes().vec();
367
+ }
368
+ return args_sizes;
369
+ }
370
+
371
+ inline std::vector<std::vector<c10::SymInt>> to_args_sizes_symint(
372
+ at::ITensorListRef tensors) {
373
+ std::vector<std::vector<c10::SymInt>> args_sizes(tensors.size());
374
+ size_t i = 0;
375
+ for (const auto& t : tensors) {
376
+ args_sizes[i++] = t.sym_sizes().vec();
377
+ }
378
+ return args_sizes;
379
+ }
380
+
381
+ inline std::vector<c10::ScalarType> to_args_scalartypes(
382
+ at::ITensorListRef tensors) {
383
+ std::vector<c10::ScalarType> args_scalartypes(tensors.size());
384
+ size_t i = 0;
385
+ for (const auto& t : tensors) {
386
+ args_scalartypes[i++] = t.scalar_type();
387
+ }
388
+ return args_scalartypes;
389
+ }
390
+
391
+ namespace impl {
392
+
393
+ namespace {
394
+
395
+ // If run_jit_decomposition were not a member function, we would be able
396
+ // to pass this as a template parameter to c10::Boxedkernel::makeFromFunction.
397
+ // However, member functions cannot be passed this way - instead we wrap our
398
+ // call in this functor so it can be passed to c10::BoxedKernel::makeFromFunctor
399
+ class WrapperFunctor final : public c10::OperatorKernel {
400
+ public:
401
+ WrapperFunctor(JitDecompInterface* impl) : impl_(impl){};
402
+
403
+ void operator()(
404
+ const c10::OperatorHandle& op,
405
+ c10::DispatchKeySet ks,
406
+ torch::jit::Stack* stack) {
407
+ impl_->run_jit_decomposition(op, stack);
408
+ }
409
+ JitDecompInterface* impl_;
410
+ };
411
+
412
+ } // namespace
413
+
414
+ template <class Return, class... Args>
415
+ Return run_jit_decomposition_with_args_for_jvp(
416
+ c10::string_view name,
417
+ const c10::OperatorHandle& opHandle,
418
+ c10::DispatchKeySet dispatchKeySet,
419
+ Args&&... args) {
420
+ // see NOTE: [Jit Decomposition Interface]
421
+ JitDecompInterface* impl = getJitDecompImpl();
422
+
423
+ TORCH_CHECK_NOT_IMPLEMENTED(
424
+ impl && impl->has_jit_decomposition(opHandle.schema()),
425
+ "Trying to use forward AD with ",
426
+ name,
427
+ " that does not support it because it has not been implemented yet.\nPlease file an issue "
428
+ "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
429
+ "so that we can prioritize its implementation.\n"
430
+ "Note that forward AD support for some operators require PyTorch to be built with "
431
+ "TorchScript and for JIT to be enabled. "
432
+ "If the environment var PYTORCH_JIT=0 is set or if the library is not built with TorchScript, "
433
+ "some operators may no longer be used with forward AD.");
434
+
435
+ return c10::KernelFunction::makeFromBoxedKernel(
436
+ c10::BoxedKernel::makeFromFunctor(
437
+ std::make_unique<WrapperFunctor>(impl)))
438
+ .call<Return, Args...>(
439
+ opHandle, dispatchKeySet, std::forward<Args>(args)...);
440
+ }
441
+
442
+ } // namespace impl
443
+
444
+ } // namespace autograd
445
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h ADDED
@@ -0,0 +1,425 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <c10/core/SymInt.h>
5
+ #include <c10/util/flat_hash_map.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/autograd/function.h>
8
+ #include <torch/csrc/autograd/variable.h>
9
+ #include <torch/csrc/autograd/variable_info.h>
10
+ #include <vector>
11
+
12
+ namespace torch::autograd {
13
+
14
+ using optional_variable_list = std::vector<c10::optional<Variable>>;
15
+ using _jvp_fn_t = std::function<variable_list(variable_list, variable_list)>;
16
+ using _view_as_self_fn_t = std::function<at::Tensor(at::Tensor)>;
17
+
18
+ TORCH_API std::vector<c10::optional<Variable>> _wrap_outputs(
19
+ const variable_list& input_vars,
20
+ const std::unordered_set<at::TensorImpl*>& non_differentiable,
21
+ const std::unordered_set<at::TensorImpl*>& dirty_inputs,
22
+ const at::ArrayRef<c10::optional<Variable>> raw_outputs,
23
+ const std::shared_ptr<Node>& cdata,
24
+ const _jvp_fn_t& jvp_user_function,
25
+ const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context,
26
+ const _view_as_self_fn_t& view_as_self_fn);
27
+
28
+ TORCH_API void check_variable_result(
29
+ const at::TensorBase& original,
30
+ const at::TensorBase& result,
31
+ const std::string& hook_name);
32
+
33
+ // Get the return type of the forward function of the custom Function class X
34
+ template <typename X, typename... Args>
35
+ using forward_t = decltype(X::forward(nullptr, std::declval<Args>()...));
36
+
37
+ /// To use custom autograd operations, implement a Function subclass with
38
+ /// static forward and backward functions:
39
+ ///
40
+ /// `forward` can take as many arguments as you want and should return either a
41
+ /// variable list or a Variable. Use of any direct Variable arguments will be
42
+ /// registered in the graph but no vectors/sets or any other data structures
43
+ /// will be traversed. You can use c10::optional<Tensor> as one of the arguments
44
+ /// and it will be registered as a variable in the graph if the argument has a
45
+ /// value. It should take a pointer to `torch::autograd::AutogradContext` as the
46
+ /// first argument. Variables can be saved in the `ctx` using
47
+ /// `ctx->save_for_backward`
48
+ /// (see `torch::autograd::AutogradContext::save_for_backward`) and other data
49
+ /// can be saved in the `ctx->saved_data` map
50
+ /// (see `torch::autograd::AutogradContext::saved_data`)
51
+ /// in the form of `<std::string, at::IValue>` pairs.
52
+ ///
53
+ /// `backward` should take a pointer to `torch::autograd::AutogradContext`
54
+ /// and a variable list containing as many Variables as there were outputs from
55
+ /// `forward` as arguments. It should return as many Variables as there were
56
+ /// inputs with each of them containing the gradient w.r.t. its corresponding
57
+ /// input. Variables saved in `forward` can be accessed with
58
+ /// `ctx->get_saved_variables` (see
59
+ /// `torch::autograd::AutogradContext::get_saved_variables`) and other saved
60
+ /// data can be accessed from `ctx->saved_data`.
61
+ ///
62
+ /// For example:
63
+ /// ```
64
+ /// class MyFunction : public Function<MyFunction> {
65
+ /// public:
66
+ /// static variable_list forward(AutogradContext *ctx, int n, Variable var) {
67
+ /// // Save data for backward in context
68
+ /// ctx->saved_data["n"] = n;
69
+ /// var.mul_(2);
70
+ /// // Mark var as modified by inplace operation
71
+ /// ctx->mark_dirty({var});
72
+ /// return {var};
73
+ /// }
74
+ ///
75
+ /// static variable_list backward(AutogradContext *ctx, variable_list
76
+ /// grad_output) {
77
+ /// // Use data saved in forward
78
+ /// auto n = ctx->saved_data["n"].toInt();
79
+ /// return {grad_output[0]*n};
80
+ /// }
81
+ /// };
82
+ /// ```
83
+ ///
84
+ /// To use `MyFunction`:
85
+ /// ```
86
+ /// Variable x;
87
+ /// auto y = MyFunction::apply(6, x);
88
+ /// // Example backward call
89
+ /// y[0].sum().backward();
90
+ /// ```
91
+ template <class T>
92
+ struct TORCH_API Function {
93
+ // We need to use a different template parameter than T here because T will
94
+ // inherit from Function, and when Function<T> is instantiated, T::forward
95
+ // is not declared yet.
96
+ // The enable_if check is to ensure that the user doesn't explicitly provide
97
+ // the parameter X.
98
+ template <typename X = T, typename... Args>
99
+ static auto apply(Args&&... args)
100
+ -> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>>;
101
+ };
102
+
103
+ /// Context to save information during `forward` that can be accessed in
104
+ /// `backward` in custom autograd operations (see `torch::autograd::Function`
105
+ /// for details).
106
+ struct TORCH_API AutogradContext {
107
+ AutogradContext() = default;
108
+ AutogradContext(const AutogradContext& other) = delete;
109
+ AutogradContext& operator=(const AutogradContext& other) = delete;
110
+
111
+ /// Can be used to save non-variable data for `backward`.
112
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
113
+ ska::flat_hash_map<std::string, at::IValue> saved_data;
114
+
115
+ /// Saves the list of variables for a future call to `backward`. This
116
+ /// should be called at most once from inside of `forward`.
117
+ void save_for_backward(variable_list to_save);
118
+ /// Marks variables in the list as modified in an in-place operation. This
119
+ /// should be called at most once from inside of `forward` and all arguments
120
+ /// should be inputs.
121
+ void mark_dirty(const variable_list& inputs);
122
+ /// Marks outputs in the list as not requiring gradients. This should be
123
+ /// called at most once from inside of `forward` and all arguments should be
124
+ /// outputs.
125
+ void mark_non_differentiable(const variable_list& outputs);
126
+ // Sets whether undefined output grad tensors should be expanded to tensors
127
+ // full of zeros before calling backward function. Default value is true.
128
+ void set_materialize_grads(bool value);
129
+
130
+ /// Get the list of variables that were saved in `forward` using
131
+ /// `save_for_backward()`. Before returning them to the user, a check is made
132
+ /// to ensure that they were not modified by any in-place operations.
133
+ variable_list get_saved_variables() const;
134
+ const std::unordered_set<at::TensorImpl*>& get_and_bump_dirty() const;
135
+ const std::unordered_set<at::TensorImpl*>& get_non_differentiable() const;
136
+
137
+ /// Expose the Node's `task_should_compute_output` method to the cpp
138
+ /// custom autograd Function as `needs_input_grad`.
139
+ bool needs_input_grad(size_t output_edge_index) const;
140
+ bool needs_input_grad(std::initializer_list<IndexRange> idxs) const;
141
+
142
+ private:
143
+ std::unordered_set<at::TensorImpl*> non_differentiable_;
144
+ std::unordered_set<at::TensorImpl*> dirty_inputs_;
145
+ std::vector<torch::autograd::SavedVariable> saved_variables_;
146
+ variable_list to_save_;
147
+ bool materialize_grads_{true};
148
+
149
+ // The CppNode in the autograd graph that owns this AutogradContext. We need a
150
+ // weak_ptr to avoid a refcycle. Since grad_fn_ owns this AutogradContext, it
151
+ // will always be alive when we want to use it.
152
+ std::weak_ptr<Node> grad_fn_;
153
+ bool has_freed_buffers_{false};
154
+
155
+ void save_variables();
156
+
157
+ template <class T>
158
+ friend struct CppNode;
159
+ };
160
+
161
+ // CppNode<T> is the Node in the autograd graph that represents the user defined
162
+ // backward function for Function<T>. Calls to CppNode::apply are forward to
163
+ // T::backward().
164
+ template <class T>
165
+ struct CppNode : public Node {
166
+ variable_list apply(variable_list&& inputs) override;
167
+ AutogradContext ctx_;
168
+ std::vector<bool> is_variable_input_;
169
+ std::vector<VariableInfo> input_info_;
170
+ std::vector<VariableInfo> output_info_;
171
+
172
+ void release_variables() override;
173
+
174
+ void set_ctx_grad_fn(const std::shared_ptr<Node>& node);
175
+ void save_variables_to_ctx();
176
+ };
177
+
178
+ struct ExtractVariables : IterArgs<ExtractVariables> {
179
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
180
+ std::vector<bool>& is_var_;
181
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
182
+ variable_list& list_;
183
+ ExtractVariables(std::vector<bool>& is_var, variable_list& list)
184
+ : is_var_(is_var), list_(list) {}
185
+ void operator()(const c10::optional<at::Tensor>& x) {
186
+ // NOLINTNEXTLINE(bugprone-branch-clone)
187
+ if (x.has_value() && x.value().defined()) {
188
+ is_var_.push_back(true);
189
+ list_.emplace_back(x.value());
190
+ } else {
191
+ is_var_.push_back(false);
192
+ }
193
+ }
194
+ void operator()(const at::Tensor& x) {
195
+ is_var_.push_back(true);
196
+ list_.emplace_back(x);
197
+ }
198
+ void operator()(const at::TensorList& list) {
199
+ for (const at::Tensor& x : list) {
200
+ is_var_.push_back(true);
201
+ list_.emplace_back(x);
202
+ }
203
+ }
204
+ template <typename T>
205
+ void operator()(const T& x) {
206
+ is_var_.push_back(false);
207
+ }
208
+ };
209
+
210
+ template <typename... Args>
211
+ inline void extract_vars(
212
+ std::vector<bool>& is_var,
213
+ variable_list& list,
214
+ Args&&... args) {
215
+ ExtractVariables(is_var, list).apply(std::forward<Args>(args)...);
216
+ }
217
+
218
+ template <typename T>
219
+ std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
220
+ std::vector<c10::optional<Variable>>& output_list) {
221
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
222
+ variable_list result;
223
+ std::transform(
224
+ output_list.begin(),
225
+ output_list.end(),
226
+ std::back_inserter(result),
227
+ [](const c10::optional<Variable>& var) { return *var; });
228
+ return result;
229
+ }
230
+
231
+ template <typename T>
232
+ std::enable_if_t<std::is_same_v<T, Variable>, T> to_output_type(
233
+ std::vector<c10::optional<Variable>>& output_list) {
234
+ return *output_list[0];
235
+ }
236
+
237
+ inline std::vector<c10::optional<Variable>> to_optional(Variable& output) {
238
+ return std::vector<c10::optional<Variable>>{output};
239
+ }
240
+
241
+ inline std::vector<c10::optional<Variable>> to_optional(variable_list& output) {
242
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
243
+ std::vector<c10::optional<Variable>> result;
244
+ std::transform(
245
+ output.begin(),
246
+ output.end(),
247
+ std::back_inserter(result),
248
+ [](const Variable& var) { return var; });
249
+ return result;
250
+ }
251
+
252
+ template <class T>
253
+ template <typename X, typename... Args>
254
+ auto Function<T>::apply(Args&&... args)
255
+ -> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>> {
256
+ const auto& functorch_tls = at::functorch::functorchTLSAccessor();
257
+ if (functorch_tls) {
258
+ // Function support for functorch is handled in Python.
259
+ // Here we are dealing with a (C++) Function, which is not supported.
260
+ // Let's raise an error instead of being silently incorrect.
261
+ functorch_tls->checkSupportsCppAutogradFunction();
262
+ }
263
+
264
+ std::shared_ptr<CppNode<T>> node(new CppNode<T>(), deleteNode);
265
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
266
+ variable_list input_vars;
267
+
268
+ const size_t num_inputs = sizeof...(Args);
269
+ input_vars.reserve(num_inputs);
270
+ node->is_variable_input_.reserve(num_inputs);
271
+ // TODO Add tracing here
272
+ extract_vars(node->is_variable_input_, input_vars, args...);
273
+
274
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
275
+ bool is_executable =
276
+ GradMode::is_enabled() && any_variable_requires_grad(input_vars);
277
+ auto next_edges =
278
+ (is_executable ? collect_next_edges(input_vars) : edge_list());
279
+ node->set_ctx_grad_fn(node);
280
+ node->set_next_edges(std::move(next_edges));
281
+ node->clear_input_metadata();
282
+
283
+ node->input_info_.reserve(input_vars.size());
284
+ for (auto& var : input_vars) {
285
+ node->input_info_.emplace_back(var);
286
+ }
287
+
288
+ using forward_return_t = forward_t<X, Args...>;
289
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
290
+ forward_return_t outputs;
291
+ {
292
+ AutoGradMode grad_mode(false);
293
+ outputs = T::forward(&node->ctx_, std::forward<Args>(args)...);
294
+ }
295
+
296
+ _jvp_fn_t jvp_fn = [](const variable_list& inputs,
297
+ const variable_list& gI) -> variable_list {
298
+ TORCH_CHECK(
299
+ false,
300
+ "jvp is not implemented for the c++ API of custom Function yet.",
301
+ "Please open a feature request on GitHub if you need this.");
302
+ };
303
+
304
+ auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor {
305
+ return x.view_as(x);
306
+ };
307
+
308
+ auto wrapped_outputs = _wrap_outputs(
309
+ input_vars,
310
+ node->ctx_.get_non_differentiable(),
311
+ node->ctx_.get_and_bump_dirty(),
312
+ to_optional(outputs),
313
+ is_executable ? node : nullptr,
314
+ jvp_fn,
315
+ {},
316
+ view_as_self_fn);
317
+
318
+ node->output_info_.reserve(wrapped_outputs.size());
319
+ for (auto& output : wrapped_outputs) {
320
+ if (is_executable && output.has_value()) {
321
+ node->output_info_.emplace_back(output.value());
322
+ } else if (is_executable) {
323
+ node->output_info_.emplace_back();
324
+ }
325
+ }
326
+
327
+ if (is_executable) {
328
+ node->save_variables_to_ctx();
329
+ }
330
+
331
+ // wrapped_outputs will be a variable_list so, convert it to the correct
332
+ // return type. Only Variable and variable_list are accepted as return types.
333
+ return to_output_type<forward_return_t>(wrapped_outputs);
334
+ }
335
+
336
+ // The logic here is the same as PyNode::apply, so changes to it should be done
337
+ // in both the places
338
+ template <class T>
339
+ // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
340
+ variable_list CppNode<T>::apply(variable_list&& inputs) {
341
+ at::OptionalDeviceGuard _device_guard;
342
+
343
+ auto num_inputs = inputs.size();
344
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
345
+ variable_list backward_inputs;
346
+ backward_inputs.reserve(num_inputs);
347
+ for (const auto i : c10::irange(num_inputs)) {
348
+ if (inputs[i].defined() || !ctx_.materialize_grads_) {
349
+ backward_inputs.emplace_back(std::move(inputs[i]));
350
+ } else {
351
+ backward_inputs.emplace_back(output_info_[i].zeros(_device_guard));
352
+ }
353
+ }
354
+
355
+ // Acquire lock to here protect thread safety on custom C++ Autograd Node
356
+ // This is needed for the custom Autograd Node since we don't know if the
357
+ // user defined Node will write to the shared data during backward.
358
+ // see Note [Thread Safety on Autograd Node]
359
+ std::lock_guard<std::mutex> lock(mutex_);
360
+
361
+ auto outputs = T::backward(&ctx_, backward_inputs);
362
+
363
+ const auto num_forward_inputs =
364
+ static_cast<int64_t>(is_variable_input_.size());
365
+ auto num_outputs = static_cast<int64_t>(outputs.size());
366
+ // Returning too many results is ok, but only as long as they're all
367
+ // undefined. Truncate the result vector in that case.
368
+ if (num_outputs > num_forward_inputs) {
369
+ bool all_undef = true;
370
+ for (const auto i : c10::irange(num_forward_inputs, num_outputs)) {
371
+ all_undef &= (!outputs[i].defined());
372
+ }
373
+ if (all_undef) {
374
+ outputs.resize(num_forward_inputs);
375
+ num_outputs = num_forward_inputs;
376
+ }
377
+ }
378
+
379
+ if (num_outputs != num_forward_inputs) {
380
+ std::string msg("function ");
381
+ msg += name() + " returned an incorrect number of gradients (expected ";
382
+ msg += c10::to_string(num_forward_inputs) + ", got ";
383
+ msg += c10::to_string(num_outputs) + ")";
384
+ throw std::runtime_error(msg);
385
+ }
386
+
387
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
388
+ variable_list results;
389
+ results.reserve(num_outputs);
390
+ for (const auto i : c10::irange(num_outputs)) {
391
+ if (!is_variable_input_[i]) {
392
+ if (outputs[i].defined()) {
393
+ std::string msg("function ");
394
+ msg += name() +
395
+ " returned a gradient different that is defined at position ";
396
+ msg += c10::to_string(i + 1) +
397
+ ", but the corresponding forward input was not a Variable";
398
+ throw std::runtime_error(msg);
399
+ }
400
+ continue;
401
+ }
402
+ results.emplace_back(outputs[i]);
403
+ }
404
+ return results;
405
+ }
406
+
407
+ template <class T>
408
+ void CppNode<T>::release_variables() {
409
+ // lock to ensure thread safety, see [Thread Safety on Autograd Node]
410
+ std::lock_guard<std::mutex> lock(mutex_);
411
+ ctx_.saved_variables_.clear();
412
+ ctx_.has_freed_buffers_ = true;
413
+ }
414
+
415
+ template <class T>
416
+ void CppNode<T>::save_variables_to_ctx() {
417
+ ctx_.save_variables();
418
+ }
419
+
420
+ template <class T>
421
+ void CppNode<T>::set_ctx_grad_fn(const std::shared_ptr<Node>& node) {
422
+ ctx_.grad_fn_ = node;
423
+ }
424
+
425
+ } // namespace torch::autograd
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/CachedTensorUtils.h>
4
+ #include <ATen/LegacyBatchedTensorImpl.h>
5
+ #include <ATen/TensorOperators.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/autograd/function.h>
8
+ #include <torch/csrc/autograd/utils/grad_layout_contract.h>
9
+ #include <torch/csrc/autograd/variable.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/Functions.h>
13
+ #else
14
+ #include <ATen/ops/_sparse_coo_tensor_unsafe.h>
15
+ #endif
16
+
17
+ #include <mutex>
18
+
19
+ namespace torch {
20
+ namespace autograd {
21
+
22
+ #define CHECK_RESULT(RESULT, VAR) \
23
+ if (!(RESULT.is_sparse() || VAR.is_sparse() || RESULT.is_sparse_csr() || \
24
+ VAR.is_sparse_csr())) { \
25
+ if (!utils::obeys_layout_contract(RESULT, VAR)) { \
26
+ TORCH_WARN_ONCE( \
27
+ "grad and param do not obey the gradient layout contract. " \
28
+ "This is not an error, but may impair performance.\n" \
29
+ "grad.sizes() = ", \
30
+ RESULT.sizes(), \
31
+ ", strides() = ", \
32
+ RESULT.strides(), \
33
+ "\n", \
34
+ "param.sizes() = ", \
35
+ VAR.sizes(), \
36
+ ", strides() = ", \
37
+ VAR.strides()); \
38
+ } \
39
+ }
40
+
41
+ struct TORCH_API AccumulateGrad : public Node {
42
+ explicit AccumulateGrad(Variable variable_);
43
+
44
+ variable_list apply(variable_list&& grads) override;
45
+
46
+ std::vector<std::unique_ptr<FunctionPreHook>>& tensor_pre_hooks() noexcept
47
+ override {
48
+ // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
49
+ // it can be destroyed even though the Tensor is still alive (contrary
50
+ // to all other Nodes). So we must lazily read the Tensor hooks here.
51
+ return impl::hooks(variable);
52
+ }
53
+
54
+ std::unique_ptr<PostAccumulateGradHook>& tensor_post_acc_grad_hooks() noexcept
55
+ override {
56
+ // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
57
+ // it can be destroyed even though the Tensor is still alive (contrary
58
+ // to all other Nodes). So we must lazily read the Tensor hooks here.
59
+ return impl::post_acc_grad_hooks(variable);
60
+ }
61
+
62
+ // Given a variable with its current grad as variable_grad, accumulates
63
+ // new_grad into variable_grad if in place accumulation is possible.
64
+ // Otherwise, uses 'update_grad' to update the grad for the variable.
65
+
66
+ // "Gradient Layout Contract"
67
+ //
68
+ // AccumulateGrad tries to stash strided (non-sparse) grads with memory layout
69
+ // (strides) such that variables and grads interact efficiently in later
70
+ // optimizer kernels, and grads interact efficiently with c10d::Reducer.cpp.
71
+ //
72
+ // Specifically, AccumulateGrad tries to ensure the following
73
+ // (cf torch/csrc/autograd/utils/grad_layout_contract.h):
74
+ // (1) if variable.is_non_overlapping_and_dense(), the stashed grad's
75
+ // strides match variable.
76
+ // (2) else, stashed grad is rowmajor contiguous.
77
+ // If variable's grad does not exist (!variable_grad.defined())
78
+ // AccumulateGrad steals new_grad if it's stealable and obeys the contract
79
+ // already, otherwise it deep copies new_grad into an obedient clone.
80
+ //
81
+ // If variable's grad already exists (variable_grad.defined()), new_grad must
82
+ // be added to variable_grad. If we aren't setting up for double backward
83
+ // (!GradMode::is_enabled()), AccumulateGrad performs "variable_grad +=
84
+ // new_grad" in-place, which keeps variable_grad's layout. We assume (hope)
85
+ // variable_grad was created obeying (1) or (2) at some point in the past.
86
+ //
87
+ // If we are setting up for double backward, AccumulateGrad updates the grad
88
+ // out-of-place via "variable_grad + new_grad." TensorIterator operator+
89
+ // decides result's layout. Typically TensorIterator matches strides of the
90
+ // first arg, so we once again assume (hope) variable_grad was originally
91
+ // created obeying (1) or (2).
92
+ //
93
+ // AccumulateGrad does not enforce the contract with 100% certainty. Examples:
94
+ // - If a user manually permutes a param or its grad, then runs a fwd+bwd,
95
+ // variable_grad += new_grad keeps variable_grad's layout without
96
+ // rechecking the contract.
97
+ // - If TensorIterator changes its corner cases about operator+'s result
98
+ // (for example, giving more or less priority to channels_last inputs, see
99
+ // https://github.com/pytorch/pytorch/pull/37968) the result may not obey.
100
+ //
101
+ // Fortunately, if a given grad doesn't satisfy (1) or (2), the penalty is
102
+ // degraded performance in Reducer.cpp or optimizer kernels, not death by
103
+ // assert or silently bad numerics.
104
+
105
+ // variable: the variable whose grad we're accumulating.
106
+ // variable_grad: the current grad for the variable.
107
+ // new_grad: new grad we want to accumulate for the variable.
108
+ // num_expected_refs: the number of refs we expect to hold internally
109
+ // such that it is safe to avoid cloning the grad
110
+ // if use_count() of the grad is less than or equal
111
+ // to this value (in addition to post_hooks).
112
+ // update_grad: Function that is used to update grad for the variable.
113
+ // The argument to the function is a Tensor which
114
+ // is used to set a new value for the grad.
115
+ template <typename T>
116
+ static void accumulateGrad(
117
+ const Variable& variable,
118
+ at::Tensor& variable_grad,
119
+ const at::Tensor& new_grad,
120
+ size_t num_expected_refs,
121
+ const T& update_grad) {
122
+ if (!variable_grad.defined()) {
123
+ if (!GradMode::is_enabled() && !new_grad.is_sparse() &&
124
+ !new_grad.is_sparse_csr() &&
125
+ !(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) &&
126
+ at::caching::adjusted_use_count(new_grad) <= num_expected_refs &&
127
+ (new_grad.is_mkldnn() ||
128
+ utils::obeys_layout_contract(new_grad, variable))) {
129
+ // we aren't setting up for double-backward
130
+ // not sparse
131
+ // no other user-visible tensor references new_grad
132
+ // new_grad obeys the "Gradient Layout Contract", there has a special
133
+ // case, For MKLDNN tensor, which is a opaque tensor, assuming it obeys
134
+ // layout_contract. Under these conditions, we can steal new_grad
135
+ // without a deep copy.
136
+ update_grad(new_grad.detach());
137
+ } else if (
138
+ !GradMode::is_enabled() && new_grad.is_sparse() &&
139
+ new_grad._indices().is_contiguous() &&
140
+ new_grad._values().is_contiguous() &&
141
+ // Use count for indices and values should always be <=1 since the
142
+ // SparseTensor should be the only one holding a reference to these.
143
+ new_grad._indices().use_count() <= 1 &&
144
+ new_grad._values().use_count() <= 1 &&
145
+ new_grad.use_count() <= num_expected_refs) {
146
+ // Can't detach sparse tensor (since metadata changes are not allowed
147
+ // after detach), so just create a new one for the grad which is a
148
+ // shallow copy. We need a shallow copy so that modifying the original
149
+ // grad tensor doesn't modify the grad we accumulate.
150
+ // We only skip clone if indices and values themselves are contiguous
151
+ // for backward compatibility reasons. Since without this optimization,
152
+ // earlier we would clone the entire SparseTensor which cloned indices
153
+ // and values.
154
+ // For details see https://github.com/pytorch/pytorch/issues/34375.
155
+
156
+ // No scenario where we expect this to be true currently
157
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
158
+ !at::caching::is_cached_tensor(new_grad._indices()) &&
159
+ !at::caching::is_cached_tensor(new_grad._values()) &&
160
+ !at::caching::is_cached_tensor(new_grad));
161
+
162
+ update_grad(at::_sparse_coo_tensor_unsafe(
163
+ new_grad._indices(),
164
+ new_grad._values(),
165
+ new_grad.sizes(),
166
+ new_grad.options()));
167
+ } else {
168
+ if (new_grad.is_sparse() || new_grad.is_sparse_csr() ||
169
+ new_grad.is_nested()) {
170
+ update_grad(new_grad.clone());
171
+ } else {
172
+ if (new_grad.is_mkldnn()) {
173
+ update_grad(new_grad.clone());
174
+ } else {
175
+ // Deep copies new_grad according to the "Gradient Layout Contract."
176
+ update_grad(utils::clone_obey_contract(new_grad, variable));
177
+ }
178
+ }
179
+ }
180
+ } else if (!GradMode::is_enabled()) {
181
+ // This case is not strictly necessary, but it makes the first-order only
182
+ // case slightly more efficient.
183
+ if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
184
+ // If `variable_grad` is sparse and `new_grad` is not sparse, their
185
+ // sum is not sparse, and we must change the TensorImpl type of
186
+ // `variable_grad` for it to store the result. However, changing the
187
+ // TensorImpl type of a tensor requires changing the tensor itself, and
188
+ // thus in this case we have to change the grad tensor.
189
+ auto result = new_grad + variable_grad;
190
+ CHECK_RESULT(result, variable);
191
+ update_grad(std::move(result));
192
+ } else if (!at::inplaceIsVmapCompatible(variable_grad, new_grad)) {
193
+ // Ideally we'd perform an in-place operation to avoid changing
194
+ // the grad tensor. However, if that's impossible because the grads
195
+ // are vmap-incompatible (See NOTE: [vmap-incompatible in-place
196
+ // operations]), then we just add them out-of-place.
197
+ auto result = variable_grad + new_grad;
198
+ CHECK_RESULT(result, variable);
199
+ update_grad(std::move(result));
200
+ } else {
201
+ // In this case we can avoid changing the grad tensor. There are three
202
+ // scenarios when we'll hit this case:
203
+ //
204
+ // 1. `variable_grad` is sparse, and `new_grad` is sparse.
205
+ // 2. `variable_grad` is dense, and `new_grad` is sparse.
206
+ // 3. `variable_grad` is dense, and `new_grad` is dense.
207
+ // 4. `variable_grad` is mkldnn, and `new_grad` is mkldnn.
208
+ //
209
+ // In all of these four cases, `variable_grad += new_grad` is a
210
+ // valid operation which adds `new_grad` to `variable_grad` in
211
+ // place. `variable_grad` is thus still referring to the same tensor
212
+ // after the operation.
213
+ // Also DistributedDataParallel(DDP) package relies on grad being
214
+ // mutated in place for saving peak memory usage. DDP will still
215
+ // work correctly if it is mutated out of place here, but DDP will
216
+ // maintain one extra copy of grad tensors in buffer and thus
217
+ // increase peak memory usage.
218
+ variable_grad += new_grad;
219
+ CHECK_RESULT(variable_grad, variable);
220
+ // ^ We could enforce the contract more aggressively here by writing:
221
+ // if (variable_grad.is_sparse() || new_grad.is_sparse()) {
222
+ // variable_grad += new_grad;
223
+ // } else if (obeys_layout_contract(variable_grad, variable)) {
224
+ // variable_grad += new_grad;
225
+ // } else {
226
+ // result = at::empty_strided(variable.sizes(), variable.strides(),
227
+ // variable.options().memory_format(c10::nullopt));
228
+ // update_grad(at::native::add_out(result, variable_grad,
229
+ // new_grad, 1.0);
230
+ // }
231
+ // However, that accumulation is sometimes in place and sometimes not,
232
+ // which may break user code.
233
+ }
234
+ } else {
235
+ at::Tensor result;
236
+ if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
237
+ // CPU backend throws an error on sparse + dense, so prefer dense +
238
+ // sparse here.
239
+ result = new_grad + variable_grad;
240
+ } else {
241
+ // Assumes operator+ result typically matches strides of first arg,
242
+ // and hopes variable_grad was originally created obeying layout
243
+ // contract.
244
+ result = variable_grad + new_grad;
245
+ }
246
+ CHECK_RESULT(result, variable);
247
+ update_grad(std::move(result));
248
+ // ^ We could enforce the contract more aggressively here by saying
249
+ // if (obeys_layout_contract(new_grad, variable)) {
250
+ // update_grad(new_grad + variable_grad);
251
+ // } else {
252
+ // update_grad(variable_grad + new_grad);
253
+ // }
254
+ // such that the stashed grad is likely to have the right strides if
255
+ // either variable_grad or new_grad already has the right strides.
256
+ // We could enforce the contract with certainty by saying
257
+ // auto result = variable_grad + new_grad (or vice versa), checking
258
+ // result's layout, and copying to an obedient clone if necessary before
259
+ // update_grad. The copy would require another gmem pass. We can't create
260
+ // empty result with the right layout then add_out into it with a single
261
+ // kernel, because GradMode is enabled in this branch, and add_out isn't
262
+ // differentiable. Maybe more trouble than it's worth.
263
+ }
264
+ }
265
+
266
+ void compiled_args(CompiledNodeArgs& args) override;
267
+ variable_list apply_with_saved(
268
+ const variable_list& inputs,
269
+ SwapSavedVariables& saved) override;
270
+
271
+ Variable variable;
272
+ };
273
+
274
+ #undef CHECK_RESULT
275
+
276
+ } // namespace autograd
277
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/autograd/function.h>
6
+ #include <torch/csrc/autograd/variable.h>
7
+
8
+ #include <memory>
9
+ #include <string>
10
+ #include <vector>
11
+
12
+ namespace torch {
13
+ namespace autograd {
14
+
15
+ struct TORCH_API Error : public Node {
16
+ Error(std::string msg, edge_list&& next_edges)
17
+ : Node(std::move(next_edges)), msg(std::move(msg)) {}
18
+
19
+ Error(std::string msg) : msg(std::move(msg)) {}
20
+
21
+ variable_list apply(variable_list&& inputs) override;
22
+
23
+ void compiled_args(CompiledNodeArgs& args) override;
24
+ variable_list apply_with_saved(
25
+ const variable_list& inputs,
26
+ SwapSavedVariables& saved) override;
27
+
28
+ std::string msg;
29
+ };
30
+
31
+ // We print grad_fn names in tensor printing. For functions with backward
32
+ // NYI, grad_fn=<Error> will be printed if we use Error, which is confusing. So
33
+ // special case with a new NotImplemented function here.
34
+ struct TORCH_API NotImplemented : public Error {
35
+ NotImplemented(const std::string& forward_fn, edge_list&& next_edges)
36
+ : Error(
37
+ "derivative for " + forward_fn + " is not implemented",
38
+ std::move(next_edges)) {}
39
+
40
+ NotImplemented(const std::string& forward_fn)
41
+ : Error("derivative for " + forward_fn + " is not implemented") {}
42
+ };
43
+
44
+ // Identity in forward, Error in backward. Used to implement
45
+ // @once_differentiable
46
+ struct TORCH_API DelayedError : public Node {
47
+ DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
48
+ // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
49
+ for (const auto i : c10::irange(num_inputs)) {
50
+ (void)i; // Suppress unused variable warning
51
+ add_input_metadata(Node::undefined_input());
52
+ }
53
+ }
54
+
55
+ variable_list apply(variable_list&& inputs) override;
56
+
57
+ std::string msg;
58
+ };
59
+
60
+ struct TORCH_API UndefinedGrad : public Node {
61
+ UndefinedGrad() {
62
+ add_input_metadata(Node::undefined_input());
63
+ }
64
+
65
+ variable_list apply(variable_list&& inputs) override;
66
+ };
67
+
68
+ struct TORCH_API UndefinedGradBackward : public Node {
69
+ UndefinedGradBackward(edge_list&& next_edges) : Node(std::move(next_edges)) {}
70
+
71
+ UndefinedGradBackward() = default;
72
+
73
+ variable_list apply(variable_list&& inputs) override;
74
+
75
+ void compiled_args(CompiledNodeArgs& args) override {}
76
+ variable_list apply_with_saved(
77
+ const variable_list& inputs,
78
+ SwapSavedVariables& saved) override {
79
+ return apply(variable_list(inputs));
80
+ }
81
+ };
82
+
83
+ struct TORCH_API GraphRoot : public Node {
84
+ GraphRoot(edge_list functions, variable_list inputs)
85
+ : Node(std::move(functions)), outputs(std::move(inputs)) {
86
+ // Ensures calls to stream() on a GraphRoot instance reflect current
87
+ // stream(s) on devices of root grad tensors at the time the instance is
88
+ // constructed.
89
+ for (const auto& t : outputs) {
90
+ add_input_metadata(t);
91
+ }
92
+ }
93
+
94
+ variable_list apply(variable_list&& inputs) override {
95
+ return outputs;
96
+ }
97
+
98
+ void compiled_args(CompiledNodeArgs& args) override;
99
+ variable_list apply_with_saved(
100
+ const variable_list& inputs,
101
+ SwapSavedVariables& saved) override;
102
+
103
+ variable_list outputs;
104
+ };
105
+
106
+ struct TORCH_API Identity : public Node {
107
+ variable_list apply(variable_list&& inputs) override;
108
+ };
109
+
110
+ } // namespace autograd
111
+ } // namespace torch
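DelayedError above is the node behind @once_differentiable: identity in forward, an error if the backward graph is ever re-entered. A rough analogue written against the public custom-function API, as a sketch only (the struct name and message are illustrative, not taken from this header):

#include <torch/torch.h>
#include <exception>
#include <iostream>

using torch::autograd::AutogradContext;
using torch::autograd::Function;
using torch::autograd::tensor_list;

// Identity in forward, error in backward -- the same shape of behavior
// DelayedError provides, expressed with torch::autograd::Function.
struct OnceDifferentiableLike : public Function<OnceDifferentiableLike> {
  static torch::Tensor forward(AutogradContext* ctx, torch::Tensor input) {
    return input;  // pass the value through untouched
  }
  static tensor_list backward(AutogradContext* ctx, tensor_list grad_outputs) {
    TORCH_CHECK(false, "backward through this node is not supported");
    return {};  // unreachable; keeps the signature satisfied
  }
};

int main() {
  auto x = torch::randn({3}, torch::requires_grad());
  auto y = OnceDifferentiableLike::apply(x);
  try {
    y.sum().backward();  // reaches backward() above, which throws
  } catch (const std::exception& e) {
    std::cout << "backward raised as expected: " << e.what() << std::endl;
  }
  return 0;
}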
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/autograd/function.h>
5
+ #include <torch/csrc/autograd/variable.h>
6
+
7
+ #include <ATen/ATen.h>
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <c10/util/Optional.h>
10
+
11
+ #include <cstddef>
12
+ #include <vector>
13
+
14
+ namespace torch {
15
+ namespace autograd {
16
+
17
+ struct TORCH_CUDA_CU_API Scatter : public Node {
18
+ explicit Scatter(
19
+ std::vector<at::Device> devices,
20
+ c10::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
21
+ int64_t dim = 0,
22
+ c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams =
23
+ c10::nullopt,
24
+ bool unsqueeze_scalars = false);
25
+ ~Scatter() override;
26
+
27
+ variable_list apply(variable_list&& inputs) override;
28
+
29
+ std::vector<at::Device> devices_;
30
+ c10::optional<std::vector<int64_t>> chunk_sizes_;
31
+ int64_t dim_;
32
+ c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams_;
33
+ bool unsqueeze_scalars_;
34
+ };
35
+
36
+ struct TORCH_CUDA_CU_API Gather : public Node {
37
+ explicit Gather(const at::Device& destination_device, int64_t dim = 0);
38
+ ~Gather() override;
39
+
40
+ variable_list apply(variable_list&& inputs) override;
41
+
42
+ at::Device destination_device_;
43
+ int64_t dim_;
44
+ };
45
+
46
+ } // namespace autograd
47
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #include <pybind11/pybind11.h>
4
+ #include <pybind11/stl.h>
5
+ #include <torch/csrc/python_headers.h>
6
+ #include <torch/csrc/utils/pybind.h>
7
+
8
+ #include <torch/csrc/autograd/python_cpp_function.h>
9
+ #include <torch/csrc/autograd/python_function.h>
10
+
11
+ namespace py = pybind11;
12
+
13
+ namespace pybind11 {
14
+ namespace detail {}
15
+ } // namespace pybind11
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h ADDED
@@ -0,0 +1,186 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/autograd/function.h>
5
+ #include <torch/csrc/autograd/variable.h>
6
+
7
+ #include <ATen/TensorGeometry.h>
8
+ #include <ATen/core/DeprecatedTypeProperties.h>
9
+ #include <c10/util/Optional.h>
10
+
11
+ #include <cstdint>
12
+ #include <memory>
13
+
14
+ namespace torch {
15
+ namespace autograd {
16
+
17
+ struct TORCH_API CopyBackwards : public Node {
18
+ variable_list apply(variable_list&& grads) override;
19
+ void compiled_args(CompiledNodeArgs& args) override;
20
+ variable_list apply_with_saved(
21
+ const variable_list& inputs,
22
+ SwapSavedVariables& saved) override;
23
+
24
+ at::TensorOptions src_options;
25
+ };
26
+
27
+ // Note [View + Inplace update for base tensor]
28
+ //
29
+ // This note covers a few important topics related to view + inplace handling.
30
+ // - It explains what the CopySlices Node is and why we need it.
31
+ // - It explains the considerations on what is saved for backward in
32
+ // CopySlices.
33
+ // - It explains why we need to sometimes change the exec_info of the current
34
+ // backward
35
+ //
36
+ // What is CopySlices?
37
+ // ~~~~~~~~~~~~~~~~~~~
38
+ //
39
+ // We support autograd with inplace mutation; e.g., if you write x.mul_(2)
40
+ // the autograd will work as if you now had multiple Tensors under the hood and
41
+ // you did
42
+ // x = t.clone()
43
+ // x0 = x
44
+ // x1 = x0 * 2
45
+ // x = x1
46
+ // As you can see here, after this operation, x.grad_fn now points to x1.grad_fn
47
+ // (the MulBackward node) and this node points to x's original grad_fn (which is
48
+ // also x0.grad_fn). It is important to keep in mind that after the inplace,
49
+ // there is no Tensor object that represents the x0 state anymore. But the graph
50
+ // for it is still around in autograd (in case x was used before being modified
51
+ // inplace). See Example 1 in
52
+ // https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
53
+ // We call this rebasing the history of the Tensor.
54
+ //
55
+ // Now, a difficult situation is what happens if x is a differentiable view
56
+ // of a base b.
57
+ // b = t.clone()
58
+ // x = b.select(0, 0)
59
+ // x *= 2
60
+ // With the same approach as above, this will become
61
+ // b = t.clone()
62
+ // x = b.select(0, 0)
63
+ // b0 = b
64
+ // x0 = x
65
+ // x1 = x0 * 2
66
+ // b1 = b0.select_scatter(x1, 0, 0)
67
+ // x2 = b1.select(0, 0)
68
+ // x = x2
69
+ // b = b1
70
+ // As you can see here, not only do we need to modify x's grad_fn, we also need to
71
+ // modify the one from b. We also need to ensure that the new grad_fn on x is
72
+ // linked to b's new grad_fn. The chain of select_scatter, multiplication and
73
+ // select is what CopySlices does, all wrapped into a single Node.
74
+ //
75
+ // See Example 1 in
76
+ // https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
77
+ //
78
+ // What do we need to save in CopySlices to run backward?
79
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
80
+ //
81
+ // We need to perform grad_view = fn(grad_view), but out-of-place.
82
+ // view_fn_ is an optional function saved in DifferentiableViewMeta
83
+ // from the forward pass, so that we can recover the view when as_strided is not
84
+ // supported. It preserves the invariants:
85
+ // view = view_fn_(base)
86
+ // grad_view = view_fn_(grad_base)
87
+ //
88
+ // When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_
89
+ // is empty and we save TensorGeometry(view) instead.
90
+ // With the TensorGeometry information we can use `as_strided` call which
91
+ // is more efficient to recover views in backward.
92
+ //
93
+ // For example:
94
+ // view_1 = view_op_1(base)
95
+ // view_2 = view_op_2(view_1)
96
+ // ...
97
+ // view_n = view_op_n(view_n-1)
98
+ // view_n = inplace_op(view_n)
99
+ //
100
+ // In CPU/CUDA case where we support efficient as_strided implementation,
101
+ // grad_view_n can be calculated through 1 step.
102
+ //
103
+ // grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
104
+ //
105
+ // But in XLA backend where we don't have full support of as_strided,
106
+ // it has to save a chained lambda function view_fn_, to exactly
107
+ // replay how the view was done in forward.
108
+ //
109
+ // view_fn_ = view_op_n(...(view_op_2(view_op_1())))
110
+ // grad_view_n = view_fn_(grad_base)
111
+ //
112
+ // This chain view_fn_ works as long as forward view ops are implemented,
113
+ // e.g XLA simulates view without a real Storage behind Tensor, but it's less
114
+ // efficient than the as_strided one so we should be careful to only use it when
115
+ // necessary.
116
+ //
117
+ // - For CPU/CUDA we save TensorGeometry of both base and view tensors,
118
+ // That's all we need to pass into as_strided.
119
+ // E.g. int[] sizes, int[] strides, and int storage_offset.
120
+ // - For XLA we use view_fn_, which captures all forward view op arguments
121
+ // by **value**.
122
+ // E.g. for at::narrow, int dim, int start, and int length are saved.
123
+ //
124
+ // Theoretically we could also save Tensor `view` in CopySlices Node, but
125
+ // it's far more expensive than what we currently save.
126
+ // 1. We cannot afford keeping large tensors alive to recover views only.
127
+ // 2. There are inplace checks when Tensors are loaded back to make sure
128
+ // they haven't been changed (including size metadata).
129
+ // So saving metadata like TensorGeometry/view arguments is much better
130
+ // because it is minimal information needed to recover views, as well as it
131
+ // allows the user to modify the original Tensor without preventing the
132
+ // backward pass from running.
133
+ //
134
+ // Why do we manually change exec_info in the apply?
135
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
136
+ //
137
+ // Using the same example as before,
138
+ // b = t.clone()
139
+ // x = b.select(0, 0)
140
+ // x *= y
141
+ //
142
+ // You can see the visualization at
143
+ // https://docs.google.com/drawings/d/1Bx-Hcz-zlIv7PabQqnPhUIVIs9F8WWi48svqMsAUMFs
144
+ // which contains the wrapped MulBackward Node and show what it links to.
145
+ // Since a backward can happen between any subset of the inputs (t and y) and
146
+ // outputs (o, x, b), it is possible to get into a state where CopySlices's 0th
147
+ // next function (CloneBackward) needs gradient but MulBackward's 0th next
148
+ // function (SelectBackward) is not. This happens if you do autograd.grad
149
+ // between x and t for example.
150
+ // In such a case, we do need to mark SelectBackward as requiring gradient such
151
+ // that, during the execution of MulBackward, we will actually compute gradient
152
+ // for the 0th input.
153
+ //
154
+ // All the other next functions are always shared (this is asserted in the apply
155
+ // code) and so nothing needs to be done for them.
156
+
157
+ // See Note [View + Inplace update for view tensor] for what we do to view
158
+ // tensor when an in-place operation happens.
159
+ struct TORCH_API CopySlices : public Node {
160
+ CopySlices(
161
+ const Variable& base_var,
162
+ at::TensorGeometry view_,
163
+ std::unique_ptr<ViewFunc> view_fn_,
164
+ std::shared_ptr<Node> fn_);
165
+
166
+ // common code between apply/apply_with_saved
167
+ template <typename T>
168
+ variable_list apply_impl(variable_list&& inputs, const T& call_fn);
169
+
170
+ variable_list apply(variable_list&& inputs) override;
171
+ void release_variables() override;
172
+ void compiled_args(CompiledNodeArgs& args) override;
173
+ variable_list apply_with_saved(
174
+ const variable_list& inputs,
175
+ SwapSavedVariables& saved) override;
176
+
177
+ at::TensorGeometry base;
178
+ // view and view_fn are redundant and view_fn will be used if available.
179
+ // See Note [View + Inplace update for base tensor] for details.
180
+ at::TensorGeometry view;
181
+ std::unique_ptr<ViewFunc> view_fn;
182
+ std::shared_ptr<Node> fn;
183
+ };
184
+
185
+ } // namespace autograd
186
+ } // namespace torch
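A small libtorch sketch of the scenario the note walks through: an in-place update on a view rebases the history of both the view and its base, and the backward pass then flows through the resulting CopySlices node (tensor names mirror the note; the sketch assumes <torch/torch.h> and is illustrative only):

#include <torch/torch.h>
#include <iostream>

int main() {
  auto t = torch::ones({3, 3}, torch::requires_grad());
  auto b = t.clone();       // differentiable copy of t
  auto x = b.select(0, 0);  // x is a view into b
  x.mul_(2);                // in-place on the view; b's history is rebased

  b.sum().backward();
  // Row 0 of b was doubled before the sum, so row 0 of t.grad() is 2
  // and the remaining rows are 1.
  std::cout << t.grad() << std::endl;
  return 0;
}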
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h ADDED
@@ -0,0 +1,114 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/autograd/InferenceMode.h>
5
+ #include <torch/csrc/autograd/autograd.h>
6
+ #include <torch/csrc/autograd/function.h>
7
+ #include <torch/csrc/autograd/variable.h>
8
+ #include <torch/csrc/utils/variadic.h>
9
+
10
+ #include <ATen/core/Tensor.h>
11
+
12
+ #include <functional>
13
+ #include <memory>
14
+ #include <vector>
15
+
16
+ namespace torch {
17
+ namespace autograd {
18
+
19
+ using function_constructor = std::function<std::shared_ptr<Node>(edge_list&&)>;
20
+
21
+ /**
22
+ * Wraps the tensor outputs in variables and creates the grad_fn and sets the
23
+ * grad_fn if necessary.
24
+ */
25
+ TORCH_API variable_list wrap_outputs(
26
+ const variable_list& inputs,
27
+ tensor_list&& outputs,
28
+ const function_constructor& ctr);
29
+
30
+ /// Checks that inputs contains exactly `args` items and that the first
31
+ /// `required_args`
32
+ /// items are not nullptr. If not specified, `required_args` defaults to `args`.
33
+ TORCH_API void check_input_variables(
34
+ const char* name,
35
+ const variable_list& inputs,
36
+ int args,
37
+ int required_args = -1,
38
+ bool allow_undefined = false);
39
+
40
+ struct ComputeRequiresGrad : IterArgs<ComputeRequiresGrad> {
41
+ bool out = false;
42
+ using IterArgs<ComputeRequiresGrad>::operator();
43
+ void operator()(const at::Tensor& tensor) {
44
+ const auto& var = static_cast<const Variable&>(tensor);
45
+ if (var.defined() && var.requires_grad()) {
46
+ out = true;
47
+ }
48
+ }
49
+ void operator()(const c10::optional<at::Tensor>& tensor) {
50
+ if (tensor.has_value()) {
51
+ (*this)(*tensor);
52
+ }
53
+ }
54
+ bool short_circuit() {
55
+ return out;
56
+ }
57
+ };
58
+
59
+ template <typename... Args>
60
+ inline bool compute_requires_grad(Args&&... args) {
61
+ if (!GradMode::is_enabled()) {
62
+ return false;
63
+ }
64
+ return ComputeRequiresGrad().apply(std::forward<Args>(args)...).out;
65
+ }
66
+
67
+ inline void set_history(
68
+ const at::Tensor& variable,
69
+ const std::shared_ptr<Node>& grad_fn) {
70
+ TORCH_CHECK(grad_fn != nullptr);
71
+ if (variable.defined()) {
72
+ // If the codegen triggers this, you most likely want to add your newly
73
+ // added function to the DONT_REQUIRE_DERIVATIVE list in
74
+ // tools/autograd/gen_variable_type.py
75
+ TORCH_INTERNAL_ASSERT(isDifferentiableType(variable.scalar_type()));
76
+ auto output_nr = grad_fn->add_input_metadata(variable);
77
+ impl::set_gradient_edge(variable, {grad_fn, output_nr});
78
+ } else {
79
+ grad_fn->add_input_metadata(Node::undefined_input());
80
+ }
81
+ }
82
+
83
+ inline void set_history(
84
+ const std::vector<Variable>& variables,
85
+ const std::shared_ptr<Node>& grad_fn) {
86
+ for (auto& variable : variables) {
87
+ set_history(variable, grad_fn);
88
+ }
89
+ }
90
+
91
+ inline bool isFwGradDefined(const c10::optional<at::Tensor>& t) {
92
+ return t.has_value() && t->defined() && t->_fw_grad(/*level */ 0).defined();
93
+ }
94
+
95
+ inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) {
96
+ bool ret = false;
97
+ for (auto& variable : variables) {
98
+ ret |= isFwGradDefined(variable);
99
+ }
100
+ return ret;
101
+ }
102
+
103
+ inline bool isFwGradDefinedTensorList(
104
+ const c10::List<c10::optional<at::Tensor>>& li) {
105
+ bool ret = false;
106
+ for (auto i : c10::irange(li.size())) {
107
+ auto t = li.get(i);
108
+ ret |= (t.has_value() && isFwGradDefined(t.value()));
109
+ }
110
+ return ret;
111
+ }
112
+
113
+ } // namespace autograd
114
+ } // namespace torch
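compute_requires_grad() above short-circuits to false whenever GradMode is disabled, so no grad_fn is created and set_history() is never reached for ops run under a no-grad guard. A minimal sketch of the visible effect from the C++ frontend (assuming <torch/torch.h>):

#include <torch/torch.h>
#include <iostream>

int main() {
  auto a = torch::randn({2, 2}, torch::requires_grad());

  auto b = a * 2;  // GradMode on: a grad_fn is recorded and b requires grad
  std::cout << std::boolalpha << b.requires_grad() << std::endl;  // true

  {
    torch::NoGradGuard no_grad;  // GradMode::is_enabled() is now false
    auto c = a * 2;              // no history is recorded for c
    std::cout << c.requires_grad() << std::endl;  // false
  }
  return 0;
}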
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h ADDED
The diff for this file is too large to render. See raw diff
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h ADDED
@@ -0,0 +1,59 @@
1
+ #pragma once
2
+
3
+ // @generated from ../tools/autograd/templates/VariableType.h
4
+
5
+ #include <ATen/core/Tensor.h>
6
+ #include <ATen/Context.h>
7
+
8
+ #include <c10/util/intrusive_ptr.h>
9
+
10
+ #include <torch/csrc/Export.h>
11
+ #include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
12
+
13
+ #include <cstdint> // for size_t
14
+ #include <functional> // for function
15
+ #include <memory> // for unique_ptr
16
+ #include <string>
17
+ #include <vector>
18
+
19
+ namespace at {
20
+ struct Quantizer;
21
+ };
22
+
23
+ namespace torch { namespace autograd {
24
+
25
+ using Variable = at::Tensor;
26
+ using at::Context;
27
+ using at::Device;
28
+ using at::Dimname;
29
+ using at::DimnameList;
30
+ using at::Generator;
31
+ using at::IntArrayRef;
32
+ using at::MemoryFormat;
33
+ using at::QScheme;
34
+ using at::Scalar;
35
+ using at::ScalarType;
36
+ using at::Storage;
37
+ using at::Tensor;
38
+ using at::TensorList;
39
+ using at::TensorOptions;
40
+ using at::Quantizer;
41
+ // This is temporary typedef to enable Quantizer in aten native function API
42
+ // we'll remove them when we are actually exposing Quantizer class
43
+ // to frontend
44
+ using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
45
+ using c10::optional;
46
+
47
+ namespace VariableType {
48
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
49
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
50
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
51
+ TORCH_API std::vector<at::DeprecatedTypeProperties*> allPrivateUser1Types();
52
+
53
+ at::Tensor & unpack(Tensor & t, const char * name, int pos);
54
+ const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
55
+ at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
56
+ std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
57
+ };
58
+
59
+ }} // namespace torch::autograd
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/ViewFuncs.h ADDED
@@ -0,0 +1,953 @@
1
+ #pragma once
2
+
3
+ // @generated from ../tools/autograd/templates/ViewFuncs.h
4
+
5
+ #include <torch/library.h>
6
+ #include <torch/csrc/autograd/variable.h>
7
+ #include <c10/core/SymIntArrayRef.h>
8
+
9
+ #ifndef AT_PER_OPERATOR_HEADERS
10
+ #include <ATen/Operators.h>
11
+ #else
12
+ #include <ATen/ops/_conj_ops.h>
13
+ #include <ATen/ops/_indices_ops.h>
14
+ #include <ATen/ops/_neg_view_ops.h>
15
+ #include <ATen/ops/_nested_get_values_ops.h>
16
+ #include <ATen/ops/_nested_view_from_buffer_ops.h>
17
+ #include <ATen/ops/_nested_view_from_jagged_ops.h>
18
+ #include <ATen/ops/_reshape_alias_ops.h>
19
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
20
+ #include <ATen/ops/_values_ops.h>
21
+ #include <ATen/ops/alias_ops.h>
22
+ #include <ATen/ops/as_strided_ops.h>
23
+ #include <ATen/ops/ccol_indices_ops.h>
24
+ #include <ATen/ops/chunk_ops.h>
25
+ #include <ATen/ops/col_indices_ops.h>
26
+ #include <ATen/ops/crow_indices_ops.h>
27
+ #include <ATen/ops/diagonal_ops.h>
28
+ #include <ATen/ops/expand_ops.h>
29
+ #include <ATen/ops/indices_ops.h>
30
+ #include <ATen/ops/narrow_ops.h>
31
+ #include <ATen/ops/permute_ops.h>
32
+ #include <ATen/ops/row_indices_ops.h>
33
+ #include <ATen/ops/select_ops.h>
34
+ #include <ATen/ops/slice_ops.h>
35
+ #include <ATen/ops/slice_inverse_ops.h>
36
+ #include <ATen/ops/split_ops.h>
37
+ #include <ATen/ops/split_with_sizes_ops.h>
38
+ #include <ATen/ops/squeeze_ops.h>
39
+ #include <ATen/ops/squeeze_ops.h>
40
+ #include <ATen/ops/squeeze_ops.h>
41
+ #include <ATen/ops/t_ops.h>
42
+ #include <ATen/ops/transpose_ops.h>
43
+ #include <ATen/ops/unbind_ops.h>
44
+ #include <ATen/ops/unfold_ops.h>
45
+ #include <ATen/ops/unsqueeze_ops.h>
46
+ #include <ATen/ops/values_ops.h>
47
+ #include <ATen/ops/view_ops.h>
48
+ #include <ATen/ops/view_ops.h>
49
+ #include <ATen/ops/view_as_complex_ops.h>
50
+ #include <ATen/ops/view_as_real_ops.h>
51
+ #endif
52
+
53
+ namespace torch::autograd::generated {
54
+
55
+ using at::Scalar;
56
+ using at::Tensor;
57
+ using at::IntArrayRef;
58
+ using at::ArrayRef;
59
+ using at::Type;
60
+ using at::ScalarType;
61
+ using c10::optional;
62
+ using c10::fmap;
63
+
64
+ #define _CONJ_VIEW_FUNC_AVAILABLE
65
+ struct _ConjViewFunc : public torch::autograd::ViewFunc {
66
+ _ConjViewFunc()
67
+ {};
68
+ virtual ~_ConjViewFunc() override {};
69
+ virtual std::vector<c10::SymInt> get_symints() const override;
70
+ virtual size_t num_symints() const override;
71
+ virtual std::vector<at::Tensor> get_tensors() const override;
72
+ virtual size_t num_tensors() const override;
73
+ virtual at::Tensor operator()(const at::Tensor&) const override;
74
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
75
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
76
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
77
+
78
+ protected:
79
+ virtual void set_symints(std::vector<c10::SymInt>) override;
80
+ virtual void set_tensors(std::vector<at::Tensor>) override;
81
+
82
+ private:
83
+
84
+ };
85
+
86
+ #define _INDICES_VIEW_FUNC_AVAILABLE
87
+ struct _IndicesViewFunc : public torch::autograd::ViewFunc {
88
+ _IndicesViewFunc()
89
+ {};
90
+ virtual ~_IndicesViewFunc() override {};
91
+ virtual std::vector<c10::SymInt> get_symints() const override;
92
+ virtual size_t num_symints() const override;
93
+ virtual std::vector<at::Tensor> get_tensors() const override;
94
+ virtual size_t num_tensors() const override;
95
+ virtual at::Tensor operator()(const at::Tensor&) const override;
96
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
97
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
98
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
99
+
100
+ protected:
101
+ virtual void set_symints(std::vector<c10::SymInt>) override;
102
+ virtual void set_tensors(std::vector<at::Tensor>) override;
103
+
104
+ private:
105
+
106
+ };
107
+
108
+ #define _NEG_VIEW_VIEW_FUNC_AVAILABLE
109
+ struct _NegViewViewFunc : public torch::autograd::ViewFunc {
110
+ _NegViewViewFunc()
111
+ {};
112
+ virtual ~_NegViewViewFunc() override {};
113
+ virtual std::vector<c10::SymInt> get_symints() const override;
114
+ virtual size_t num_symints() const override;
115
+ virtual std::vector<at::Tensor> get_tensors() const override;
116
+ virtual size_t num_tensors() const override;
117
+ virtual at::Tensor operator()(const at::Tensor&) const override;
118
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
119
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
120
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
121
+
122
+ protected:
123
+ virtual void set_symints(std::vector<c10::SymInt>) override;
124
+ virtual void set_tensors(std::vector<at::Tensor>) override;
125
+
126
+ private:
127
+
128
+ };
129
+
130
+ #define _NESTED_GET_VALUES_VIEW_FUNC_AVAILABLE
131
+ struct _NestedGetValuesViewFunc : public torch::autograd::ViewFunc {
132
+ _NestedGetValuesViewFunc()
133
+ {};
134
+ virtual ~_NestedGetValuesViewFunc() override {};
135
+ virtual std::vector<c10::SymInt> get_symints() const override;
136
+ virtual size_t num_symints() const override;
137
+ virtual std::vector<at::Tensor> get_tensors() const override;
138
+ virtual size_t num_tensors() const override;
139
+ virtual at::Tensor operator()(const at::Tensor&) const override;
140
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
141
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
142
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
143
+
144
+ protected:
145
+ virtual void set_symints(std::vector<c10::SymInt>) override;
146
+ virtual void set_tensors(std::vector<at::Tensor>) override;
147
+
148
+ private:
149
+
150
+ };
151
+
152
+ #define _NESTED_VIEW_FROM_BUFFER_VIEW_FUNC_AVAILABLE
153
+ struct _NestedViewFromBufferViewFunc : public torch::autograd::ViewFunc {
154
+ _NestedViewFromBufferViewFunc(const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) : nested_size(nested_size), nested_strides(nested_strides), offsets(offsets)
155
+ {};
156
+ virtual ~_NestedViewFromBufferViewFunc() override {};
157
+ virtual std::vector<c10::SymInt> get_symints() const override;
158
+ virtual size_t num_symints() const override;
159
+ virtual std::vector<at::Tensor> get_tensors() const override;
160
+ virtual size_t num_tensors() const override;
161
+ virtual at::Tensor operator()(const at::Tensor&) const override;
162
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
163
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
164
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
165
+
166
+ protected:
167
+ virtual void set_symints(std::vector<c10::SymInt>) override;
168
+ virtual void set_tensors(std::vector<at::Tensor>) override;
169
+
170
+ private:
171
+ at::Tensor nested_size;
172
+ at::Tensor nested_strides;
173
+ at::Tensor offsets;
174
+ };
175
+
176
+ #define _NESTED_VIEW_FROM_JAGGED_VIEW_FUNC_AVAILABLE
177
+ struct _NestedViewFromJaggedViewFunc : public torch::autograd::ViewFunc {
178
+ _NestedViewFromJaggedViewFunc(const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx) : offsets(offsets), dummy(dummy), lengths(lengths), ragged_idx(ragged_idx)
179
+ {};
180
+ virtual ~_NestedViewFromJaggedViewFunc() override {};
181
+ virtual std::vector<c10::SymInt> get_symints() const override;
182
+ virtual size_t num_symints() const override;
183
+ virtual std::vector<at::Tensor> get_tensors() const override;
184
+ virtual size_t num_tensors() const override;
185
+ virtual at::Tensor operator()(const at::Tensor&) const override;
186
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
187
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
188
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
189
+
190
+ protected:
191
+ virtual void set_symints(std::vector<c10::SymInt>) override;
192
+ virtual void set_tensors(std::vector<at::Tensor>) override;
193
+
194
+ private:
195
+ at::Tensor offsets;
196
+ at::Tensor dummy;
197
+ c10::optional<at::Tensor> lengths;
198
+ int64_t ragged_idx;
199
+ };
200
+
201
+ #define _RESHAPE_ALIAS_VIEW_FUNC_AVAILABLE
202
+ struct _ReshapeAliasViewFunc : public torch::autograd::ViewFunc {
203
+ _ReshapeAliasViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) : size(size.vec()), stride(stride.vec())
204
+ {};
205
+ virtual ~_ReshapeAliasViewFunc() override {};
206
+ virtual std::vector<c10::SymInt> get_symints() const override;
207
+ virtual size_t num_symints() const override;
208
+ virtual std::vector<at::Tensor> get_tensors() const override;
209
+ virtual size_t num_tensors() const override;
210
+ virtual at::Tensor operator()(const at::Tensor&) const override;
211
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
212
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
213
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
214
+
215
+ protected:
216
+ virtual void set_symints(std::vector<c10::SymInt>) override;
217
+ virtual void set_tensors(std::vector<at::Tensor>) override;
218
+
219
+ private:
220
+ ::std::vector<c10::SymInt> size;
221
+ ::std::vector<c10::SymInt> stride;
222
+ };
223
+
224
+ #define _TEST_AUTOGRAD_MULTIPLE_DISPATCH_VIEW_VIEW_FUNC_AVAILABLE
225
+ struct _TestAutogradMultipleDispatchViewViewFunc : public torch::autograd::ViewFunc {
226
+ _TestAutogradMultipleDispatchViewViewFunc()
227
+ {};
228
+ virtual ~_TestAutogradMultipleDispatchViewViewFunc() override {};
229
+ virtual std::vector<c10::SymInt> get_symints() const override;
230
+ virtual size_t num_symints() const override;
231
+ virtual std::vector<at::Tensor> get_tensors() const override;
232
+ virtual size_t num_tensors() const override;
233
+ virtual at::Tensor operator()(const at::Tensor&) const override;
234
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
235
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
236
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
237
+
238
+ protected:
239
+ virtual void set_symints(std::vector<c10::SymInt>) override;
240
+ virtual void set_tensors(std::vector<at::Tensor>) override;
241
+
242
+ private:
243
+
244
+ };
245
+
246
+ #define _VALUES_VIEW_FUNC_AVAILABLE
247
+ struct _ValuesViewFunc : public torch::autograd::ViewFunc {
248
+ _ValuesViewFunc()
249
+ {};
250
+ virtual ~_ValuesViewFunc() override {};
251
+ virtual std::vector<c10::SymInt> get_symints() const override;
252
+ virtual size_t num_symints() const override;
253
+ virtual std::vector<at::Tensor> get_tensors() const override;
254
+ virtual size_t num_tensors() const override;
255
+ virtual at::Tensor operator()(const at::Tensor&) const override;
256
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
257
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
258
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
259
+
260
+ protected:
261
+ virtual void set_symints(std::vector<c10::SymInt>) override;
262
+ virtual void set_tensors(std::vector<at::Tensor>) override;
263
+
264
+ private:
265
+
266
+ };
267
+
268
+ #define ALIAS_VIEW_FUNC_AVAILABLE
269
+ struct AliasViewFunc : public torch::autograd::ViewFunc {
270
+ AliasViewFunc()
271
+ {};
272
+ virtual ~AliasViewFunc() override {};
273
+ virtual std::vector<c10::SymInt> get_symints() const override;
274
+ virtual size_t num_symints() const override;
275
+ virtual std::vector<at::Tensor> get_tensors() const override;
276
+ virtual size_t num_tensors() const override;
277
+ virtual at::Tensor operator()(const at::Tensor&) const override;
278
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
279
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
280
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
281
+
282
+ protected:
283
+ virtual void set_symints(std::vector<c10::SymInt>) override;
284
+ virtual void set_tensors(std::vector<at::Tensor>) override;
285
+
286
+ private:
287
+
288
+ };
289
+
290
+ #define AS_STRIDED_VIEW_FUNC_AVAILABLE
291
+ struct AsStridedViewFunc : public torch::autograd::ViewFunc {
292
+ AsStridedViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) : size(size.vec()), stride(stride.vec()), storage_offset(storage_offset)
293
+ {};
294
+ virtual ~AsStridedViewFunc() override {};
295
+ virtual std::vector<c10::SymInt> get_symints() const override;
296
+ virtual size_t num_symints() const override;
297
+ virtual std::vector<at::Tensor> get_tensors() const override;
298
+ virtual size_t num_tensors() const override;
299
+ virtual at::Tensor operator()(const at::Tensor&) const override;
300
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
301
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
302
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
303
+
304
+ protected:
305
+ virtual void set_symints(std::vector<c10::SymInt>) override;
306
+ virtual void set_tensors(std::vector<at::Tensor>) override;
307
+
308
+ private:
309
+ ::std::vector<c10::SymInt> size;
310
+ ::std::vector<c10::SymInt> stride;
311
+ c10::optional<c10::SymInt> storage_offset;
312
+ };
313
+
314
+ #define CCOL_INDICES_VIEW_FUNC_AVAILABLE
315
+ struct CcolIndicesViewFunc : public torch::autograd::ViewFunc {
316
+ CcolIndicesViewFunc()
317
+ {};
318
+ virtual ~CcolIndicesViewFunc() override {};
319
+ virtual std::vector<c10::SymInt> get_symints() const override;
320
+ virtual size_t num_symints() const override;
321
+ virtual std::vector<at::Tensor> get_tensors() const override;
322
+ virtual size_t num_tensors() const override;
323
+ virtual at::Tensor operator()(const at::Tensor&) const override;
324
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
325
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
326
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
327
+
328
+ protected:
329
+ virtual void set_symints(std::vector<c10::SymInt>) override;
330
+ virtual void set_tensors(std::vector<at::Tensor>) override;
331
+
332
+ private:
333
+
334
+ };
335
+
336
+ #define CHUNK_VIEW_FUNC_AVAILABLE
337
+ struct ChunkViewFunc : public torch::autograd::ViewFunc {
338
+ ChunkViewFunc(int64_t chunks, int64_t dim, int64_t view_idx) : chunks(chunks), dim(dim), view_idx(view_idx)
339
+ {};
340
+ virtual ~ChunkViewFunc() override {};
341
+ virtual std::vector<c10::SymInt> get_symints() const override;
342
+ virtual size_t num_symints() const override;
343
+ virtual std::vector<at::Tensor> get_tensors() const override;
344
+ virtual size_t num_tensors() const override;
345
+ virtual at::Tensor operator()(const at::Tensor&) const override;
346
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
347
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
348
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
349
+
350
+ protected:
351
+ virtual void set_symints(std::vector<c10::SymInt>) override;
352
+ virtual void set_tensors(std::vector<at::Tensor>) override;
353
+
354
+ private:
355
+ int64_t chunks;
356
+ int64_t dim;
357
+ int64_t view_idx;
358
+ };
359
+
360
+ #define COL_INDICES_VIEW_FUNC_AVAILABLE
361
+ struct ColIndicesViewFunc : public torch::autograd::ViewFunc {
362
+ ColIndicesViewFunc()
363
+ {};
364
+ virtual ~ColIndicesViewFunc() override {};
365
+ virtual std::vector<c10::SymInt> get_symints() const override;
366
+ virtual size_t num_symints() const override;
367
+ virtual std::vector<at::Tensor> get_tensors() const override;
368
+ virtual size_t num_tensors() const override;
369
+ virtual at::Tensor operator()(const at::Tensor&) const override;
370
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
371
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
372
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
373
+
374
+ protected:
375
+ virtual void set_symints(std::vector<c10::SymInt>) override;
376
+ virtual void set_tensors(std::vector<at::Tensor>) override;
377
+
378
+ private:
379
+
380
+ };
381
+
382
+ #define CROW_INDICES_VIEW_FUNC_AVAILABLE
383
+ struct CrowIndicesViewFunc : public torch::autograd::ViewFunc {
384
+ CrowIndicesViewFunc()
385
+ {};
386
+ virtual ~CrowIndicesViewFunc() override {};
387
+ virtual std::vector<c10::SymInt> get_symints() const override;
388
+ virtual size_t num_symints() const override;
389
+ virtual std::vector<at::Tensor> get_tensors() const override;
390
+ virtual size_t num_tensors() const override;
391
+ virtual at::Tensor operator()(const at::Tensor&) const override;
392
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
393
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
394
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
395
+
396
+ protected:
397
+ virtual void set_symints(std::vector<c10::SymInt>) override;
398
+ virtual void set_tensors(std::vector<at::Tensor>) override;
399
+
400
+ private:
401
+
402
+ };
403
+
404
+ #define DIAGONAL_VIEW_FUNC_AVAILABLE
405
+ struct DiagonalViewFunc : public torch::autograd::ViewFunc {
406
+ DiagonalViewFunc(int64_t offset, int64_t dim1, int64_t dim2) : offset(offset), dim1(dim1), dim2(dim2)
407
+ {};
408
+ virtual ~DiagonalViewFunc() override {};
409
+ virtual std::vector<c10::SymInt> get_symints() const override;
410
+ virtual size_t num_symints() const override;
411
+ virtual std::vector<at::Tensor> get_tensors() const override;
412
+ virtual size_t num_tensors() const override;
413
+ virtual at::Tensor operator()(const at::Tensor&) const override;
414
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
415
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
416
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
417
+
418
+ protected:
419
+ virtual void set_symints(std::vector<c10::SymInt>) override;
420
+ virtual void set_tensors(std::vector<at::Tensor>) override;
421
+
422
+ private:
423
+ int64_t offset;
424
+ int64_t dim1;
425
+ int64_t dim2;
426
+ };
427
+
428
+ #define EXPAND_VIEW_FUNC_AVAILABLE
429
+ struct ExpandViewFunc : public torch::autograd::ViewFunc {
430
+ ExpandViewFunc(c10::SymIntArrayRef size, bool implicit) : size(size.vec()), implicit(implicit)
431
+ {};
432
+ virtual ~ExpandViewFunc() override {};
433
+ virtual std::vector<c10::SymInt> get_symints() const override;
434
+ virtual size_t num_symints() const override;
435
+ virtual std::vector<at::Tensor> get_tensors() const override;
436
+ virtual size_t num_tensors() const override;
437
+ virtual at::Tensor operator()(const at::Tensor&) const override;
438
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
439
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
440
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
441
+
442
+ protected:
443
+ virtual void set_symints(std::vector<c10::SymInt>) override;
444
+ virtual void set_tensors(std::vector<at::Tensor>) override;
445
+
446
+ private:
447
+ ::std::vector<c10::SymInt> size;
448
+ bool implicit;
449
+ };
450
+
451
+ #define INDICES_VIEW_FUNC_AVAILABLE
452
+ struct IndicesViewFunc : public torch::autograd::ViewFunc {
453
+ IndicesViewFunc()
454
+ {};
455
+ virtual ~IndicesViewFunc() override {};
456
+ virtual std::vector<c10::SymInt> get_symints() const override;
457
+ virtual size_t num_symints() const override;
458
+ virtual std::vector<at::Tensor> get_tensors() const override;
459
+ virtual size_t num_tensors() const override;
460
+ virtual at::Tensor operator()(const at::Tensor&) const override;
461
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
462
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
463
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
464
+
465
+ protected:
466
+ virtual void set_symints(std::vector<c10::SymInt>) override;
467
+ virtual void set_tensors(std::vector<at::Tensor>) override;
468
+
469
+ private:
470
+
471
+ };
472
+
473
+ #define NARROW_VIEW_FUNC_AVAILABLE
474
+ struct NarrowViewFunc : public torch::autograd::ViewFunc {
475
+ NarrowViewFunc(int64_t dim, c10::SymInt start, c10::SymInt length) : dim(dim), start(start), length(length)
476
+ {};
477
+ virtual ~NarrowViewFunc() override {};
478
+ virtual std::vector<c10::SymInt> get_symints() const override;
479
+ virtual size_t num_symints() const override;
480
+ virtual std::vector<at::Tensor> get_tensors() const override;
481
+ virtual size_t num_tensors() const override;
482
+ virtual at::Tensor operator()(const at::Tensor&) const override;
483
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
484
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
485
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
486
+
487
+ protected:
488
+ virtual void set_symints(std::vector<c10::SymInt>) override;
489
+ virtual void set_tensors(std::vector<at::Tensor>) override;
490
+
491
+ private:
492
+ int64_t dim;
493
+ c10::SymInt start;
494
+ c10::SymInt length;
495
+ };
496
+
497
+ #define PERMUTE_VIEW_FUNC_AVAILABLE
498
+ struct PermuteViewFunc : public torch::autograd::ViewFunc {
499
+ PermuteViewFunc(at::IntArrayRef dims) : dims(dims.vec())
500
+ {};
501
+ virtual ~PermuteViewFunc() override {};
502
+ virtual std::vector<c10::SymInt> get_symints() const override;
503
+ virtual size_t num_symints() const override;
504
+ virtual std::vector<at::Tensor> get_tensors() const override;
505
+ virtual size_t num_tensors() const override;
506
+ virtual at::Tensor operator()(const at::Tensor&) const override;
507
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
508
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
509
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
510
+
511
+ protected:
512
+ virtual void set_symints(std::vector<c10::SymInt>) override;
513
+ virtual void set_tensors(std::vector<at::Tensor>) override;
514
+
515
+ private:
516
+ ::std::vector<int64_t> dims;
517
+ };
518
+
519
+ #define ROW_INDICES_VIEW_FUNC_AVAILABLE
520
+ struct RowIndicesViewFunc : public torch::autograd::ViewFunc {
521
+ RowIndicesViewFunc()
522
+ {};
523
+ virtual ~RowIndicesViewFunc() override {};
524
+ virtual std::vector<c10::SymInt> get_symints() const override;
525
+ virtual size_t num_symints() const override;
526
+ virtual std::vector<at::Tensor> get_tensors() const override;
527
+ virtual size_t num_tensors() const override;
528
+ virtual at::Tensor operator()(const at::Tensor&) const override;
529
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
530
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
531
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
532
+
533
+ protected:
534
+ virtual void set_symints(std::vector<c10::SymInt>) override;
535
+ virtual void set_tensors(std::vector<at::Tensor>) override;
536
+
537
+ private:
538
+
539
+ };
540
+
541
+ #define SELECT_INT_VIEW_FUNC_AVAILABLE
542
+ struct SelectIntViewFunc : public torch::autograd::ViewFunc {
543
+ SelectIntViewFunc(int64_t dim, c10::SymInt index) : dim(dim), index(index)
544
+ {};
545
+ virtual ~SelectIntViewFunc() override {};
546
+ virtual std::vector<c10::SymInt> get_symints() const override;
547
+ virtual size_t num_symints() const override;
548
+ virtual std::vector<at::Tensor> get_tensors() const override;
549
+ virtual size_t num_tensors() const override;
550
+ virtual at::Tensor operator()(const at::Tensor&) const override;
551
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
552
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
553
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
554
+
555
+ protected:
556
+ virtual void set_symints(std::vector<c10::SymInt>) override;
557
+ virtual void set_tensors(std::vector<at::Tensor>) override;
558
+
559
+ private:
560
+ int64_t dim;
561
+ c10::SymInt index;
562
+ };
563
+
564
+ #define SLICE_TENSOR_VIEW_FUNC_AVAILABLE
565
+ struct SliceTensorViewFunc : public torch::autograd::ViewFunc {
566
+ SliceTensorViewFunc(int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) : dim(dim), start(start), end(end), step(step)
567
+ {};
568
+ virtual ~SliceTensorViewFunc() override {};
569
+ virtual std::vector<c10::SymInt> get_symints() const override;
570
+ virtual size_t num_symints() const override;
571
+ virtual std::vector<at::Tensor> get_tensors() const override;
572
+ virtual size_t num_tensors() const override;
573
+ virtual at::Tensor operator()(const at::Tensor&) const override;
574
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
575
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
576
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
577
+
578
+ protected:
579
+ virtual void set_symints(std::vector<c10::SymInt>) override;
580
+ virtual void set_tensors(std::vector<at::Tensor>) override;
581
+
582
+ private:
583
+ int64_t dim;
584
+ c10::optional<c10::SymInt> start;
585
+ c10::optional<c10::SymInt> end;
586
+ c10::SymInt step;
587
+ };
588
+
589
+ #define SLICE_INVERSE_VIEW_FUNC_AVAILABLE
590
+ struct SliceInverseViewFunc : public torch::autograd::ViewFunc {
591
+ SliceInverseViewFunc(const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) : src(src), dim(dim), start(start), end(end), step(step)
592
+ {};
593
+ virtual ~SliceInverseViewFunc() override {};
594
+ virtual std::vector<c10::SymInt> get_symints() const override;
595
+ virtual size_t num_symints() const override;
596
+ virtual std::vector<at::Tensor> get_tensors() const override;
597
+ virtual size_t num_tensors() const override;
598
+ virtual at::Tensor operator()(const at::Tensor&) const override;
599
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
600
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
601
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
602
+
603
+ protected:
604
+ virtual void set_symints(std::vector<c10::SymInt>) override;
605
+ virtual void set_tensors(std::vector<at::Tensor>) override;
606
+
607
+ private:
608
+ at::Tensor src;
609
+ int64_t dim;
610
+ c10::optional<c10::SymInt> start;
611
+ c10::optional<c10::SymInt> end;
612
+ c10::SymInt step;
613
+ };
614
+
615
+ #define SPLIT_TENSOR_VIEW_FUNC_AVAILABLE
616
+ struct SplitTensorViewFunc : public torch::autograd::ViewFunc {
617
+ SplitTensorViewFunc(c10::SymInt split_size, int64_t dim, int64_t view_idx) : split_size(split_size), dim(dim), view_idx(view_idx)
618
+ {};
619
+ virtual ~SplitTensorViewFunc() override {};
620
+ virtual std::vector<c10::SymInt> get_symints() const override;
621
+ virtual size_t num_symints() const override;
622
+ virtual std::vector<at::Tensor> get_tensors() const override;
623
+ virtual size_t num_tensors() const override;
624
+ virtual at::Tensor operator()(const at::Tensor&) const override;
625
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
626
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
627
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
628
+
629
+ protected:
630
+ virtual void set_symints(std::vector<c10::SymInt>) override;
631
+ virtual void set_tensors(std::vector<at::Tensor>) override;
632
+
633
+ private:
634
+ c10::SymInt split_size;
635
+ int64_t dim;
636
+ int64_t view_idx;
637
+ };
638
+
639
+ #define SPLIT_WITH_SIZES_VIEW_FUNC_AVAILABLE
640
+ struct SplitWithSizesViewFunc : public torch::autograd::ViewFunc {
641
+ SplitWithSizesViewFunc(c10::SymIntArrayRef split_sizes, int64_t dim, int64_t view_idx) : split_sizes(split_sizes.vec()), dim(dim), view_idx(view_idx)
642
+ {};
643
+ virtual ~SplitWithSizesViewFunc() override {};
644
+ virtual std::vector<c10::SymInt> get_symints() const override;
645
+ virtual size_t num_symints() const override;
646
+ virtual std::vector<at::Tensor> get_tensors() const override;
647
+ virtual size_t num_tensors() const override;
648
+ virtual at::Tensor operator()(const at::Tensor&) const override;
649
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
650
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
651
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
652
+
653
+ protected:
654
+ virtual void set_symints(std::vector<c10::SymInt>) override;
655
+ virtual void set_tensors(std::vector<at::Tensor>) override;
656
+
657
+ private:
658
+ ::std::vector<c10::SymInt> split_sizes;
659
+ int64_t dim;
660
+ int64_t view_idx;
661
+ };
662
+
663
+ #define SQUEEZE_VIEW_FUNC_AVAILABLE
664
+ struct SqueezeViewFunc : public torch::autograd::ViewFunc {
665
+ SqueezeViewFunc()
666
+ {};
667
+ virtual ~SqueezeViewFunc() override {};
668
+ virtual std::vector<c10::SymInt> get_symints() const override;
669
+ virtual size_t num_symints() const override;
670
+ virtual std::vector<at::Tensor> get_tensors() const override;
671
+ virtual size_t num_tensors() const override;
672
+ virtual at::Tensor operator()(const at::Tensor&) const override;
673
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
674
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
675
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
676
+
677
+ protected:
678
+ virtual void set_symints(std::vector<c10::SymInt>) override;
679
+ virtual void set_tensors(std::vector<at::Tensor>) override;
680
+
681
+ private:
682
+
683
+ };
684
+
685
+ #define SQUEEZE_DIM_VIEW_FUNC_AVAILABLE
686
+ struct SqueezeDimViewFunc : public torch::autograd::ViewFunc {
687
+ SqueezeDimViewFunc(int64_t dim) : dim(dim)
688
+ {};
689
+ virtual ~SqueezeDimViewFunc() override {};
690
+ virtual std::vector<c10::SymInt> get_symints() const override;
691
+ virtual size_t num_symints() const override;
692
+ virtual std::vector<at::Tensor> get_tensors() const override;
693
+ virtual size_t num_tensors() const override;
694
+ virtual at::Tensor operator()(const at::Tensor&) const override;
695
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
696
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
697
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
698
+
699
+ protected:
700
+ virtual void set_symints(std::vector<c10::SymInt>) override;
701
+ virtual void set_tensors(std::vector<at::Tensor>) override;
702
+
703
+ private:
704
+ int64_t dim;
705
+ };
706
+
707
+ #define SQUEEZE_DIMS_VIEW_FUNC_AVAILABLE
708
+ struct SqueezeDimsViewFunc : public torch::autograd::ViewFunc {
709
+ SqueezeDimsViewFunc(at::IntArrayRef dim) : dim(dim.vec())
710
+ {};
711
+ virtual ~SqueezeDimsViewFunc() override {};
712
+ virtual std::vector<c10::SymInt> get_symints() const override;
713
+ virtual size_t num_symints() const override;
714
+ virtual std::vector<at::Tensor> get_tensors() const override;
715
+ virtual size_t num_tensors() const override;
716
+ virtual at::Tensor operator()(const at::Tensor&) const override;
717
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
718
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
719
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
720
+
721
+ protected:
722
+ virtual void set_symints(std::vector<c10::SymInt>) override;
723
+ virtual void set_tensors(std::vector<at::Tensor>) override;
724
+
725
+ private:
726
+ ::std::vector<int64_t> dim;
727
+ };
728
+
729
+ #define T_VIEW_FUNC_AVAILABLE
730
+ struct TViewFunc : public torch::autograd::ViewFunc {
731
+ TViewFunc()
732
+ {};
733
+ virtual ~TViewFunc() override {};
734
+ virtual std::vector<c10::SymInt> get_symints() const override;
735
+ virtual size_t num_symints() const override;
736
+ virtual std::vector<at::Tensor> get_tensors() const override;
737
+ virtual size_t num_tensors() const override;
738
+ virtual at::Tensor operator()(const at::Tensor&) const override;
739
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
740
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
741
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
742
+
743
+ protected:
744
+ virtual void set_symints(std::vector<c10::SymInt>) override;
745
+ virtual void set_tensors(std::vector<at::Tensor>) override;
746
+
747
+ private:
748
+
749
+ };
750
+
751
+ #define TRANSPOSE_INT_VIEW_FUNC_AVAILABLE
752
+ struct TransposeIntViewFunc : public torch::autograd::ViewFunc {
753
+ TransposeIntViewFunc(int64_t dim0, int64_t dim1) : dim0(dim0), dim1(dim1)
754
+ {};
755
+ virtual ~TransposeIntViewFunc() override {};
756
+ virtual std::vector<c10::SymInt> get_symints() const override;
757
+ virtual size_t num_symints() const override;
758
+ virtual std::vector<at::Tensor> get_tensors() const override;
759
+ virtual size_t num_tensors() const override;
760
+ virtual at::Tensor operator()(const at::Tensor&) const override;
761
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
762
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
763
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
764
+
765
+ protected:
766
+ virtual void set_symints(std::vector<c10::SymInt>) override;
767
+ virtual void set_tensors(std::vector<at::Tensor>) override;
768
+
769
+ private:
770
+ int64_t dim0;
771
+ int64_t dim1;
772
+ };
773
+
774
+ #define UNBIND_INT_VIEW_FUNC_AVAILABLE
775
+ struct UnbindIntViewFunc : public torch::autograd::ViewFunc {
776
+ UnbindIntViewFunc(int64_t dim, int64_t view_idx) : dim(dim), view_idx(view_idx)
777
+ {};
778
+ virtual ~UnbindIntViewFunc() override {};
779
+ virtual std::vector<c10::SymInt> get_symints() const override;
780
+ virtual size_t num_symints() const override;
781
+ virtual std::vector<at::Tensor> get_tensors() const override;
782
+ virtual size_t num_tensors() const override;
783
+ virtual at::Tensor operator()(const at::Tensor&) const override;
784
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
785
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
786
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
787
+
788
+ protected:
789
+ virtual void set_symints(std::vector<c10::SymInt>) override;
790
+ virtual void set_tensors(std::vector<at::Tensor>) override;
791
+
792
+ private:
793
+ int64_t dim;
794
+ int64_t view_idx;
795
+ };
796
+
797
+ #define UNFOLD_VIEW_FUNC_AVAILABLE
798
+ struct UnfoldViewFunc : public torch::autograd::ViewFunc {
799
+ UnfoldViewFunc(int64_t dimension, int64_t size, int64_t step) : dimension(dimension), size(size), step(step)
800
+ {};
801
+ virtual ~UnfoldViewFunc() override {};
802
+ virtual std::vector<c10::SymInt> get_symints() const override;
803
+ virtual size_t num_symints() const override;
804
+ virtual std::vector<at::Tensor> get_tensors() const override;
805
+ virtual size_t num_tensors() const override;
806
+ virtual at::Tensor operator()(const at::Tensor&) const override;
807
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
808
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
809
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
810
+
811
+ protected:
812
+ virtual void set_symints(std::vector<c10::SymInt>) override;
813
+ virtual void set_tensors(std::vector<at::Tensor>) override;
814
+
815
+ private:
816
+ int64_t dimension;
817
+ int64_t size;
818
+ int64_t step;
819
+ };
820
+
821
+ #define UNSQUEEZE_VIEW_FUNC_AVAILABLE
822
+ struct UnsqueezeViewFunc : public torch::autograd::ViewFunc {
823
+ UnsqueezeViewFunc(int64_t dim) : dim(dim)
824
+ {};
825
+ virtual ~UnsqueezeViewFunc() override {};
826
+ virtual std::vector<c10::SymInt> get_symints() const override;
827
+ virtual size_t num_symints() const override;
828
+ virtual std::vector<at::Tensor> get_tensors() const override;
829
+ virtual size_t num_tensors() const override;
830
+ virtual at::Tensor operator()(const at::Tensor&) const override;
831
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
832
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
833
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
834
+
835
+ protected:
836
+ virtual void set_symints(std::vector<c10::SymInt>) override;
837
+ virtual void set_tensors(std::vector<at::Tensor>) override;
838
+
839
+ private:
840
+ int64_t dim;
841
+ };
842
+
843
+ #define VALUES_VIEW_FUNC_AVAILABLE
844
+ struct ValuesViewFunc : public torch::autograd::ViewFunc {
845
+ ValuesViewFunc()
846
+ {};
847
+ virtual ~ValuesViewFunc() override {};
848
+ virtual std::vector<c10::SymInt> get_symints() const override;
849
+ virtual size_t num_symints() const override;
850
+ virtual std::vector<at::Tensor> get_tensors() const override;
851
+ virtual size_t num_tensors() const override;
852
+ virtual at::Tensor operator()(const at::Tensor&) const override;
853
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
854
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
855
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
856
+
857
+ protected:
858
+ virtual void set_symints(std::vector<c10::SymInt>) override;
859
+ virtual void set_tensors(std::vector<at::Tensor>) override;
860
+
861
+ private:
862
+
863
+ };
864
+
865
+ #define VIEW_VIEW_FUNC_AVAILABLE
866
+ struct ViewViewFunc : public torch::autograd::ViewFunc {
867
+ ViewViewFunc(c10::SymIntArrayRef size) : size(size.vec())
868
+ {};
869
+ virtual ~ViewViewFunc() override {};
870
+ virtual std::vector<c10::SymInt> get_symints() const override;
871
+ virtual size_t num_symints() const override;
872
+ virtual std::vector<at::Tensor> get_tensors() const override;
873
+ virtual size_t num_tensors() const override;
874
+ virtual at::Tensor operator()(const at::Tensor&) const override;
875
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
876
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
877
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
878
+
879
+ protected:
880
+ virtual void set_symints(std::vector<c10::SymInt>) override;
881
+ virtual void set_tensors(std::vector<at::Tensor>) override;
882
+
883
+ private:
884
+ ::std::vector<c10::SymInt> size;
885
+ };
886
+
887
+ #define VIEW_DTYPE_VIEW_FUNC_AVAILABLE
888
+ struct ViewDtypeViewFunc : public torch::autograd::ViewFunc {
889
+ ViewDtypeViewFunc(at::ScalarType dtype) : dtype(dtype)
890
+ {};
891
+ virtual ~ViewDtypeViewFunc() override {};
892
+ virtual std::vector<c10::SymInt> get_symints() const override;
893
+ virtual size_t num_symints() const override;
894
+ virtual std::vector<at::Tensor> get_tensors() const override;
895
+ virtual size_t num_tensors() const override;
896
+ virtual at::Tensor operator()(const at::Tensor&) const override;
897
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
898
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
899
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
900
+
901
+ protected:
902
+ virtual void set_symints(std::vector<c10::SymInt>) override;
903
+ virtual void set_tensors(std::vector<at::Tensor>) override;
904
+
905
+ private:
906
+ at::ScalarType dtype;
907
+ };
908
+
909
+ #define VIEW_AS_COMPLEX_VIEW_FUNC_AVAILABLE
910
+ struct ViewAsComplexViewFunc : public torch::autograd::ViewFunc {
911
+ ViewAsComplexViewFunc()
912
+ {};
913
+ virtual ~ViewAsComplexViewFunc() override {};
914
+ virtual std::vector<c10::SymInt> get_symints() const override;
915
+ virtual size_t num_symints() const override;
916
+ virtual std::vector<at::Tensor> get_tensors() const override;
917
+ virtual size_t num_tensors() const override;
918
+ virtual at::Tensor operator()(const at::Tensor&) const override;
919
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
920
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
921
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
922
+
923
+ protected:
924
+ virtual void set_symints(std::vector<c10::SymInt>) override;
925
+ virtual void set_tensors(std::vector<at::Tensor>) override;
926
+
927
+ private:
928
+
929
+ };
930
+
931
+ #define VIEW_AS_REAL_VIEW_FUNC_AVAILABLE
932
+ struct ViewAsRealViewFunc : public torch::autograd::ViewFunc {
933
+ ViewAsRealViewFunc()
934
+ {};
935
+ virtual ~ViewAsRealViewFunc() override {};
936
+ virtual std::vector<c10::SymInt> get_symints() const override;
937
+ virtual size_t num_symints() const override;
938
+ virtual std::vector<at::Tensor> get_tensors() const override;
939
+ virtual size_t num_tensors() const override;
940
+ virtual at::Tensor operator()(const at::Tensor&) const override;
941
+ virtual std::unique_ptr<ViewFunc> clone_and_set(
942
+ std::optional<std::vector<c10::SymInt>> = c10::nullopt,
943
+ std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
944
+
945
+ protected:
946
+ virtual void set_symints(std::vector<c10::SymInt>) override;
947
+ virtual void set_tensors(std::vector<at::Tensor>) override;
948
+
949
+ private:
950
+
951
+ };
952
+
953
+ } // namespace torch::autograd::generated
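A minimal sketch of how one of the generated view funcs declared above might be exercised; the tensor values, the choice of UnsqueezeViewFunc, and the include paths are illustrative assumptions, not part of the diff.

    #include <torch/torch.h>
    #include <torch/csrc/autograd/generated/ViewFuncs.h>

    void replay_view_example() {
      at::Tensor base = torch::arange(6).reshape({2, 3});
      // Construct the generated view func with the same arguments the original view op took.
      torch::autograd::generated::UnsqueezeViewFunc fn(/*dim=*/0);
      at::Tensor view = fn(base);        // expected to reproduce base.unsqueeze(0) (view replay)
      auto copy = fn.clone_and_set();    // deep copy; this func carries no SymInt/Tensor state to override
    }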
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <Python.h>
4
+
5
+ // @generated from ../tools/autograd/templates/python_functions.h
6
+
7
+ // Python bindings for automatically generated autograd functions
8
+
9
+ namespace torch { namespace autograd { namespace generated {
10
+
11
+ void initialize_autogenerated_functions_0(PyObject* module);
12
+ void initialize_autogenerated_functions_1(PyObject* module);
13
+ void initialize_autogenerated_functions_2(PyObject* module);
14
+ void initialize_autogenerated_functions_3(PyObject* module);
15
+ void initialize_autogenerated_functions_4(PyObject* module);
16
+
17
+ inline void initialize_autogenerated_functions(PyObject* module) {
18
+ initialize_autogenerated_functions_0(module);
19
+ initialize_autogenerated_functions_1(module);
20
+ initialize_autogenerated_functions_2(module);
21
+ initialize_autogenerated_functions_3(module);
22
+ initialize_autogenerated_functions_4(module);
23
+ }
24
+
25
+ }}} // namespace torch::autograd::generated
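A hedged sketch of how the sharded initializers above could be wired in from a module-init routine; the wrapper function name is hypothetical.

    #include <Python.h>
    #include <torch/csrc/autograd/generated/python_functions.h>

    // Registers all autogenerated autograd function bindings on `module`
    // by delegating to the sharded initializers declared above.
    void register_generated_autograd_functions(PyObject* module) {
      torch::autograd::generated::initialize_autogenerated_functions(module);
    }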
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h ADDED
@@ -0,0 +1,98 @@
1
+ #pragma once
2
+
3
+ namespace torch {
4
+ namespace autograd {
5
+ namespace generated {
6
+
7
+ PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_structseq();
8
+ PyTypeObject* get__fused_moving_avg_obs_fq_helper_structseq();
9
+ PyTypeObject* get__linalg_det_structseq();
10
+ PyTypeObject* get__linalg_det_out_structseq();
11
+ PyTypeObject* get__linalg_eigh_structseq();
12
+ PyTypeObject* get__linalg_eigh_out_structseq();
13
+ PyTypeObject* get__linalg_slogdet_structseq();
14
+ PyTypeObject* get__linalg_slogdet_out_structseq();
15
+ PyTypeObject* get__linalg_solve_ex_structseq();
16
+ PyTypeObject* get__linalg_solve_ex_out_structseq();
17
+ PyTypeObject* get__linalg_svd_structseq();
18
+ PyTypeObject* get__linalg_svd_out_structseq();
19
+ PyTypeObject* get__lu_with_info_structseq();
20
+ PyTypeObject* get__scaled_dot_product_cudnn_attention_structseq();
21
+ PyTypeObject* get__scaled_dot_product_efficient_attention_structseq();
22
+ PyTypeObject* get__scaled_dot_product_flash_attention_structseq();
23
+ PyTypeObject* get__scaled_dot_product_flash_attention_for_cpu_structseq();
24
+ PyTypeObject* get__unpack_dual_structseq();
25
+ PyTypeObject* get_aminmax_structseq();
26
+ PyTypeObject* get_aminmax_out_structseq();
27
+ PyTypeObject* get_cummax_structseq();
28
+ PyTypeObject* get_cummax_out_structseq();
29
+ PyTypeObject* get_cummin_structseq();
30
+ PyTypeObject* get_cummin_out_structseq();
31
+ PyTypeObject* get_frexp_structseq();
32
+ PyTypeObject* get_frexp_out_structseq();
33
+ PyTypeObject* get_geqrf_out_structseq();
34
+ PyTypeObject* get_geqrf_structseq();
35
+ PyTypeObject* get_histogram_out_structseq();
36
+ PyTypeObject* get_histogram_structseq();
37
+ PyTypeObject* get_histogramdd_structseq();
38
+ PyTypeObject* get_kthvalue_structseq();
39
+ PyTypeObject* get_kthvalue_out_structseq();
40
+ PyTypeObject* get_linalg_cholesky_ex_structseq();
41
+ PyTypeObject* get_linalg_cholesky_ex_out_structseq();
42
+ PyTypeObject* get_linalg_eig_structseq();
43
+ PyTypeObject* get_linalg_eig_out_structseq();
44
+ PyTypeObject* get_linalg_eigh_structseq();
45
+ PyTypeObject* get_linalg_eigh_out_structseq();
46
+ PyTypeObject* get_linalg_inv_ex_structseq();
47
+ PyTypeObject* get_linalg_inv_ex_out_structseq();
48
+ PyTypeObject* get_linalg_ldl_factor_structseq();
49
+ PyTypeObject* get_linalg_ldl_factor_out_structseq();
50
+ PyTypeObject* get_linalg_ldl_factor_ex_structseq();
51
+ PyTypeObject* get_linalg_ldl_factor_ex_out_structseq();
52
+ PyTypeObject* get_linalg_lstsq_structseq();
53
+ PyTypeObject* get_linalg_lstsq_out_structseq();
54
+ PyTypeObject* get_linalg_lu_structseq();
55
+ PyTypeObject* get_linalg_lu_out_structseq();
56
+ PyTypeObject* get_linalg_lu_factor_structseq();
57
+ PyTypeObject* get_linalg_lu_factor_out_structseq();
58
+ PyTypeObject* get_linalg_lu_factor_ex_structseq();
59
+ PyTypeObject* get_linalg_lu_factor_ex_out_structseq();
60
+ PyTypeObject* get_linalg_qr_structseq();
61
+ PyTypeObject* get_linalg_qr_out_structseq();
62
+ PyTypeObject* get_linalg_slogdet_structseq();
63
+ PyTypeObject* get_linalg_slogdet_out_structseq();
64
+ PyTypeObject* get_linalg_solve_ex_structseq();
65
+ PyTypeObject* get_linalg_solve_ex_out_structseq();
66
+ PyTypeObject* get_linalg_svd_structseq();
67
+ PyTypeObject* get_linalg_svd_out_structseq();
68
+ PyTypeObject* get_lu_unpack_structseq();
69
+ PyTypeObject* get_lu_unpack_out_structseq();
70
+ PyTypeObject* get_max_structseq();
71
+ PyTypeObject* get_max_out_structseq();
72
+ PyTypeObject* get_median_structseq();
73
+ PyTypeObject* get_median_out_structseq();
74
+ PyTypeObject* get_min_structseq();
75
+ PyTypeObject* get_min_out_structseq();
76
+ PyTypeObject* get_mode_structseq();
77
+ PyTypeObject* get_mode_out_structseq();
78
+ PyTypeObject* get_nanmedian_structseq();
79
+ PyTypeObject* get_nanmedian_out_structseq();
80
+ PyTypeObject* get_qr_out_structseq();
81
+ PyTypeObject* get_qr_structseq();
82
+ PyTypeObject* get_slogdet_structseq();
83
+ PyTypeObject* get_slogdet_out_structseq();
84
+ PyTypeObject* get_sort_out_structseq();
85
+ PyTypeObject* get_sort_structseq();
86
+ PyTypeObject* get_svd_out_structseq();
87
+ PyTypeObject* get_svd_structseq();
88
+ PyTypeObject* get_topk_out_structseq();
89
+ PyTypeObject* get_topk_structseq();
90
+ PyTypeObject* get_triangular_solve_out_structseq();
91
+ PyTypeObject* get_triangular_solve_structseq();
92
+
93
+ }
94
+
95
+ void initReturnTypes(PyObject* module);
96
+
97
+ } // namespace autograd
98
+ } // namespace torch
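A short usage sketch for the accessors above, using the standard CPython type-check API; the helper `is_topk_return` is illustrative and not part of the header.

    #include <Python.h>
    #include <torch/csrc/autograd/generated/python_return_types.h>

    // Returns true if `obj` is an instance of the structseq type used for torch.topk results.
    bool is_topk_return(PyObject* obj) {
      PyTypeObject* type = torch::autograd::generated::get_topk_structseq();
      return PyObject_TypeCheck(obj, type) != 0;
    }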
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h ADDED
@@ -0,0 +1,736 @@
1
+ #pragma once
2
+
3
+ // @generated from ../tools/autograd/templates/variable_factories.h
4
+
5
+ #include <ATen/core/Tensor.h>
6
+ #include <ATen/TracerMode.h>
7
+ #include <ATen/core/grad_mode.h>
8
+ #include <c10/util/ArrayRef.h>
9
+ #include <c10/core/MemoryFormat.h>
10
+ #include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
11
+ #include <torch/csrc/autograd/variable.h>
12
+
13
+ #ifndef AT_PER_OPERATOR_HEADERS
14
+ #include <ATen/Functions.h>
15
+ #else
16
+ #include <ATen/ops/from_blob.h>
17
+ #include <ATen/ops/_make_dep_token.h>
18
+ #include <ATen/ops/_cudnn_init_dropout_state.h>
19
+ #include <ATen/ops/arange.h>
20
+ #include <ATen/ops/arange.h>
21
+ #include <ATen/ops/arange.h>
22
+ #include <ATen/ops/bartlett_window.h>
23
+ #include <ATen/ops/bartlett_window.h>
24
+ #include <ATen/ops/blackman_window.h>
25
+ #include <ATen/ops/blackman_window.h>
26
+ #include <ATen/ops/empty.h>
27
+ #include <ATen/ops/empty.h>
28
+ #include <ATen/ops/empty_permuted.h>
29
+ #include <ATen/ops/_empty_affine_quantized.h>
30
+ #include <ATen/ops/_empty_per_channel_affine_quantized.h>
31
+ #include <ATen/ops/empty_quantized.h>
32
+ #include <ATen/ops/empty_like.h>
33
+ #include <ATen/ops/empty_strided.h>
34
+ #include <ATen/ops/eye.h>
35
+ #include <ATen/ops/eye.h>
36
+ #include <ATen/ops/full.h>
37
+ #include <ATen/ops/full.h>
38
+ #include <ATen/ops/full_like.h>
39
+ #include <ATen/ops/from_file.h>
40
+ #include <ATen/ops/hann_window.h>
41
+ #include <ATen/ops/hann_window.h>
42
+ #include <ATen/ops/hamming_window.h>
43
+ #include <ATen/ops/hamming_window.h>
44
+ #include <ATen/ops/hamming_window.h>
45
+ #include <ATen/ops/hamming_window.h>
46
+ #include <ATen/ops/kaiser_window.h>
47
+ #include <ATen/ops/kaiser_window.h>
48
+ #include <ATen/ops/kaiser_window.h>
49
+ #include <ATen/ops/linspace.h>
50
+ #include <ATen/ops/linspace.h>
51
+ #include <ATen/ops/linspace.h>
52
+ #include <ATen/ops/linspace.h>
53
+ #include <ATen/ops/logspace.h>
54
+ #include <ATen/ops/logspace.h>
55
+ #include <ATen/ops/logspace.h>
56
+ #include <ATen/ops/logspace.h>
57
+ #include <ATen/ops/ones.h>
58
+ #include <ATen/ops/ones.h>
59
+ #include <ATen/ops/ones_like.h>
60
+ #include <ATen/ops/scalar_tensor.h>
61
+ #include <ATen/ops/rand.h>
62
+ #include <ATen/ops/rand.h>
63
+ #include <ATen/ops/rand.h>
64
+ #include <ATen/ops/rand.h>
65
+ #include <ATen/ops/rand_like.h>
66
+ #include <ATen/ops/randint.h>
67
+ #include <ATen/ops/randint.h>
68
+ #include <ATen/ops/randint.h>
69
+ #include <ATen/ops/randint.h>
70
+ #include <ATen/ops/randint_like.h>
71
+ #include <ATen/ops/randint_like.h>
72
+ #include <ATen/ops/randn.h>
73
+ #include <ATen/ops/randn.h>
74
+ #include <ATen/ops/randn.h>
75
+ #include <ATen/ops/randn.h>
76
+ #include <ATen/ops/randn_like.h>
77
+ #include <ATen/ops/randperm.h>
78
+ #include <ATen/ops/randperm.h>
79
+ #include <ATen/ops/range.h>
80
+ #include <ATen/ops/range.h>
81
+ #include <ATen/ops/zeros.h>
82
+ #include <ATen/ops/_efficientzerotensor.h>
83
+ #include <ATen/ops/zeros.h>
84
+ #include <ATen/ops/zeros_like.h>
85
+ #include <ATen/ops/sparse_compressed_tensor.h>
86
+ #include <ATen/ops/sparse_csr_tensor.h>
87
+ #include <ATen/ops/sparse_csc_tensor.h>
88
+ #include <ATen/ops/sparse_bsr_tensor.h>
89
+ #include <ATen/ops/sparse_bsc_tensor.h>
90
+ #include <ATen/ops/sparse_compressed_tensor.h>
91
+ #include <ATen/ops/sparse_csr_tensor.h>
92
+ #include <ATen/ops/sparse_csc_tensor.h>
93
+ #include <ATen/ops/sparse_bsr_tensor.h>
94
+ #include <ATen/ops/sparse_bsc_tensor.h>
95
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
96
+ #include <ATen/ops/_sparse_csr_tensor_unsafe.h>
97
+ #include <ATen/ops/_sparse_csc_tensor_unsafe.h>
98
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
99
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
100
+ #include <ATen/ops/sparse_coo_tensor.h>
101
+ #include <ATen/ops/sparse_coo_tensor.h>
102
+ #include <ATen/ops/sparse_coo_tensor.h>
103
+ #include <ATen/ops/_sparse_coo_tensor_unsafe.h>
104
+ #include <ATen/ops/_sparse_coo_tensor_with_dims.h>
105
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
106
+ #include <ATen/ops/_to_copy.h>
107
+ #include <ATen/ops/tril_indices.h>
108
+ #include <ATen/ops/triu_indices.h>
109
+ #include <ATen/ops/normal.h>
110
+ #include <ATen/ops/fft_fftfreq.h>
111
+ #include <ATen/ops/fft_rfftfreq.h>
112
+ #endif
113
+
114
+ #include <functional>
115
+ #include <initializer_list>
116
+ #include <utility>
117
+
118
+ namespace torch {
119
+
120
+ /// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types
121
+ /// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to
122
+ /// support it in the future by iterating over all sub-lists to find
123
+ /// the largest data type that can represent all of the elements, or by using
124
+ /// variadic templates.
125
+ ///
126
+ /// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` /
127
+ /// (nested) braced-init-list of floating-point types always produces a tensor of dtype
128
+ /// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior.
129
+ ///
130
+ /// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` /
131
+ /// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong`
132
+ /// (aka. int64_t), matching Python `torch.tensor` behavior.
133
+ ///
134
+ /// NOTE: The following dtypes are not supported by `torch::tensor` currently:
135
+ /// - `unsigned int`
136
+ /// - `unsigned long int`
137
+ /// - `unsigned long long int`
138
+ /// - `long long int`
139
+ inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) {
140
+ return autograd::make_variable(
141
+ // note: we remove the requires_grad setting from the TensorOptions because
142
+ // it is ignored anyways (and we actually have an assertion that it isn't set
143
+ // which would fail otherwise). We handle requires_grad explicitly here
144
+ // instead of passing it through to the kernel.
145
+ tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)),
146
+ options.requires_grad());
147
+ }
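The dtype rules from the NOTE above, shown concretely; this snippet is an illustration, not part of the generated header.

    #include <torch/torch.h>

    void tensor_dtype_examples() {
      auto f = torch::tensor({1.5, 2.5});                           // floating-point list -> torch::get_default_dtype()
      auto i = torch::tensor({1, 2, 3});                            // integer list -> at::kLong
      auto g = torch::tensor({1.0, 2.0}, torch::requires_grad());   // requires_grad applied by the wrapper above
    }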
148
+
149
+ /// A generic deleter function.
150
+ using Deleter = std::function<void(void*)>;
151
+ using at::MemoryFormat;
152
+
153
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
154
+ /// original data. `sizes` should specify the shape of the tensor, `strides` the
155
+ /// stride in each dimension. The `deleter` function (a
156
+ /// `std::function<void(void*)>`) will be called on the `data` when the Tensor
157
+ /// data would normally be deallocated. The `TensorOptions` specify additional
158
+ /// configuration options for the returned tensor, such as what type to
159
+ /// interpret the `data` as.
160
+ inline at::Tensor from_blob(
161
+ void* data,
162
+ at::IntArrayRef sizes,
163
+ at::IntArrayRef strides,
164
+ const Deleter& deleter,
165
+ const at::TensorOptions& options = at::TensorOptions()) {
166
+ at::Tensor tensor = ([&]() {
167
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
168
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
169
+ return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt));
170
+ })();
171
+ return autograd::make_variable(tensor, options.requires_grad());
172
+ }
173
+
174
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
175
+ /// original data. `sizes` should specify the shape of the tensor, `strides` the
176
+ /// stride in each dimension. The `TensorOptions`
177
+ /// specify additional configuration options for the returned tensor, such as
178
+ /// what type to interpret the `data` as.
179
+ inline at::Tensor from_blob(
180
+ void* data,
181
+ at::IntArrayRef sizes,
182
+ at::IntArrayRef strides,
183
+ const at::TensorOptions& options = at::TensorOptions()) {
184
+ at::Tensor tensor = ([&]() {
185
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
186
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
187
+ return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt));
188
+ })();
189
+ return autograd::make_variable(tensor, options.requires_grad());
190
+ }
191
+
192
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
193
+ /// original data. `sizes` should specify the shape of the tensor. The `deleter`
194
+ /// (a `std::function<void(void*)>`) function will be called on the `data` when
195
+ /// the Tensor data would normally be deallocated. The `TensorOptions` specify
196
+ /// additional configuration options for the returned tensor, such as what type
197
+ /// to interpret the `data` as.
198
+ inline at::Tensor from_blob(
199
+ void* data,
200
+ at::IntArrayRef sizes,
201
+ const Deleter& deleter,
202
+ const at::TensorOptions& options = at::TensorOptions()) {
203
+ at::Tensor tensor = ([&]() {
204
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
205
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
206
+ return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt));
207
+ })();
208
+ return autograd::make_variable(tensor, options.requires_grad());
209
+ }
210
+
211
+ /// Exposes the given `data` as a `Tensor` without taking ownership of the
212
+ /// original data. `sizes` should specify the shape of the tensor. The
213
+ /// `TensorOptions` specify additional configuration options for the returned
214
+ /// tensor, such as what type to interpret the `data` as.
215
+ inline at::Tensor from_blob(
216
+ void* data,
217
+ at::IntArrayRef sizes,
218
+ const at::TensorOptions& options = at::TensorOptions()) {
219
+ at::Tensor tensor = ([&]() {
220
+ at::AutoDispatchBelowAutograd guard; // TODO: remove
221
+ at::tracer::impl::NoTracerDispatchMode tracer_guard;
222
+ return at::from_blob(data, sizes, options.requires_grad(c10::nullopt));
223
+ })();
224
+ return autograd::make_variable(tensor, options.requires_grad());
225
+ }
226
+
227
+ inline at::Tensor _make_dep_token(at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
228
+ at::AutoDispatchBelowADInplaceOrView guard;
229
+ return autograd::make_variable(at::_make_dep_token(at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
230
+ }
231
+ inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) {
232
+ at::AutoDispatchBelowADInplaceOrView guard;
233
+ return autograd::make_variable(at::_cudnn_init_dropout_state(dropout, train, dropout_seed, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
234
+ }
235
+ inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options = {}) {
236
+ at::AutoDispatchBelowADInplaceOrView guard;
237
+ return autograd::make_variable(at::arange(end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
238
+ }
239
+ inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
240
+ at::AutoDispatchBelowADInplaceOrView guard;
241
+ return autograd::make_variable(at::arange(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
242
+ }
243
+ inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options = {}) {
244
+ at::AutoDispatchBelowADInplaceOrView guard;
245
+ return autograd::make_variable(at::arange(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
246
+ }
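The wrappers in this header all follow the same pattern: `requires_grad` is cleared on the options before dispatch and then re-applied through `make_variable`. A hedged usage sketch:

    #include <torch/torch.h>

    void factory_requires_grad_example() {
      auto w = torch::arange(/*start=*/0, /*end=*/5, /*step=*/1,
                             torch::dtype(torch::kFloat).requires_grad(true));
      // w.requires_grad() == true: the wrapper passes requires_grad to make_variable,
      // not to the underlying at::arange call.
    }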
247
+ inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options = {}) {
248
+ at::AutoDispatchBelowADInplaceOrView guard;
249
+ return autograd::make_variable(at::bartlett_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
250
+ }
251
+ inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
252
+ at::AutoDispatchBelowADInplaceOrView guard;
253
+ return autograd::make_variable(at::bartlett_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
254
+ }
255
+ inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options = {}) {
256
+ at::AutoDispatchBelowADInplaceOrView guard;
257
+ return autograd::make_variable(at::blackman_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
258
+ }
259
+ inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
260
+ at::AutoDispatchBelowADInplaceOrView guard;
261
+ return autograd::make_variable(at::blackman_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
262
+ }
263
+ inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
264
+ at::AutoDispatchBelowADInplaceOrView guard;
265
+ return autograd::make_variable(at::empty(size, names, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
266
+ }
267
+ inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
268
+ at::AutoDispatchBelowADInplaceOrView guard;
269
+ return autograd::make_variable(at::empty(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
270
+ }
271
+ inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
272
+ at::AutoDispatchBelowADInplaceOrView guard;
273
+ return autograd::make_variable(at::empty_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
274
+ }
275
+ inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
276
+ at::AutoDispatchBelowADInplaceOrView guard;
277
+ return autograd::make_variable(at::empty_permuted(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
278
+ }
279
+ inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
280
+ at::AutoDispatchBelowADInplaceOrView guard;
281
+ return autograd::make_variable(at::empty_permuted_symint(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
282
+ }
283
+ inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
284
+ at::AutoDispatchBelowADInplaceOrView guard;
285
+ return autograd::make_variable(at::_empty_affine_quantized(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
286
+ }
287
+ inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
288
+ at::AutoDispatchBelowADInplaceOrView guard;
289
+ return autograd::make_variable(at::_empty_affine_quantized_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
290
+ }
291
+ inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
292
+ at::AutoDispatchBelowADInplaceOrView guard;
293
+ return autograd::make_variable(at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
294
+ }
295
+ inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
296
+ at::AutoDispatchBelowADInplaceOrView guard;
297
+ return autograd::make_variable(at::_empty_per_channel_affine_quantized_symint(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
298
+ }
299
+ inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
300
+ at::AutoDispatchBelowADInplaceOrView guard;
301
+ return autograd::make_variable(at::empty_quantized(size, qtensor, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
302
+ }
303
+ inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
304
+ at::AutoDispatchBelowADInplaceOrView guard;
305
+ return autograd::make_variable(at::empty_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
306
+ }
307
+ inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options = {}) {
308
+ at::AutoDispatchBelowADInplaceOrView guard;
309
+ return autograd::make_variable(at::empty_strided(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
310
+ }
311
+ inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) {
312
+ at::AutoDispatchBelowADInplaceOrView guard;
313
+ return autograd::make_variable(at::empty_strided_symint(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
314
+ }
315
+ inline at::Tensor eye(int64_t n, at::TensorOptions options = {}) {
316
+ at::AutoDispatchBelowADInplaceOrView guard;
317
+ return autograd::make_variable(at::eye(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
318
+ }
319
+ inline at::Tensor eye_symint(c10::SymInt n, at::TensorOptions options = {}) {
320
+ at::AutoDispatchBelowADInplaceOrView guard;
321
+ return autograd::make_variable(at::eye_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
322
+ }
323
+ inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options = {}) {
324
+ at::AutoDispatchBelowADInplaceOrView guard;
325
+ return autograd::make_variable(at::eye(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
326
+ }
327
+ inline at::Tensor eye_symint(c10::SymInt n, c10::SymInt m, at::TensorOptions options = {}) {
328
+ at::AutoDispatchBelowADInplaceOrView guard;
329
+ return autograd::make_variable(at::eye_symint(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
330
+ }
331
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
332
+ at::AutoDispatchBelowADInplaceOrView guard;
333
+ return autograd::make_variable(at::full(size, fill_value, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
334
+ }
335
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
336
+ at::AutoDispatchBelowADInplaceOrView guard;
337
+ return autograd::make_variable(at::full(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
338
+ }
339
+ inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
340
+ at::AutoDispatchBelowADInplaceOrView guard;
341
+ return autograd::make_variable(at::full_symint(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
342
+ }
343
+ inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
344
+ at::AutoDispatchBelowADInplaceOrView guard;
345
+ return autograd::make_variable(at::full_like(self, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
346
+ }
347
+ inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared = c10::nullopt, c10::optional<int64_t> size = 0, at::TensorOptions options = {}) {
348
+ at::AutoDispatchBelowADInplaceOrView guard;
349
+ return autograd::make_variable(at::from_file(filename, shared, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
350
+ }
351
+ inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options = {}) {
352
+ at::AutoDispatchBelowADInplaceOrView guard;
353
+ return autograd::make_variable(at::hann_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
354
+ }
355
+ inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
356
+ at::AutoDispatchBelowADInplaceOrView guard;
357
+ return autograd::make_variable(at::hann_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
358
+ }
359
+ inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options = {}) {
360
+ at::AutoDispatchBelowADInplaceOrView guard;
361
+ return autograd::make_variable(at::hamming_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
362
+ }
363
+ inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
364
+ at::AutoDispatchBelowADInplaceOrView guard;
365
+ return autograd::make_variable(at::hamming_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
366
+ }
367
+ inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options = {}) {
368
+ at::AutoDispatchBelowADInplaceOrView guard;
369
+ return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
370
+ }
371
+ inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options = {}) {
372
+ at::AutoDispatchBelowADInplaceOrView guard;
373
+ return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
374
+ }
375
+ inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options = {}) {
376
+ at::AutoDispatchBelowADInplaceOrView guard;
377
+ return autograd::make_variable(at::kaiser_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
378
+ }
379
+ inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
380
+ at::AutoDispatchBelowADInplaceOrView guard;
381
+ return autograd::make_variable(at::kaiser_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
382
+ }
383
+ inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options = {}) {
384
+ at::AutoDispatchBelowADInplaceOrView guard;
385
+ return autograd::make_variable(at::kaiser_window(window_length, periodic, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
386
+ }
387
+ inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
388
+ at::AutoDispatchBelowADInplaceOrView guard;
389
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
390
+ }
391
+ inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
392
+ at::AutoDispatchBelowADInplaceOrView guard;
393
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
394
+ }
395
+ inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
396
+ at::AutoDispatchBelowADInplaceOrView guard;
397
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
398
+ }
399
+ inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
400
+ at::AutoDispatchBelowADInplaceOrView guard;
401
+ return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
402
+ }
403
+ inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
404
+ at::AutoDispatchBelowADInplaceOrView guard;
405
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
406
+ }
407
+ inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
408
+ at::AutoDispatchBelowADInplaceOrView guard;
409
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
410
+ }
411
+ inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
412
+ at::AutoDispatchBelowADInplaceOrView guard;
413
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
414
+ }
415
+ inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
416
+ at::AutoDispatchBelowADInplaceOrView guard;
417
+ return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
418
+ }
419
+ inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
420
+ at::AutoDispatchBelowADInplaceOrView guard;
421
+ return autograd::make_variable(at::ones(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
422
+ }
423
+ inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options = {}) {
424
+ at::AutoDispatchBelowADInplaceOrView guard;
425
+ return autograd::make_variable(at::ones(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
426
+ }
427
+ inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
428
+ at::AutoDispatchBelowADInplaceOrView guard;
429
+ return autograd::make_variable(at::ones_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
430
+ }
431
+ inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
432
+ at::AutoDispatchBelowADInplaceOrView guard;
433
+ return autograd::make_variable(at::ones_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
434
+ }
435
+ inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options = {}) {
436
+ at::AutoDispatchBelowADInplaceOrView guard;
437
+ return autograd::make_variable(at::scalar_tensor(s, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
438
+ }
439
+ inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
440
+ at::AutoDispatchBelowADInplaceOrView guard;
441
+ return autograd::make_variable(at::rand(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
442
+ }
443
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
444
+ at::AutoDispatchBelowADInplaceOrView guard;
445
+ return autograd::make_variable(at::rand_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
446
+ }
447
+ inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
448
+ at::AutoDispatchBelowADInplaceOrView guard;
449
+ return autograd::make_variable(at::rand(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
450
+ }
451
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
452
+ at::AutoDispatchBelowADInplaceOrView guard;
453
+ return autograd::make_variable(at::rand_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
454
+ }
455
+ inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options = {}) {
456
+ at::AutoDispatchBelowADInplaceOrView guard;
457
+ return autograd::make_variable(at::rand(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
458
+ }
459
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
460
+ at::AutoDispatchBelowADInplaceOrView guard;
461
+ return autograd::make_variable(at::rand_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
462
+ }
463
+ inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
464
+ at::AutoDispatchBelowADInplaceOrView guard;
465
+ return autograd::make_variable(at::rand(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
466
+ }
467
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
468
+ at::AutoDispatchBelowADInplaceOrView guard;
469
+ return autograd::make_variable(at::rand_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
470
+ }
471
+ inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
472
+ at::AutoDispatchBelowADInplaceOrView guard;
473
+ return autograd::make_variable(at::rand_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
474
+ }
475
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
476
+ at::AutoDispatchBelowADInplaceOrView guard;
477
+ return autograd::make_variable(at::randint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
478
+ }
479
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
480
+ at::AutoDispatchBelowADInplaceOrView guard;
481
+ return autograd::make_variable(at::randint_symint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
482
+ }
483
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
484
+ at::AutoDispatchBelowADInplaceOrView guard;
485
+ return autograd::make_variable(at::randint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
486
+ }
487
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
488
+ at::AutoDispatchBelowADInplaceOrView guard;
489
+ return autograd::make_variable(at::randint_symint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
490
+ }
491
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
492
+ at::AutoDispatchBelowADInplaceOrView guard;
493
+ return autograd::make_variable(at::randint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
494
+ }
495
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
496
+ at::AutoDispatchBelowADInplaceOrView guard;
497
+ return autograd::make_variable(at::randint_symint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
498
+ }
499
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
500
+ at::AutoDispatchBelowADInplaceOrView guard;
501
+ return autograd::make_variable(at::randint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
502
+ }
503
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
504
+ at::AutoDispatchBelowADInplaceOrView guard;
505
+ return autograd::make_variable(at::randint_symint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
506
+ }
507
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
508
+ at::AutoDispatchBelowADInplaceOrView guard;
509
+ return autograd::make_variable(at::randint_like(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
510
+ }
511
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
512
+ at::AutoDispatchBelowADInplaceOrView guard;
513
+ return autograd::make_variable(at::randint_like_symint(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
514
+ }
515
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
516
+ at::AutoDispatchBelowADInplaceOrView guard;
517
+ return autograd::make_variable(at::randint_like(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
518
+ }
519
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
520
+ at::AutoDispatchBelowADInplaceOrView guard;
521
+ return autograd::make_variable(at::randint_like_symint(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
522
+ }
523
+ inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options = {}) {
524
+ at::AutoDispatchBelowADInplaceOrView guard;
525
+ return autograd::make_variable(at::randn(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
526
+ }
527
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
528
+ at::AutoDispatchBelowADInplaceOrView guard;
529
+ return autograd::make_variable(at::randn_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
530
+ }
531
+ inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
532
+ at::AutoDispatchBelowADInplaceOrView guard;
533
+ return autograd::make_variable(at::randn(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
534
+ }
535
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
536
+ at::AutoDispatchBelowADInplaceOrView guard;
537
+ return autograd::make_variable(at::randn_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
538
+ }
539
+ inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
540
+ at::AutoDispatchBelowADInplaceOrView guard;
541
+ return autograd::make_variable(at::randn(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
542
+ }
543
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
544
+ at::AutoDispatchBelowADInplaceOrView guard;
545
+ return autograd::make_variable(at::randn_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
546
+ }
547
+ inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
548
+ at::AutoDispatchBelowADInplaceOrView guard;
549
+ return autograd::make_variable(at::randn(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
550
+ }
551
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
552
+ at::AutoDispatchBelowADInplaceOrView guard;
553
+ return autograd::make_variable(at::randn_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
554
+ }
555
+ inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
556
+ at::AutoDispatchBelowADInplaceOrView guard;
557
+ return autograd::make_variable(at::randn_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
558
+ }
559
+ inline at::Tensor randperm(int64_t n, at::TensorOptions options = at::kLong) {
560
+ at::AutoDispatchBelowADInplaceOrView guard;
561
+ return autograd::make_variable(at::randperm(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
562
+ }
563
+ inline at::Tensor randperm_symint(c10::SymInt n, at::TensorOptions options = at::kLong) {
564
+ at::AutoDispatchBelowADInplaceOrView guard;
565
+ return autograd::make_variable(at::randperm_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
566
+ }
567
+ inline at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
568
+ at::AutoDispatchBelowADInplaceOrView guard;
569
+ return autograd::make_variable(at::randperm(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
570
+ }
571
+ inline at::Tensor randperm_symint(c10::SymInt n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
572
+ at::AutoDispatchBelowADInplaceOrView guard;
573
+ return autograd::make_variable(at::randperm_symint(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
574
+ }
575
+ inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step = 1, at::TensorOptions options = {}) {
576
+ at::AutoDispatchBelowADInplaceOrView guard;
577
+ return autograd::make_variable(at::range(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
578
+ }
579
+ inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
580
+ at::AutoDispatchBelowADInplaceOrView guard;
581
+ return autograd::make_variable(at::range(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
582
+ }
583
+ inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
584
+ at::AutoDispatchBelowADInplaceOrView guard;
585
+ return autograd::make_variable(at::zeros(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
586
+ }
587
+ inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options = {}) {
588
+ at::AutoDispatchBelowADInplaceOrView guard;
589
+ return autograd::make_variable(at::_efficientzerotensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
590
+ }
591
+ inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
592
+ at::AutoDispatchBelowADInplaceOrView guard;
593
+ return autograd::make_variable(at::_efficientzerotensor_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
594
+ }
595
+ inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options = {}) {
596
+ at::AutoDispatchBelowADInplaceOrView guard;
597
+ return autograd::make_variable(at::zeros(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
598
+ }
599
+ inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
600
+ at::AutoDispatchBelowADInplaceOrView guard;
601
+ return autograd::make_variable(at::zeros_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
602
+ }
603
+ inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
604
+ at::AutoDispatchBelowADInplaceOrView guard;
605
+ return autograd::make_variable(at::zeros_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
606
+ }
607
+ inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
608
+ at::AutoDispatchBelowADInplaceOrView guard;
609
+ return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
610
+ }
611
+ inline at::Tensor sparse_compressed_tensor_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) {
612
+ at::AutoDispatchBelowADInplaceOrView guard;
613
+ return autograd::make_variable(at::sparse_compressed_tensor_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
614
+ }
615
+ inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
616
+ at::AutoDispatchBelowADInplaceOrView guard;
617
+ return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
618
+ }
619
+ inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
620
+ at::AutoDispatchBelowADInplaceOrView guard;
621
+ return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
622
+ }
623
+ inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
624
+ at::AutoDispatchBelowADInplaceOrView guard;
625
+ return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
626
+ }
627
+ inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
628
+ at::AutoDispatchBelowADInplaceOrView guard;
629
+ return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
630
+ }
631
+ inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
632
+ at::AutoDispatchBelowADInplaceOrView guard;
633
+ return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
634
+ }
635
+ inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
636
+ at::AutoDispatchBelowADInplaceOrView guard;
637
+ return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
638
+ }
639
+ inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
640
+ at::AutoDispatchBelowADInplaceOrView guard;
641
+ return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
642
+ }
643
+ inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
644
+ at::AutoDispatchBelowADInplaceOrView guard;
645
+ return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
646
+ }
647
+ inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
648
+ at::AutoDispatchBelowADInplaceOrView guard;
649
+ return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
650
+ }
651
+ inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
652
+ at::AutoDispatchBelowADInplaceOrView guard;
653
+ return autograd::make_variable(at::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
654
+ }
655
+ inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}) {
656
+ at::AutoDispatchBelowADInplaceOrView guard;
657
+ return autograd::make_variable(at::_sparse_compressed_tensor_unsafe_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
658
+ }
659
+ inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
660
+ at::AutoDispatchBelowADInplaceOrView guard;
661
+ return autograd::make_variable(at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
662
+ }
663
+ inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
664
+ at::AutoDispatchBelowADInplaceOrView guard;
665
+ return autograd::make_variable(at::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
666
+ }
667
+ inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
668
+ at::AutoDispatchBelowADInplaceOrView guard;
669
+ return autograd::make_variable(at::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
670
+ }
671
+ inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
672
+ at::AutoDispatchBelowADInplaceOrView guard;
673
+ return autograd::make_variable(at::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
674
+ }
675
+ inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
676
+ at::AutoDispatchBelowADInplaceOrView guard;
677
+ return autograd::make_variable(at::sparse_coo_tensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
678
+ }
679
+ inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
680
+ at::AutoDispatchBelowADInplaceOrView guard;
681
+ return autograd::make_variable(at::sparse_coo_tensor(indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
682
+ }
683
+ inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
684
+ at::AutoDispatchBelowADInplaceOrView guard;
685
+ return autograd::make_variable(at::sparse_coo_tensor(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
686
+ }
687
+ inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
688
+ at::AutoDispatchBelowADInplaceOrView guard;
689
+ return autograd::make_variable(at::_sparse_coo_tensor_unsafe(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
690
+ }
691
+ inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
692
+ at::AutoDispatchBelowADInplaceOrView guard;
693
+ return autograd::make_variable(at::_sparse_coo_tensor_unsafe_symint(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
694
+ }
695
+ inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
696
+ at::AutoDispatchBelowADInplaceOrView guard;
697
+ return autograd::make_variable(at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
698
+ }
699
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced = c10::nullopt) {
700
+ at::AutoDispatchBelowADInplaceOrView guard;
701
+ return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
702
+ }
703
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced = c10::nullopt) {
704
+ at::AutoDispatchBelowADInplaceOrView guard;
705
+ return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors_symint(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
706
+ }
707
+ inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options = {}, bool non_blocking = false, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
708
+ at::AutoDispatchBelowADInplaceOrView guard;
709
+ return autograd::make_variable(at::_to_copy(self, at::TensorOptions(options).requires_grad(c10::nullopt), non_blocking, memory_format), /*requires_grad=*/options.requires_grad());
710
+ }
711
+ inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
712
+ at::AutoDispatchBelowADInplaceOrView guard;
713
+ return autograd::make_variable(at::tril_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
714
+ }
715
+ inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
716
+ at::AutoDispatchBelowADInplaceOrView guard;
717
+ return autograd::make_variable(at::triu_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
718
+ }
719
+ inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
720
+ at::AutoDispatchBelowADInplaceOrView guard;
721
+ return autograd::make_variable(at::normal(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
722
+ }
723
+ inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
724
+ at::AutoDispatchBelowADInplaceOrView guard;
725
+ return autograd::make_variable(at::normal_symint(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
726
+ }
727
+ inline at::Tensor fft_fftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
728
+ at::AutoDispatchBelowADInplaceOrView guard;
729
+ return autograd::make_variable(at::fft_fftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
730
+ }
731
+ inline at::Tensor fft_rfftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
732
+ at::AutoDispatchBelowADInplaceOrView guard;
733
+ return autograd::make_variable(at::fft_rfftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
734
+ }
735
+
736
+ } // namespace torch
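The factory wrappers above all follow the same pattern: strip requires_grad from the incoming TensorOptions, call the underlying ATen factory below the autograd dispatch keys, then re-apply requires_grad on the resulting Variable so it becomes a leaf. A minimal sketch of how user code typically reaches these wrappers through the public C++ API follows; it assumes a program linked against libtorch and is illustrative rather than part of this diff.

#include <torch/torch.h>
#include <iostream>

int main() {
  // requires_grad is stripped before the ATen call and re-applied on the
  // returned Variable, so `w` is a leaf with requires_grad() == true.
  auto w = torch::zeros({2, 3}, torch::dtype(torch::kFloat).requires_grad(true));
  // randperm defaults to kLong, matching the default options in the header above.
  auto idx = torch::randperm(5);
  std::cout << w.requires_grad() << "\n" << idx << "\n";
  return 0;
}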
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h ADDED
@@ -0,0 +1,226 @@
1
+ #pragma once
2
+ #include <ATen/ThreadLocalState.h>
3
+ #include <ATen/core/Tensor.h>
4
+ #include <c10/util/ThreadLocal.h>
5
+ #include <torch/csrc/autograd/input_buffer.h>
6
+ #include <torch/csrc/autograd/utils/warnings.h>
7
+ #include <vector>
8
+
9
+ namespace torch::autograd {
10
+
11
+ using edge_list = std::vector<Edge>;
12
+ struct ReadyQueue;
13
+
14
+ static constexpr int NO_DEVICE = -2;
15
+ static constexpr int CPU_DEVICE = -1;
16
+
17
+ // GraphTask holds metadata needed for a single execution of backward()
18
+ struct GraphTask : std::enable_shared_from_this<GraphTask> {
19
+ std::atomic<uint64_t> outstanding_tasks_{0};
20
+ // Indicates if an error occurred while executing any task. When this is
21
+ // true, it signals all threads to stop executing.
22
+ std::atomic_bool has_error_{false};
23
+ std::atomic_bool future_completed_{false};
24
+ // It is safe to read keep_graph_ without synchronization
25
+ bool keep_graph_;
26
+
27
+ // To protect reads/writes to not_ready_, dependencies_, captured_vars_,
28
+ // has_error_, future_result_, cpu_ready_queue_, and leaf_streams.
29
+ std::mutex mutex_;
30
+ std::unordered_map<Node*, InputBuffer> not_ready_;
31
+ std::unordered_map<Node*, int> dependencies_;
32
+
33
+ // Records the nodes that are in the graph
34
+ std::unordered_set<Node*> nodes_in_graph_;
35
+ c10::SmallVector<Node*, 4> graph_roots_;
36
+ // Note [Exec info]
37
+ // Exec info is created for each GraphTask, which allows filtering paths on
38
+ // the graph that are not needed. It has somewhat complicated semantics. If it's
39
+ // empty, it means the task is run in a "default" mode, which means that all
40
+ // next_edges we encounter should get executed. If it's not empty, only
41
+ // functions that have an entry and this entry has needed == True should be
42
+ // executed. exec_info is only empty when the graph is executed via
43
+ // .backward() and the inputs parameter is not passed. Otherwise, when
44
+ // executed through .grad(), or when inputs arg is specified for .backward(),
45
+ // exec_info will be non-empty.
46
+ //
47
+ struct ExecInfo {
48
+ struct Capture {
49
+ Capture(const Capture&) = delete;
50
+ Capture(Capture&&) = default;
51
+
52
+ Capture(int input_idx, int output_idx)
53
+ : input_idx_(input_idx), output_idx_(output_idx) {}
54
+ int input_idx_; // within Node inputs
55
+ int output_idx_; // within the output vector of a GraphTask
56
+
57
+ // This hook will be executed after a grad is captured. The captured
58
+ // grad will be replaced by the return value of the hook.
59
+ struct GradCaptureHook {
60
+ virtual ~GradCaptureHook() = default;
61
+ virtual at::Tensor operator()(const at::Tensor& grad) = 0;
62
+ };
63
+ // NOTE [Deprecated capture hooks]
64
+ //
65
+ // The current status of capture hooks is that we continue to support
66
+ // the single usage of it by distributed in the dist_engine. If anyone
67
+ // else needs to use it for other purposes, they should file an issue.
68
+ //
69
+ // Capture hooks were originally created because there did not exist
70
+ // any way to register pre/post hooks to grad_fn in a way such that it
71
+ // would still be executed even if that is the grad_fn of a Tensor
72
+ // passed as input= of .grad. As far as I know, only dist_engine uses
73
+ // this hook.
74
+ //
75
+ // However, there are other alternatives today like tensor hooks that can
76
+ // replace the usage that originally motivated its creation. Also,
77
+ // Capture hooks are an outlier in terms of the types of hooks that
78
+ // autograd offers in how it is registered and behaves, e.g. it is a hook
79
+ // registered not to the graph, but to a particular graph_task! This makes
80
+ // it a burden to maintain.
81
+ //
82
+ // It would be very nice to clean up/do a migration from pre/post
83
+ // hooks used in distributed to use tensor hooks, but for now we just
84
+ // mark this method as deprecated to prevent additional usage.
85
+ //
86
+ // If you still think you really need to capture hooks, please file an
87
+ // issue (and tag autograd).
88
+ const std::vector<std::unique_ptr<GradCaptureHook>>&
89
+ DO_NOT_USE_DEPRECATED_get_capture_hooks() const {
90
+ return hooks_;
91
+ }
92
+ // See NOTE [deprecated capture hooks]
93
+ void DO_NOT_USE_DEPRECATED_register_capture_hook(
94
+ std::unique_ptr<GradCaptureHook> hook) {
95
+ hooks_.push_back(std::move(hook));
96
+ }
97
+
98
+ private:
99
+ // The hooks will be called one by one in the order as they were added.
100
+ // The input grad of a hook will be the output of its preceding hook. The
101
+ // first hook will take the captured grad as the input. The output of the
102
+ // last hook will replace the captured grad.
103
+ std::vector<std::unique_ptr<GradCaptureHook>> hooks_;
104
+ };
105
+
106
+ bool should_execute() const {
107
+ return needed_ || captures_;
108
+ }
109
+
110
+ bool needed_ = false;
111
+ std::unique_ptr<std::vector<Capture>> captures_;
112
+ };
113
+ // exec_info_ is safe to read without synchronization
114
+ std::unordered_map<Node*, ExecInfo> exec_info_;
115
+ // Captured variables are the grads that we return to the user. After
116
+ // execution of the GraphTask is completed, the captured_vars_ are moved
117
+ // out of the GraphTask and are no longer valid.
118
+ std::vector<Variable> captured_vars_;
119
+
120
+ // Note: this field is not ready to be used until the proper
121
+ // `thread_locals_.set_grad_mode()` call in the constructor.
122
+ at::ThreadLocalState thread_locals_ = at::ThreadLocalState();
123
+
124
+ std::unordered_set<c10::Stream> leaf_streams;
125
+
126
+ // Per-device current streams of the execute() that called this GraphTask.
127
+ // These will be synced with leaf_streams in exec_post_processing.
128
+ std::vector<c10::optional<c10::Stream>> caller_current_streams_;
129
+
130
+ // Collects caller_current_streams_ for the accelerator device.
131
+ void stash_current_streams();
132
+
133
+ void init_to_execute(
134
+ Node& graph_root,
135
+ const edge_list& outputs,
136
+ bool accumulate_grad,
137
+ uint64_t min_topo_nr);
138
+
139
+ // The value of worker_device in the thread that created this task.
140
+ // See Note [Reentrant backwards]
141
+ // Safe to read owner_ and reentrant_depth_ without synchronization
142
+ int owner_;
143
+ // The number of parent graph tasks for this graph task
144
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
145
+ const int reentrant_depth_;
146
+
147
+ bool can_checkpoint() const {
148
+ return exec_info_.empty();
149
+ }
150
+
151
+ // check if the GraphTask is completed or not
152
+ bool completed();
153
+ // mark the graph task as completed and trigger post processing
154
+ void mark_as_completed_and_run_post_processing();
155
+
156
+ // Set an appropriate exception on this graph_task for an error encountered while
157
+ // running the provided function.
158
+ void set_exception(std::exception_ptr eptr, const std::shared_ptr<Node>& fn);
159
+
160
+ // Set an appropriate exception on this graph_task for an error encountered while
161
+ // running the provided function. But doesn't signal completion on
162
+ // 'future_result_' right away. The user needs to explicitly mark
163
+ // 'future_result_' completed with an appropriate exception.
164
+ void set_exception_without_signal(const std::shared_ptr<Node>& fn);
165
+
166
+ // Whether or not to stop execution for this GraphTask when an error is
167
+ // encountered. When set to true, this would cause Engine::execute() to throw
168
+ // an exception as soon as the autograd engine receives an exception.
169
+ bool exit_on_error_;
170
+
171
+ // CPU threads are dedicated to processing CPU work for the backward they
172
+ // invoked. So any given graph task maintains its own cpu_ready_queue_ where
173
+ // you should send work for it to be done. We memoize the cpu_ready_queue_ per
174
+ // GraphTask so that we know which ready queue we should push to if we are on
175
+ // device thread (i.e. GPU) but the next NodeTask should be run on CPU.
176
+ std::shared_ptr<ReadyQueue> cpu_ready_queue_;
177
+
178
+ // Future representing the completion of the graph task. Notified when all
179
+ // tasks are done.
180
+ c10::intrusive_ptr<at::ivalue::Future> future_result_;
181
+
182
+ // Final callbacks installed during execution of this GraphTask
183
+ std::vector<std::function<void()>> final_callbacks_;
184
+ // To protect reads and writes to final_callbacks_. Intentionally no reusing
185
+ // mutex_ as the two are protecting different data structures.
186
+ std::mutex final_callbacks_lock_;
187
+
188
+ utils::DelayWarningHandler warning_handler_;
189
+
190
+ uint64_t id_;
191
+
192
+ GraphTask(
193
+ bool keep_graph,
194
+ bool grad_mode,
195
+ int reentrant_depth,
196
+ std::shared_ptr<ReadyQueue> cpu_ready_queue,
197
+ c10::SmallVector<Node*, 4> graph_roots,
198
+ bool exit_on_error = false);
199
+
200
+ private:
201
+ // run GraphTask post processing
202
+ void exec_post_processing();
203
+ };
204
+
205
+ // The guard that sets and restores current_graph_task.
206
+ class GraphTaskGuard {
207
+ public:
208
+ explicit GraphTaskGuard(std::shared_ptr<GraphTask> graph_task);
209
+ ~GraphTaskGuard();
210
+
211
+ void restore_current_graph_task();
212
+
213
+ private:
214
+ std::shared_ptr<GraphTask> last_graph_task_;
215
+ };
216
+
217
+ TORCH_API const std::unordered_map<Node*, GraphTask::ExecInfo>*
218
+ get_current_graph_task_exec_info();
219
+ TORCH_API const std::unordered_set<Node*>*
220
+ get_current_graph_task_nodes_in_graph();
221
+ TORCH_API bool get_current_graph_task_keep_graph();
222
+ TORCH_API std::vector<Node*> get_current_graph_task_execution_order();
223
+ TORCH_API int get_current_graph_task_id();
224
+ void add_node_to_current_graph_task_exec_info(Node* fn);
225
+
226
+ } // namespace torch::autograd
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h ADDED
@@ -0,0 +1,45 @@
1
+ #pragma once
2
+
3
+ // The InputBuffer class accumulates a list of Variables for use by a
4
+ // function. It implements logic to avoid modifying the passed
5
+ // values in-place (adding an input twice will accumulate the result).
6
+ // This behaviour is needed and used only in backward graphs.
7
+
8
+ #include <utility>
9
+ #include <vector>
10
+
11
+ #include <c10/core/Stream.h>
12
+ #include <c10/util/Optional.h>
13
+ #include <torch/csrc/autograd/variable.h>
14
+
15
+ namespace torch::autograd {
16
+
17
+ struct InputBuffer {
18
+ explicit InputBuffer(size_t size) : buffer(size) {}
19
+ InputBuffer(const InputBuffer& other) = delete;
20
+ InputBuffer(InputBuffer&& other) = default;
21
+ explicit InputBuffer(variable_list&& inputs) : buffer(std::move(inputs)){};
22
+ InputBuffer& operator=(InputBuffer&& other) = default;
23
+
24
+ // Accumulates the variable at a specified index.
25
+ // The optional CUDA streams determine which stream the accumulation
26
+ // is run on and how the addition is synchronized.
27
+ TORCH_API void add(
28
+ size_t pos,
29
+ Variable&& var,
30
+ const c10::optional<c10::Stream>& opt_producer_stream,
31
+ const c10::optional<c10::Stream>& opt_consumer_stream);
32
+
33
+ at::Device device() const;
34
+
35
+ Variable operator[](size_t pos) {
36
+ return buffer[pos];
37
+ }
38
+
39
+ // Returns the inputs as a list of variables. Destroys given InputBuffer.
40
+ static std::vector<Variable> variables(InputBuffer&& g);
41
+
42
+ std::vector<Variable> buffer;
43
+ };
44
+
45
+ } // namespace torch::autograd
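As the comment above notes, InputBuffer gathers the gradients flowing into a node's inputs during backward and sums duplicate contributions instead of overwriting them. The sketch below exercises the declared API directly; since this is an internal autograd type, building it outside the torch source tree (and the include path working from an installed distribution) is an assumption.

#include <torch/csrc/autograd/input_buffer.h>
#include <torch/torch.h>

void demo_input_buffer() {
  using torch::autograd::InputBuffer;
  InputBuffer buf(/*size=*/1);
  auto g = torch::ones({2});
  // Adding to the same slot twice accumulates: the buffer should end up holding 2 * g.
  buf.add(0, g.clone(), c10::nullopt, c10::nullopt);
  buf.add(0, g.clone(), c10::nullopt, c10::nullopt);
  auto vars = InputBuffer::variables(std::move(buf));  // consumes the buffer
  (void)vars;
}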
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h ADDED
@@ -0,0 +1,113 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ExpandUtils.h>
4
+ #include <ATen/NestedTensorImpl.h>
5
+ #include <ATen/core/Tensor.h>
6
+ #include <c10/core/Device.h>
7
+ #include <c10/core/DeviceType.h>
8
+ #include <c10/core/Stream.h>
9
+ #include <c10/core/SymIntArrayRef.h>
10
+ #include <c10/core/TensorImpl.h>
11
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
12
+ #include <c10/util/DimVector.h>
13
+ #include <c10/util/Exception.h>
14
+ #include <c10/util/SmallVector.h>
15
+
16
+ #ifndef AT_PER_OPERATOR_HEADERS
17
+ #include <ATen/Functions.h>
18
+ #else
19
+ #include <ATen/ops/zeros.h>
20
+ #endif
21
+
22
+ namespace torch::autograd {
23
+
24
+ using SymIntSmallVec = c10::SmallVector<c10::SymInt, c10::kDimVectorStaticSize>;
25
+ using MetadataShape = std::variant<SymIntSmallVec, at::Tensor>;
26
+
27
+ /**
28
+ * Records TensorOptions, shape of the tensor, whether or not the Python
29
+ * dispatch key is set (tensor subclass), and, where applicable, the stream the
30
+ * corresponding operation took place on.
31
+ *
32
+ * If is_valid() is false, then the corresponding input is not used and may be
33
+ * an undefined tensor.
34
+ */
35
+ struct TORCH_API InputMetadata {
36
+ InputMetadata() = default;
37
+ InputMetadata(
38
+ const at::TensorOptions& options,
39
+ MetadataShape input_shape,
40
+ bool is_tensor_subclass,
41
+ bool is_nested);
42
+ InputMetadata(const at::Tensor& t);
43
+
44
+ const at::TensorOptions& options() const {
45
+ return options_;
46
+ }
47
+
48
+ caffe2::TypeMeta dtype() const {
49
+ return options_.dtype();
50
+ }
51
+
52
+ at::Device device() const {
53
+ return options_.device();
54
+ }
55
+
56
+ at::Layout layout() const {
57
+ return options_.layout();
58
+ }
59
+
60
+ c10::Stream stream() const {
61
+ return stream_;
62
+ }
63
+
64
+ bool is_tensor_subclass() const {
65
+ return is_tensor_subclass_;
66
+ }
67
+
68
+ at::Tensor zeros_like() const;
69
+
70
+ bool is_same_shape(const at::Tensor& grad) const;
71
+
72
+ bool is_expandable_to_shape(const at::Tensor& grad) const;
73
+
74
+ at::Tensor reduce_grad(at::Tensor& grad) const;
75
+
76
+ at::Tensor maybe_reduce(
77
+ const size_t index,
78
+ at::Tensor grad,
79
+ const std::function<std::string(const std::string&)>& format_error) const;
80
+
81
+ std::stringstream incompatible_shape_error_message(
82
+ const size_t index,
83
+ const at::Tensor& grad) const;
84
+
85
+ bool was_default_constructed() const {
86
+ return was_default_constructed_;
87
+ }
88
+
89
+ bool is_cpp_nested_tensor() const;
90
+
91
+ bool is_nested_tensor() const {
92
+ return is_nested_;
93
+ }
94
+
95
+ c10::SymIntArrayRef shape_as_dim_vector() const;
96
+
97
+ // Danger: not thread safe, caller must protect with lock
98
+ SymIntSmallVec& mutable_shape_as_dim_vector();
99
+
100
+ private:
101
+ at::Tensor shape_as_tensor() const;
102
+ bool is_nestedness_same(const at::Tensor& grad) const;
103
+ bool maybe_expandable_to(const at::Tensor& grad) const;
104
+
105
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
106
+ const at::TensorOptions options_;
107
+ MetadataShape shape_;
108
+ c10::Stream stream_ = c10::Stream(c10::Stream::Default::DEFAULT, device());
109
+ bool is_tensor_subclass_ = false;
110
+ bool is_nested_ = false;
111
+ bool was_default_constructed_ = true;
112
+ };
113
+ } // namespace torch::autograd
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h ADDED
@@ -0,0 +1,406 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <iostream>
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ #include <torch/csrc/Export.h>
11
+ #include <torch/csrc/profiler/api.h>
12
+ #include <torch/csrc/profiler/stubs/base.h>
13
+ #include <torch/csrc/profiler/util.h>
14
+
15
+ namespace torch::autograd {
16
+
17
+ struct Node;
18
+
19
+ namespace profiler {
20
+
21
+ enum class C10_API_ENUM EventKind : uint16_t {
22
+ Mark,
23
+ PushRange,
24
+ PopRange,
25
+ MemoryAlloc,
26
+ };
27
+
28
+ // To be deprecated, once we switch to Kineto profiling
29
+ struct TORCH_API LegacyEvent {
30
+ LegacyEvent(
31
+ EventKind kind,
32
+ at::StringView name,
33
+ uint16_t thread_id,
34
+ bool record_cuda,
35
+ at::RecordFunctionHandle handle = 0,
36
+ std::vector<std::vector<int64_t>>&& shapes = {},
37
+ int64_t node_id = -1,
38
+ bool is_async = false)
39
+ : name_(std::move(name)),
40
+ kind_(kind),
41
+ thread_id_(thread_id),
42
+ handle_(handle),
43
+ shapes_(std::move(shapes)),
44
+ node_id_(node_id),
45
+ is_async_(is_async) {
46
+ record(record_cuda);
47
+ }
48
+
49
+ // Constructor to be used in conjunction with LegacyEvent::fromIValue.
50
+ LegacyEvent(
51
+ EventKind kind,
52
+ at::StringView name,
53
+ uint16_t thread_id,
54
+ at::RecordFunctionHandle handle,
55
+ std::vector<std::vector<int64_t>>&& shapes,
56
+ int64_t node_id,
57
+ bool is_remote,
58
+ int64_t cpu_memory_usage,
59
+ int64_t cpu_ns,
60
+ bool cuda_recorded,
61
+ int64_t cuda_memory_usage = 0,
62
+ c10::DeviceIndex device = -1,
63
+ double cuda_us = -1)
64
+ : cpu_ns_(cpu_ns),
65
+ name_(std::move(name)),
66
+ kind_(kind),
67
+ thread_id_(thread_id),
68
+ handle_(handle),
69
+ shapes_(std::move(shapes)),
70
+ cpu_memory_usage_(cpu_memory_usage),
71
+ cuda_memory_usage_(cuda_memory_usage),
72
+ device_(device),
73
+ node_id_(node_id),
74
+ is_remote_(is_remote),
75
+ cuda_us_(static_cast<int64_t>(cuda_us)) {
76
+ // Sanity check values that were deserialized
77
+ TORCH_INTERNAL_ASSERT(cpu_ns_ > 0);
78
+ if (cuda_recorded) {
79
+ TORCH_INTERNAL_ASSERT(device_ >= 0);
80
+ TORCH_INTERNAL_ASSERT(cuda_us_ >= 0);
81
+ }
82
+ }
83
+
84
+ // Returns IValues corresponding to event structure, to be used for
85
+ // serialization.
86
+ at::IValue toIValue() const;
87
+
88
+ // Reconstructs an event from IValues given by toIValue.
89
+ static LegacyEvent fromIValue(const at::IValue& eventIValue);
90
+
91
+ void record(bool record_cuda);
92
+
93
+ std::string kindStr() const {
94
+ switch (kind_) {
95
+ case EventKind::Mark:
96
+ return "mark";
97
+ case EventKind::PushRange:
98
+ return "push";
99
+ case EventKind::PopRange:
100
+ return "pop";
101
+ case EventKind::MemoryAlloc:
102
+ return "memory_alloc";
103
+ }
104
+ throw std::runtime_error("unknown event kind");
105
+ }
106
+
107
+ EventKind kind() const {
108
+ return kind_;
109
+ }
110
+
111
+ const char* name() const {
112
+ return name_.str();
113
+ }
114
+
115
+ uint64_t threadId() const {
116
+ return thread_id_;
117
+ }
118
+
119
+ std::vector<std::vector<int64_t>> shapes() const {
120
+ return shapes_;
121
+ }
122
+
123
+ double cpuElapsedUs(const LegacyEvent& e) const {
124
+ return static_cast<double>(e.cpu_ns_ - cpu_ns_) / (1000.0);
125
+ }
126
+
127
+ void setCpuUs(int64_t cpu_us) {
128
+ cpu_ns_ = cpu_us * 1000;
129
+ }
130
+
131
+ double cpuUs() const {
132
+ return static_cast<double>(cpu_ns_) / (1000.0);
133
+ }
134
+
135
+ double cudaElapsedUs(const LegacyEvent& e) const;
136
+
137
+ bool hasCuda() const {
138
+ return cuda_event != nullptr || (isRemote() && device_ != -1);
139
+ }
140
+
141
+ c10::DeviceIndex device() const {
142
+ return device_;
143
+ }
144
+
145
+ void updateMemoryStats(int64_t alloc_size, c10::Device device) {
146
+ if (device.is_cuda() || device.type() == c10::DeviceType::HIP) {
147
+ cuda_memory_usage_ = alloc_size;
148
+ } else if (
149
+ device.is_cpu() || device.type() == c10::DeviceType::MKLDNN ||
150
+ device.type() == c10::DeviceType::IDEEP) {
151
+ cpu_memory_usage_ = alloc_size;
152
+ } else {
153
+ LOG(WARNING) << "Unsupported memory profiling device: " << device;
154
+ }
155
+ }
156
+
157
+ int64_t cpuMemoryUsage() const {
158
+ return cpu_memory_usage_;
159
+ }
160
+
161
+ int64_t cudaMemoryUsage() const {
162
+ return cuda_memory_usage_;
163
+ }
164
+
165
+ at::RecordFunctionHandle handle() const {
166
+ return handle_;
167
+ }
168
+
169
+ // Node ID corresponding to this event.
170
+ int64_t nodeId() const {
171
+ return node_id_;
172
+ }
173
+
174
+ // Set Node ID on this event.
175
+ void setNodeId(int64_t node_id) {
176
+ node_id_ = node_id;
177
+ }
178
+
179
+ void setName(at::StringView newName_) {
180
+ name_ = std::move(newName_);
181
+ }
182
+
183
+ bool isRemote() const {
184
+ return is_remote_;
185
+ }
186
+
187
+ void setCudaUs(int64_t cuda_us) {
188
+ cuda_us_ = cuda_us;
189
+ }
190
+
191
+ void setSequenceNr(int64_t sequence_nr) {
192
+ sequence_nr_ = sequence_nr;
193
+ }
194
+
195
+ int64_t sequenceNr() const {
196
+ return sequence_nr_;
197
+ }
198
+
199
+ void setCorrelationId(uint64_t correlation_id) {
200
+ correlation_id_ = correlation_id;
201
+ }
202
+
203
+ uint64_t correlationId() const {
204
+ return correlation_id_;
205
+ }
206
+
207
+ const std::vector<std::string>& stack() const {
208
+ return stack_;
209
+ }
210
+
211
+ void setStack(const std::vector<std::string>& stack) {
212
+ stack_ = stack;
213
+ }
214
+
215
+ uint64_t fwdThreadId() const {
216
+ return fwd_thread_id_;
217
+ }
218
+
219
+ void setFwdThreadId(uint64_t fwd_thread_id) {
220
+ fwd_thread_id_ = fwd_thread_id;
221
+ }
222
+
223
+ uint8_t scope() const {
224
+ return scope_;
225
+ }
226
+
227
+ void setScope(uint8_t scope) {
228
+ scope_ = scope;
229
+ }
230
+
231
+ const std::unordered_map<std::string, c10::IValue>& extraArgs() const {
232
+ return extra_args_;
233
+ }
234
+
235
+ void setExtraArgs(std::unordered_map<std::string, c10::IValue>&& save_args) {
236
+ extra_args_ = std::move(save_args);
237
+ }
238
+
239
+ uint64_t flops() {
240
+ return flops_;
241
+ }
242
+
243
+ bool isAsync() {
244
+ return is_async_;
245
+ }
246
+
247
+ void setFlops(uint64_t flops) {
248
+ flops_ = flops;
249
+ }
250
+
251
+ private:
252
+ // signed to allow for negative intervals, initialized for safety.
253
+ int64_t cpu_ns_ = 0;
254
+ at::StringView name_;
255
+ EventKind kind_;
256
+ uint64_t thread_id_;
257
+ uint64_t fwd_thread_id_{0};
258
+ at::RecordFunctionHandle handle_{0};
259
+ std::vector<std::vector<int64_t>> shapes_;
260
+ int64_t cpu_memory_usage_ = 0;
261
+ int64_t cuda_memory_usage_ = 0;
262
+ c10::DeviceIndex device_ = -1;
263
+ torch::profiler::impl::ProfilerVoidEventStub cuda_event = nullptr;
264
+ int64_t node_id_ = 0;
265
+ bool is_remote_ = false;
266
+ int64_t cuda_us_ = -1;
267
+ int64_t sequence_nr_ = -1;
268
+ bool is_async_ = false;
269
+
270
+ std::vector<std::string> stack_;
271
+ uint8_t scope_{0};
272
+ uint64_t correlation_id_{0};
273
+ // Extra arguments for computing op flops
274
+ std::unordered_map<std::string, c10::IValue> extra_args_;
275
+ uint64_t flops_ = 0;
276
+ };
277
+
278
+ // a linked-list of fixed sized vectors, to avoid
279
+ // a std::vector resize from taking a large amount of time inside
280
+ // a profiling event
281
+ struct RangeEventList {
282
+ RangeEventList() {
283
+ events_.reserve(kReservedCapacity);
284
+ }
285
+
286
+ template <typename... Args>
287
+ void record(Args&&... args) {
288
+ std::lock_guard<std::mutex> guard(mutex_);
289
+ events_.emplace_back(std::forward<Args>(args)...);
290
+ }
291
+
292
+ std::vector<LegacyEvent> consolidate() {
293
+ std::lock_guard<std::mutex> lock(mutex_);
294
+ std::vector<LegacyEvent> result;
295
+ result.insert(
296
+ result.begin(),
297
+ std::make_move_iterator(events_.begin()),
298
+ std::make_move_iterator(events_.end()));
299
+ events_.erase(events_.begin(), events_.end());
300
+ return result;
301
+ }
302
+
303
+ size_t size() {
304
+ std::lock_guard<std::mutex> lock(mutex_);
305
+ return events_.size();
306
+ }
307
+
308
+ private:
309
+ // This mutex is used to serialize access when different threads are writing
310
+ // to the same instance of RangeEventList.
311
+ std::mutex mutex_;
312
+ std::vector<LegacyEvent> events_;
313
+
314
+ static const size_t kReservedCapacity = 1024;
315
+ };
316
+
317
+ // A struct to control settings of disableProfiler options.
318
+ struct TORCH_API ProfilerDisableOptions {
319
+ ProfilerDisableOptions() = default;
320
+ ProfilerDisableOptions(bool shouldCleanupTLSState, bool shouldConsolidate)
321
+ : cleanupTLSState(shouldCleanupTLSState),
322
+ consolidate(shouldConsolidate) {}
323
+ // Whether we should clean up profiler states that are thread local, such as
324
+ // ThreadLocalDebugInfo and thread local RecordFunction callbacks.
325
+ bool cleanupTLSState = true;
326
+ // Whether we should consolidate all currently recorded profiled events. If
327
+ // false, will not consolidate and other threads can continue to write to the
328
+ // event lists.
329
+ bool consolidate = true;
330
+ };
331
+
332
+ // NOTE: profiler mode is thread local, with automatic propagation
333
+ // across thread boundary (e.g. at::launch tasks)
334
+ TORCH_API void enableProfilerLegacy(
335
+ const torch::profiler::impl::ProfilerConfig&);
336
+ using thread_event_lists = std::vector<std::vector<LegacyEvent>>;
337
+ TORCH_API thread_event_lists disableProfilerLegacy(
338
+ c10::optional<ProfilerDisableOptions> profilerDisableOptions =
339
+ c10::nullopt);
340
+
341
+ // Adds profiledEvents to the current thread-local recorded events. Each event
342
+ // will be marked with node ID given by fromNodeId.
343
+ TORCH_API void addEventList(std::vector<LegacyEvent>&& profiledEvents);
344
+ // Writes profiled events to a stream.
345
+ TORCH_API void writeProfilerEventsToStream(
346
+ std::ostream& out,
347
+ const std::vector<LegacyEvent*>& events);
348
+
349
+ // Usage:
350
+ // {
351
+ // RecordProfile guard("filename.trace");
352
+ // // code you want to profile
353
+ // }
354
+ // Then open filename.trace in chrome://tracing
355
+ struct TORCH_API RecordProfile {
356
+ RecordProfile(std::ostream& out);
357
+ RecordProfile(const std::string& filename);
358
+
359
+ ~RecordProfile();
360
+
361
+ private:
362
+ void init();
363
+ std::unique_ptr<std::ofstream> file_;
364
+ std::ostream& out_;
365
+ void processEvents(const std::vector<LegacyEvent*>& events);
366
+ };
367
+
368
+ // A guard that enables the legacy profiler, taking in an optional callback to
369
+ // process the results. Usage:
370
+ // {
371
+ // TLSLegacyProfilerGuard g([](thread_event_lists profilerResults) {
372
+ // // process profilerResults
373
+ // });
374
+ // Code to profile
375
+ // }
376
+ struct TORCH_API TLSLegacyProfilerGuard {
377
+ explicit TLSLegacyProfilerGuard(
378
+ const torch::profiler::impl::ProfilerConfig& cfg,
379
+ c10::optional<std::function<void(const thread_event_lists&)>>
380
+ resultCallback = c10::nullopt,
381
+ c10::optional<ProfilerDisableOptions> profilerDisableOptions =
382
+ c10::nullopt)
383
+ : cb_(std::move(resultCallback)),
384
+ profilerDisableOptions_(profilerDisableOptions) {
385
+ enableProfilerLegacy(cfg);
386
+ }
387
+ ~TLSLegacyProfilerGuard() {
388
+ thread_event_lists event_lists =
389
+ disableProfilerLegacy(profilerDisableOptions_);
390
+ if (cb_) {
391
+ try {
392
+ (*cb_)(event_lists);
393
+ } catch (const std::exception& e) {
394
+ LOG(ERROR) << "Got error processing profiler events: " << e.what();
395
+ }
396
+ }
397
+ }
398
+
399
+ private:
400
+ c10::optional<std::function<void(const thread_event_lists&)>> cb_;
401
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
402
+ const c10::optional<ProfilerDisableOptions> profilerDisableOptions_;
403
+ };
404
+
405
+ } // namespace profiler
406
+ } // namespace torch::autograd
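The usage comments embedded above already describe the two entry points; the sketch below just puts the RecordProfile variant into a compilable form. The legacy profiler is slated for replacement by Kineto, so treat this as illustrative only, and the include path is assumed to be available from the installed headers.

#include <torch/csrc/autograd/profiler_legacy.h>
#include <torch/torch.h>

void profile_matmul() {
  // Enables the legacy profiler on construction and writes a Chrome trace on destruction.
  torch::autograd::profiler::RecordProfile guard("matmul.trace");
  auto a = torch::randn({128, 128});
  auto b = torch::randn({128, 128});
  auto c = a.matmul(b);  // ops executed in this scope are recorded
  (void)c;
}
// Open matmul.trace in chrome://tracing to inspect the recorded events.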
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <sstream>
4
+
5
+ namespace torch {
6
+ namespace autograd {
7
+ namespace utils {
8
+
9
+ inline std::string requires_grad_leaf_error(bool requires_grad) {
10
+ std::ostringstream oss;
11
+ oss << "you can only change requires_grad flags of leaf variables.";
12
+ if (requires_grad == false) {
13
+ oss << " If you want to use a computed variable in a subgraph "
14
+ "that doesn't require differentiation use "
15
+ "var_no_grad = var.detach().";
16
+ }
17
+ return oss.str();
18
+ }
19
+
20
+ } // namespace utils
21
+ } // namespace autograd
22
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+
5
+ namespace torch {
6
+ namespace autograd {
7
+ namespace utils {
8
+
9
+ // Helper functions to enforce the "Gradient Layout Contract" described in
10
+ // torch/csrc/autograd/functions/accumulate_grad.h.
11
+
12
+ // Checks if grad obeys the contract with variable.
13
+ inline bool obeys_layout_contract(
14
+ const at::Tensor& grad,
15
+ const at::Tensor& variable) {
16
+ TORCH_INTERNAL_ASSERT(!grad.is_sparse());
17
+ TORCH_INTERNAL_ASSERT(!grad.is_sparse_csr());
18
+ TORCH_INTERNAL_ASSERT(!variable.is_sparse_csr());
19
+
20
+ // NOLINTNEXTLINE(bugprone-branch-clone)
21
+ if (variable.is_nested()) {
22
+ // TODO: Nested Tensor does not have an implementation of detach. The
23
+ // current implementation of nested tensor likely does obey the gradient
24
+ // contract and should return true, but this would likely change in the
25
+ // future
26
+ return false;
27
+ } else if (variable.is_sparse()) {
28
+ // Gradient Layout Contract is not applicable for sparse layouts
29
+ return false;
30
+ } else if (variable.is_non_overlapping_and_dense()) {
31
+ // Only look at stride for dimensions that are not of size 1.
32
+ const auto& grad_sizes = grad.sym_sizes();
33
+ const auto& grad_strides = grad.sym_strides();
34
+ const auto& variable_strides = variable.sym_strides();
35
+ for (const auto idx : c10::irange(grad_sizes.size())) {
36
+ if (grad_sizes[idx] != 1) {
37
+ if (grad_strides[idx] != variable_strides[idx]) {
38
+ return false;
39
+ }
40
+ } else {
41
+ // This should not be needed but we don't check if a Tensor has views
42
+ // before stashing it. And 0-strided Tensors of size 1 are actually
43
+ // views for ops like cat.
44
+ // TODO: Actually detect views in the accumulateGrad function so that
45
+ // this Tensor is not considered at all.
46
+ if (grad_strides[idx] == 0) {
47
+ return false;
48
+ }
49
+ }
50
+ }
51
+ return true;
52
+ } else {
53
+ return grad.is_contiguous(at::MemoryFormat::Contiguous);
54
+ }
55
+ }
56
+
57
+ // Creates a clone of new_grad that obeys the contract with variable.
58
+ // The clone should attach to new_grad's history if GradMode::is_enabled().
59
+ inline at::Tensor clone_obey_contract(
60
+ const at::Tensor& new_grad,
61
+ const at::Tensor& variable) {
62
+ if (variable.is_non_overlapping_and_dense()) {
63
+ // (1)
64
+ // Does this dicey-looking sequence attach the result to new_grad's
65
+ // history if GradMode::is_enabled()? Yes, and @alband says it should.
66
+ return std::move(new_grad
67
+ .new_empty_strided_symint(
68
+ variable.sym_sizes(),
69
+ variable.sym_strides(),
70
+ variable.options().memory_format(c10::nullopt))
71
+ .copy_(new_grad));
72
+ } else {
73
+ // (2)
74
+ return new_grad.clone(at::MemoryFormat::Contiguous);
75
+ }
76
+ }
77
+
78
+ } // namespace utils
79
+ } // namespace autograd
80
+ } // namespace torch
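obeys_layout_contract answers whether a candidate gradient already matches the variable's strides (ignoring size-1 dimensions), and clone_obey_contract produces a conforming copy when it does not. A hedged sketch follows; these helpers live in an internal header, so their availability outside the torch build tree is an assumption.

#include <torch/csrc/autograd/utils/grad_layout_contract.h>
#include <torch/torch.h>

void demo_layout_contract() {
  auto variable = torch::randn({4, 5}).t();  // dense but non-contiguous (transposed strides)
  auto grad     = torch::randn({4, 5}).t();  // same sizes and strides as `variable`
  bool ok = torch::autograd::utils::obeys_layout_contract(grad, variable);  // expected: true
  // A contiguous gradient would not match the transposed strides, so it is cloned
  // into a tensor laid out like `variable`.
  auto fixed = torch::autograd::utils::clone_obey_contract(torch::randn({5, 4}), variable);
  (void)ok;
  (void)fixed;
}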
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/function_hook.h>
4
+
5
+ namespace torch {
6
+ namespace autograd {
7
+ namespace utils {
8
+
9
+ // Turns lambda into a torch::autograd::FunctionPostHook.
10
+ class LambdaPostHook : public torch::autograd::FunctionPostHook {
11
+ using variable_list = std::vector<torch::autograd::Variable>;
12
+ using fn_type =
13
+ std::function<variable_list(const variable_list&, const variable_list&)>;
14
+ using compiled_fn_type = std::function<void(CompiledNodeArgs&)>;
15
+
16
+ public:
17
+ // The lambda function takes as arguments the outputs and inputs of the
18
+ // autograd function and can modify the outputs of the autograd function by
19
+ // returning a new output if needed.
20
+ /* implicit */ LambdaPostHook(fn_type fn) : fn_(std::move(fn)) {}
21
+
22
+ LambdaPostHook(fn_type fn, compiled_fn_type compiled_fn)
23
+ : fn_(std::move(fn)), compiled_fn_(std::move(compiled_fn)) {}
24
+
25
+ variable_list operator()(
26
+ const variable_list& outputs,
27
+ const variable_list& inputs) override {
28
+ return fn_(outputs, inputs);
29
+ }
30
+
31
+ void compiled_args(CompiledNodeArgs& args) override {}
32
+
33
+ protected:
34
+ std::function<variable_list(const variable_list&, const variable_list&)> fn_;
35
+ compiled_fn_type compiled_fn_;
36
+ };
37
+
38
+ } // namespace utils
39
+ } // namespace autograd
40
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <torch/csrc/python_headers.h>
5
+
6
+ #include <torch/csrc/utils/python_arg_parser.h>
7
+
8
+ namespace torch {
9
+ namespace autograd {
10
+ namespace utils {
11
+
12
+ // The parameter allow_copy is to accept copy for Tensor.to (and by proxy
13
+ // PackedSequences.to) but not nn.Module.to.
14
+ inline std::tuple<
15
+ c10::optional<at::Device>,
16
+ c10::optional<at::ScalarType>,
17
+ bool,
18
+ bool,
19
+ c10::optional<at::MemoryFormat>>
20
+ parse_to_conversion(PythonArgs& r, bool allow_copy) {
21
+ if (r.idx == 0) {
22
+ if (!allow_copy && !r.isNone(3))
23
+ throw std::runtime_error(".to() does not accept copy argument");
24
+ return std::make_tuple(
25
+ r.deviceOptional(0),
26
+ r.scalartypeOptional(1),
27
+ r.toBool(2),
28
+ r.toBool(3),
29
+ r.memoryformatOptional(4));
30
+ } else if (r.idx == 1) {
31
+ if (!allow_copy && !r.isNone(2))
32
+ throw std::runtime_error(".to() does not accept copy argument");
33
+ return std::make_tuple(
34
+ c10::nullopt,
35
+ r.scalartype(0),
36
+ r.toBool(1),
37
+ r.toBool(2),
38
+ r.memoryformatOptional(3));
39
+ } else {
40
+ auto tensor = r.tensor(0);
41
+ if (!allow_copy && !r.isNone(2))
42
+ throw std::runtime_error(".to() does not accept copy argument");
43
+ return std::make_tuple(
44
+ tensor.device(),
45
+ tensor.scalar_type(),
46
+ r.toBool(1),
47
+ r.toBool(2),
48
+ r.memoryformatOptional(3));
49
+ }
50
+ }
51
+ } // namespace utils
52
+ } // namespace autograd
53
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ #include <c10/util/Exception.h>
3
+
4
+ #include <mutex>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace autograd {
9
+ namespace utils {
10
+
11
+ // Warning handler for multi-threaded contexts. Gather warnings from
12
+ // all threads into a single queue, then process together at the end
13
+ // in the main thread.
14
+ class DelayWarningHandler : public at::WarningHandler {
15
+ public:
16
+ ~DelayWarningHandler() override = default;
17
+ void replay_warnings();
18
+
19
+ private:
20
+ void process(const c10::Warning& warning) override;
21
+
22
+ std::vector<c10::Warning> warnings_;
23
+ std::mutex mutex_;
24
+ };
25
+
26
+ } // namespace utils
27
+ } // namespace autograd
28
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/wrap_outputs.h ADDED
@@ -0,0 +1,155 @@
1
+ #pragma once
2
+
3
+ // Wrap tensor operation outputs as PyObject*
4
+
5
+ #include <ATen/ScalarOps.h>
6
+ #include <ATen/core/Tensor.h>
7
+ #include <c10/util/irange.h>
8
+ #include <torch/csrc/python_headers.h>
9
+ #include <initializer_list>
10
+ #include <tuple>
11
+
12
+ #include <torch/csrc/Dtype.h>
13
+ #include <torch/csrc/DynamicTypes.h>
14
+ #include <torch/csrc/Layout.h>
15
+ #include <torch/csrc/QScheme.h>
16
+ #include <torch/csrc/autograd/python_variable.h>
17
+ #include <torch/csrc/autograd/variable.h>
18
+ #include <torch/csrc/utils/python_numbers.h>
19
+ #include <torch/csrc/utils/tensor_qschemes.h>
20
+
21
+ namespace torch {
22
+ namespace autograd {
23
+ namespace utils {
24
+
25
+ inline PyObject* wrap(bool value) {
26
+ if (value) {
27
+ Py_RETURN_TRUE;
28
+ } else {
29
+ Py_RETURN_FALSE;
30
+ }
31
+ }
32
+
33
+ inline PyObject* wrap(c10::DeviceIndex value) {
34
+ return THPUtils_packDeviceIndex(value);
35
+ }
36
+
37
+ inline PyObject* wrap(int64_t value) {
38
+ return THPUtils_packInt64(value);
39
+ }
40
+
41
+ inline PyObject* wrap(double value) {
42
+ return PyFloat_FromDouble(value);
43
+ }
44
+
45
+ inline PyObject* wrap(c10::complex<double> value) {
46
+ // I could probably also use FromComplex with a reinterpret cast,
47
+ // but... eh.
48
+ return PyComplex_FromDoubles(value.real(), value.imag());
49
+ }
50
+
51
+ inline PyObject* wrap(void* value) {
52
+ return THPUtils_packInt64(reinterpret_cast<intptr_t>(value));
53
+ }
54
+
55
+ inline PyObject* wrap(THPDtype* dtype) {
56
+ Py_INCREF(dtype);
57
+ return (PyObject*)dtype;
58
+ }
59
+
60
+ inline PyObject* wrap(at::ScalarType scalarType) {
61
+ return wrap(getTHPDtype(scalarType));
62
+ }
63
+
64
+ inline PyObject* wrap(THPLayout* layout) {
65
+ Py_INCREF(layout);
66
+ return (PyObject*)layout;
67
+ }
68
+
69
+ inline PyObject* wrap(at::Layout layout) {
70
+ return wrap(getTHPLayout(layout));
71
+ }
72
+
73
+ inline PyObject* wrap(at::Tensor tensor) {
74
+ return THPVariable_Wrap(Variable(std::move(tensor)));
75
+ }
76
+
77
+ inline PyObject* wrap(const at::Scalar& scalar) {
78
+ return wrap(scalar_to_tensor(scalar));
79
+ }
80
+
81
+ inline PyObject* wrap(at::QScheme qscheme) {
82
+ auto* thp_qscheme = torch::utils::getTHPQScheme(qscheme);
83
+ Py_INCREF(thp_qscheme);
84
+ return thp_qscheme;
85
+ }
86
+
87
+ inline PyObject* wrap(at::TensorList tl) {
88
+ auto r = THPObjectPtr{PyTuple_New(tl.size())};
89
+ if (!r)
90
+ throw python_error();
91
+ for (const auto i : c10::irange(tl.size())) {
92
+ PyTuple_SET_ITEM(r.get(), i, wrap(tl[i]));
93
+ }
94
+ return r.release();
95
+ }
96
+
97
+ inline PyObject* wrap(at::IntArrayRef list) {
98
+ auto r = THPObjectPtr{PyTuple_New(list.size())};
99
+ if (!r)
100
+ throw python_error();
101
+ for (const auto i : c10::irange(list.size())) {
102
+ PyTuple_SET_ITEM(r.get(), i, wrap(list[i]));
103
+ }
104
+ return r.release();
105
+ }
106
+
107
+ inline PyObject* wrap(at::Stream stream) {
108
+ return THPStream_Wrap(stream);
109
+ }
110
+
111
+ namespace detail {
112
+ template <typename F, typename Tuple, size_t... Is>
113
+ void apply_with_idx_impl(
114
+ const F& f,
115
+ Tuple& t,
116
+ std::index_sequence<Is...> /*indices*/) {
117
+ (void)std::initializer_list<int>{(f(std::get<Is>(t), Is), 0)...};
118
+ }
119
+
120
+ // For tuple(a, b, c), calls f(a, 0), f(b, 1), f(c, 2)
121
+ template <typename F, typename... Ts>
122
+ void apply_with_idx(const F& f, std::tuple<Ts...>& t) {
123
+ apply_with_idx_impl(f, t, std::index_sequence_for<Ts...>{});
124
+ }
125
+ } // namespace detail
126
+
127
+ template <typename... Ts>
128
+ PyObject* wrap(std::tuple<Ts...> values) {
129
+ auto r = THPObjectPtr{PyTuple_New(sizeof...(Ts))};
130
+ if (!r)
131
+ throw python_error();
132
+ detail::apply_with_idx(
133
+ [&](auto& value, size_t idx) {
134
+ PyTuple_SET_ITEM(r.get(), idx, wrap(std::move(value)));
135
+ },
136
+ values);
137
+ return r.release();
138
+ }
139
+
140
+ template <typename... Ts>
141
+ PyObject* wrap(PyTypeObject* type, std::tuple<Ts...> values) {
142
+ auto r = THPObjectPtr{PyStructSequence_New(type)};
143
+ if (!r)
144
+ throw python_error();
145
+ detail::apply_with_idx(
146
+ [&](auto& value, size_t idx) {
147
+ PyStructSequence_SET_ITEM(r.get(), idx, wrap(std::move(value)));
148
+ },
149
+ values);
150
+ return r.release();
151
+ }
152
+
153
+ } // namespace utils
154
+ } // namespace autograd
155
+ } // namespace torch
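These wrap() overloads are what the generated Python bindings use to turn C++ return values into PyObject*, including packing tuples element by element. The fragment below shows the tuple overload in isolation; it assumes the Python interpreter and torch's Python bindings are already initialized, which is only true inside an extension context.

#include <torch/csrc/autograd/utils/wrap_outputs.h>

PyObject* make_result(at::Tensor t, int64_t count) {
  // Builds a Python tuple (Tensor, int) via the overload set declared above.
  return torch::autograd::utils::wrap(std::make_tuple(std::move(t), count));
}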
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <string>
6
+
7
+ #include <c10/util/Registry.h>
8
+ #include <gloo/config.h>
9
+ #include <gloo/transport/device.h>
10
+
11
+ namespace c10d {
12
+
13
+ class TORCH_API GlooDeviceFactory {
14
+ public:
15
+ // Create new device instance for specific interface.
16
+ static std::shared_ptr<::gloo::transport::Device> makeDeviceForInterface(
17
+ const std::string& interface);
18
+
19
+ // Create new device instance for specific hostname or address.
20
+ static std::shared_ptr<::gloo::transport::Device> makeDeviceForHostname(
21
+ const std::string& hostname);
22
+ };
23
+
24
+ TORCH_DECLARE_SHARED_REGISTRY(
25
+ GlooDeviceRegistry,
26
+ ::gloo::transport::Device,
27
+ const std::string&, /* interface */
28
+ const std::string& /* hostname */);
29
+
30
+ } // namespace c10d
31
+
32
+ #endif // USE_C10D_GLOO
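A brief sketch of calling the factory above; the loopback address is only illustrative and a build with USE_C10D_GLOO is assumed:

    #include <torch/csrc/distributed/c10d/GlooDeviceFactory.hpp>

    // Resolve a Gloo transport device bound to a hostname or address.
    std::shared_ptr<::gloo::transport::Device> makeLoopbackDevice() {
      return c10d::GlooDeviceFactory::makeDeviceForHostname("127.0.0.1");
    }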
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp ADDED
@@ -0,0 +1,67 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Store.hpp>
4
+ #include <memory>
5
+
6
+ namespace c10d {
7
+
8
+ class TORCH_API PrefixStore : public Store {
9
+ public:
10
+ explicit PrefixStore(std::string prefix, c10::intrusive_ptr<Store> store);
11
+
12
+ using Store::set;
13
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
14
+
15
+ using Store::compareSet;
16
+ std::vector<uint8_t> compareSet(
17
+ const std::string& key,
18
+ const std::vector<uint8_t>& expectedValue,
19
+ const std::vector<uint8_t>& desiredValue) override;
20
+
21
+ std::vector<uint8_t> get(const std::string& key) override;
22
+
23
+ int64_t add(const std::string& key, int64_t value) override;
24
+
25
+ bool deleteKey(const std::string& key) override;
26
+
27
+ int64_t getNumKeys() override;
28
+
29
+ bool check(const std::vector<std::string>& keys) override;
30
+
31
+ void wait(const std::vector<std::string>& keys) override;
32
+
33
+ void wait(
34
+ const std::vector<std::string>& keys,
35
+ const std::chrono::milliseconds& timeout) override;
36
+
37
+ const std::chrono::milliseconds& getTimeout() const noexcept override;
38
+
39
+ void setTimeout(const std::chrono::milliseconds& timeout) override;
40
+
41
+ void append(const std::string& key, const std::vector<uint8_t>& value)
42
+ override;
43
+
44
+ std::vector<std::vector<uint8_t>> multiGet(
45
+ const std::vector<std::string>& keys) override;
46
+
47
+ void multiSet(
48
+ const std::vector<std::string>& keys,
49
+ const std::vector<std::vector<uint8_t>>& values) override;
50
+
51
+ // Returns true if this store support append, multiGet and multiSet
52
+ bool hasExtendedApi() const override;
53
+
54
+ c10::intrusive_ptr<Store> getUnderlyingStore();
55
+
56
+ // Recursively to fetch the store before layers of wrapping with PrefixStore.
57
+ c10::intrusive_ptr<Store> getUnderlyingNonPrefixStore();
58
+
59
+ protected:
60
+ std::string prefix_;
61
+ c10::intrusive_ptr<Store> store_;
62
+
63
+ std::string joinKey(const std::string& key);
64
+ std::vector<std::string> joinKeys(const std::vector<std::string>& keys);
65
+ };
66
+
67
+ } // namespace c10d
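A minimal sketch of wrapping an existing store so that every key lives under a common prefix; the prefix and key names are illustrative, and the concrete base store (for example a TCPStore) is assumed to be created elsewhere:

    #include <torch/csrc/distributed/c10d/PrefixStore.hpp>

    void useNamespacedStore(c10::intrusive_ptr<c10d::Store> base) {
      auto store = c10::make_intrusive<c10d::PrefixStore>("trainer0", base);
      // Keys set through the PrefixStore are joined with the prefix before
      // being forwarded to the underlying store.
      store->set("epoch", std::vector<uint8_t>{3});
      auto value = store->get("epoch");
      // Peel off all PrefixStore layers to reach the innermost store.
      auto raw = store->getUnderlyingNonPrefixStore();
    }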
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp ADDED
@@ -0,0 +1,271 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_MPI
4
+
5
+ #include <condition_variable>
6
+ #include <deque>
7
+ #include <exception>
8
+ #include <memory>
9
+ #include <mutex>
10
+ #include <thread>
11
+ #include <vector>
12
+
13
+ #include <ATen/core/ivalue.h>
14
+ #include <ATen/core/ivalue_inl.h>
15
+
16
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
17
+ #include <torch/csrc/distributed/c10d/Types.hpp>
18
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
19
+
20
+ #include <c10/util/CallOnce.h>
21
+
22
+ #include <mpi.h>
23
+
24
+ namespace c10d {
25
+
26
+ constexpr const char* MPI_BACKEND_NAME = "mpi";
27
+
28
+ // WorkEntry is the state associated with a single MPI run instance.
29
+ // It include the source Tensor list and destination Tensor list, as well as
30
+ // The actual run function that will operate either on src or dst or both.
31
+ struct WorkEntry {
32
+ explicit WorkEntry(
33
+ std::vector<at::Tensor>* srcPtr,
34
+ std::vector<at::Tensor>* dstPtr,
35
+ std::function<void(std::unique_ptr<WorkEntry>&)> run)
36
+ : dst(dstPtr ? *dstPtr : std::vector<at::Tensor>()), run(std::move(run)) {
37
+ if (srcPtr) {
38
+ src = *srcPtr;
39
+ }
40
+ }
41
+
42
+ // Not copyable
43
+ WorkEntry(const WorkEntry&) = delete;
44
+ // Not copy assignable
45
+ WorkEntry& operator=(const WorkEntry&) = delete;
46
+
47
+ // For input and output tensors (in-place), we will always use src
48
+ std::vector<at::Tensor> src;
49
+
50
+ // Copy of user provided outputs.
51
+ const std::vector<at::Tensor> dst;
52
+
53
+ // src rank returned, for recv only
54
+ int* srcRank = nullptr;
55
+ std::function<void(std::unique_ptr<WorkEntry>&)> run;
56
+ };
57
+
58
+ // ProcessGroupMPI implements MPI bindings for c10d.
59
+ //
60
+ // All functions on this class are expected to be called in the same
61
+ // order across processes in the group. This is the only way that we
62
+ // can guarantee to match up the same calls across processes.
63
+ //
64
+ // All MPI functions provided by this class is asynchronously scheduled on a
65
+ // Worker thread. Therefore, ProcessGroupMPI requires the MPI implementation
66
+ // that is used to have a minimum thread support value of MPI_THREAD_SERIALIZED.
67
+ // That is, The process may be multi-threaded, and multiple threads may make
68
+ // MPI calls, but only one at a time: MPI calls are not made concurrently from
69
+ // two distinct threads (all MPI calls are serialized). However, with
70
+ // MPI_THREAD_SERIALIZED, ProcessGroupMPI will only support a singe process
71
+ // group. In other words, no more than 1 process group can be created globally.
72
+ //
73
+ // If you would like to use multiple ProcessGroupMPI, it requires your MPI
74
+ // implementation to have a thread support value of MPI_THREAD_MULTIPLE, that
75
+ // is, multiple threads may call MPI, with no restriction.
76
+ //
77
+ // Also note that ProcessGroupMPI only supports a single Tensor operation. In
78
+ // other words, the size of the input Tensor vector should always be 1.
79
+ //
80
+ // CUDA tensor can be supported if the MPI used is CUDA-aware MPI, and
81
+ // ProcessGroupMPI will automatically detect this support.
82
+ class TORCH_API ProcessGroupMPI : public Backend {
83
+ public:
84
+ class WorkMPI : public Work {
85
+ public:
86
+ explicit WorkMPI(
87
+ std::vector<at::Tensor> outputTensors,
88
+ const char* profilingTitle = nullptr,
89
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
90
+ c10::nullopt)
91
+ : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors),
92
+ outputTensors_(std::move(outputTensors)),
93
+ future_(c10::make_intrusive<at::ivalue::Future>(
94
+ c10::ListType::create(c10::TensorType::get()))) {}
95
+
96
+ std::vector<at::Tensor> result() override;
97
+
98
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
99
+
100
+ protected:
101
+ friend class ProcessGroupMPI;
102
+
103
+ private:
104
+ void finishWorkMPI();
105
+ void finishWorkMPIError(std::exception_ptr eptr);
106
+
107
+ std::vector<at::Tensor> outputTensors_;
108
+ c10::intrusive_ptr<at::ivalue::Future> future_;
109
+ };
110
+
111
+ class AsyncWork : public Work {
112
+ public:
113
+ AsyncWork(
114
+ MPI_Request request,
115
+ std::vector<at::Tensor> outputTensors,
116
+ const char* profilingTitle = nullptr,
117
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
118
+ c10::nullopt);
119
+
120
+ ~AsyncWork() override;
121
+
122
+ bool isCompleted() override;
123
+
124
+ bool isSuccess() const override;
125
+
126
+ int sourceRank() const override;
127
+
128
+ bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
129
+
130
+ void abort() override;
131
+
132
+ std::vector<at::Tensor> result() override;
133
+
134
+ protected:
135
+ void populateException();
136
+
137
+ private:
138
+ const std::vector<at::Tensor> outputTensors_;
139
+ MPI_Request request_;
140
+ MPI_Status status_;
141
+ };
142
+
143
+ // Constructor will spawn up the worker thread loop
144
+ explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm);
145
+
146
+ ~ProcessGroupMPI() override;
147
+
148
+ // Abort the MPI program, needs to be called when exception is detected
149
+ void abort();
150
+
151
+ const std::string getBackendName() const override {
152
+ return std::string(MPI_BACKEND_NAME);
153
+ }
154
+
155
+ c10::intrusive_ptr<Work> broadcast(
156
+ std::vector<at::Tensor>& data,
157
+ const BroadcastOptions& opts = BroadcastOptions()) override;
158
+
159
+ c10::intrusive_ptr<Work> allreduce(
160
+ std::vector<at::Tensor>& tensors,
161
+ const AllreduceOptions& opts = AllreduceOptions()) override;
162
+
163
+ c10::intrusive_ptr<Work> allreduce_coalesced(
164
+ std::vector<at::Tensor>& tensors,
165
+ const AllreduceCoalescedOptions& opts =
166
+ AllreduceCoalescedOptions()) override;
167
+
168
+ c10::intrusive_ptr<Work> reduce(
169
+ std::vector<at::Tensor>& tensors,
170
+ const ReduceOptions& opts = ReduceOptions()) override;
171
+
172
+ c10::intrusive_ptr<Work> allgather(
173
+ std::vector<std::vector<at::Tensor>>& outputTensors,
174
+ std::vector<at::Tensor>& inputTensors,
175
+ const AllgatherOptions& opts = AllgatherOptions()) override;
176
+
177
+ c10::intrusive_ptr<Work> _allgather_base(
178
+ at::Tensor& outputbuffer,
179
+ at::Tensor& inputbuffer,
180
+ const AllgatherOptions& opts = AllgatherOptions()) override;
181
+
182
+ c10::intrusive_ptr<Work> allgather_coalesced(
183
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
184
+ std::vector<at::Tensor>& inputTensors,
185
+ const AllgatherOptions& opts = AllgatherOptions()) override;
186
+
187
+ c10::intrusive_ptr<Work> gather(
188
+ std::vector<std::vector<at::Tensor>>& outputTensors,
189
+ std::vector<at::Tensor>& inputTensors,
190
+ const GatherOptions& opts = GatherOptions()) override;
191
+
192
+ c10::intrusive_ptr<Work> scatter(
193
+ std::vector<at::Tensor>& outputTensors,
194
+ std::vector<std::vector<at::Tensor>>& inputTensors,
195
+ const ScatterOptions& opts = ScatterOptions()) override;
196
+
197
+ c10::intrusive_ptr<Work> reduce_scatter(
198
+ std::vector<at::Tensor>& outputTensors,
199
+ std::vector<std::vector<at::Tensor>>& inputTensors,
200
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
201
+
202
+ c10::intrusive_ptr<Work> alltoall_base(
203
+ at::Tensor& outputTensor,
204
+ at::Tensor& inputTensor,
205
+ std::vector<int64_t>& outputSplitSizes,
206
+ std::vector<int64_t>& inputSplitSizes,
207
+ const AllToAllOptions& opts = AllToAllOptions()) override;
208
+
209
+ c10::intrusive_ptr<Work> alltoall(
210
+ std::vector<at::Tensor>& outputTensors,
211
+ std::vector<at::Tensor>& inputTensors,
212
+ const AllToAllOptions& opts = AllToAllOptions()) override;
213
+
214
+ c10::intrusive_ptr<Work> send(
215
+ std::vector<at::Tensor>& tensors,
216
+ int dstRank,
217
+ int tag) override;
218
+
219
+ c10::intrusive_ptr<Work> recv(
220
+ std::vector<at::Tensor>& tensors,
221
+ int srcRank,
222
+ int tag) override;
223
+
224
+ c10::intrusive_ptr<Work> recvAnysource(
225
+ std::vector<at::Tensor>& tensor,
226
+ int tag) override;
227
+
228
+ c10::intrusive_ptr<Work> barrier(
229
+ const BarrierOptions& opts = BarrierOptions()) override;
230
+
231
+ // Creates a new ProcessGroupMPI; initializes MPI if it has not been initialized yet
232
+ static c10::intrusive_ptr<ProcessGroupMPI> createProcessGroupMPI(
233
+ std::vector<int> ranks = {});
234
+
235
+ protected:
236
+ using WorkType =
237
+ std::tuple<std::unique_ptr<WorkEntry>, c10::intrusive_ptr<WorkMPI>>;
238
+ // Worker thread loop
239
+ void runLoop();
240
+ // Helper function that is called by the destructor
241
+ void destroy();
242
+
243
+ c10::intrusive_ptr<Work> enqueue(
244
+ std::unique_ptr<WorkEntry> entry,
245
+ const char* profilingTitle = nullptr,
246
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
247
+ c10::nullopt);
248
+
249
+ bool stop_;
250
+
251
+ std::mutex pgMutex_;
252
+ std::thread workerThread_;
253
+
254
+ std::deque<WorkType> queue_;
255
+ std::condition_variable queueProduceCV_;
256
+ std::condition_variable queueConsumeCV_;
257
+
258
+ // Global states
259
+ static void initMPIOnce();
260
+ static void mpiExit();
261
+ static c10::once_flag onceFlagInitMPI;
262
+
263
+ static std::mutex pgGlobalMutex_;
264
+ static int mpiThreadSupport_;
265
+
266
+ MPI_Comm pgComm_;
267
+ };
268
+
269
+ } // namespace c10d
270
+
271
+ #endif // USE_C10D_MPI
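A hedged sketch of the single-tensor calling convention described in the class comment; it assumes a build with USE_C10D_MPI and a process started under an MPI launcher:

    #include <ATen/ATen.h>
    #include <torch/csrc/distributed/c10d/ProcessGroupMPI.hpp>

    void allreduceOnce() {
      // Initializes MPI if needed and returns the process group.
      auto pg = c10d::ProcessGroupMPI::createProcessGroupMPI();
      // The input vector must hold exactly one tensor, per the class comment.
      std::vector<at::Tensor> tensors{at::ones({4})};
      auto work = pg->allreduce(tensors);
      work->wait(); // blocks until the worker thread has run the MPI call
    }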
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp ADDED
@@ -0,0 +1,353 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_UCC
4
+
5
+ #include <torch/csrc/distributed/c10d/UCCUtils.hpp>
6
+
7
+ #include <exception>
8
+ #include <memory>
9
+ #include <mutex>
10
+ #include <queue>
11
+ #include <thread>
12
+ #include <vector>
13
+
14
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
15
+ #include <torch/csrc/distributed/c10d/Store.hpp>
16
+ #include <torch/csrc/distributed/c10d/Types.hpp>
17
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
18
+ #ifdef USE_CUDA
19
+ #include <ATen/cuda/CUDAEvent.h>
20
+ #include <c10/cuda/CUDAStream.h>
21
+ #endif
22
+
23
+ namespace c10d {
24
+
25
+ #define TORCH_UCC_DEVICE_NOT_SET -2
26
+
27
+ #ifdef USE_CUDA
28
+ #define SAVE_TENSORS(_TENSORS, _DATA) \
29
+ do { \
30
+ if ((_TENSORS)[0].device().is_cuda()) { \
31
+ for (const auto i : c10::irange((_TENSORS).size())) { \
32
+ c10::cuda::CUDACachingAllocator::recordStream( \
33
+ (_TENSORS)[i].storage().data_ptr(), (*stream)); \
34
+ } \
35
+ } else { \
36
+ (_DATA) = (_TENSORS); \
37
+ } \
38
+ } while (0)
39
+
40
+ #else
41
+ #define SAVE_TENSORS(_TENSORS, _DATA) (_DATA) = (_TENSORS);
42
+ #endif
43
+
44
+ constexpr const char* UCC_BACKEND_NAME = "ucc";
45
+
46
+ struct event_pool_t {
47
+ #ifdef USE_CUDA
48
+ std::queue<std::unique_ptr<at::cuda::CUDAEvent>> event_pool;
49
+ #endif
50
+ std::mutex event_pool_mutex;
51
+ };
52
+
53
+ class Comm;
54
+
55
+ // UCC does not support multiple CUDA devices per process.
56
+ class TORCH_API ProcessGroupUCC : public Backend {
57
+ private:
58
+ void set_timeout(ucc_coll_args_t& args);
59
+
60
+ public:
61
+ class WorkData {
62
+ public:
63
+ std::vector<at::Tensor> src;
64
+ std::vector<at::Tensor> dst;
65
+ std::vector<at::Tensor> flat;
66
+ WorkData() {}
67
+ virtual ~WorkData() = default;
68
+ };
69
+ class AlltoallWorkData : public WorkData {
70
+ public:
71
+ AlltoallWorkData(int size)
72
+ : send_lengths(size),
73
+ send_offsets(size),
74
+ recv_lengths(size),
75
+ recv_offsets(size) {}
76
+ std::vector<uint64_t> send_lengths;
77
+ std::vector<uint64_t> send_offsets;
78
+ std::vector<uint64_t> recv_lengths;
79
+ std::vector<uint64_t> recv_offsets;
80
+ };
81
+
82
+ class AllgathervWorkData : public WorkData {
83
+ public:
84
+ AllgathervWorkData(int size) : recv_lengths(size), recv_offsets(size) {}
85
+ std::vector<uint64_t> recv_lengths;
86
+ std::vector<uint64_t> recv_offsets;
87
+ };
88
+
89
+ class ScattervWorkData : public WorkData {
90
+ public:
91
+ ScattervWorkData(int size) : send_lengths(size), send_offsets(size) {}
92
+ std::vector<uint64_t> send_lengths;
93
+ std::vector<uint64_t> send_offsets;
94
+ };
95
+
96
+ class ProgressEntry {
97
+ friend class ProcessGroupUCC;
98
+ friend class Comm;
99
+
100
+ public:
101
+ ProgressEntry(CommBase* comm, ucc_coll_req_h request)
102
+ : status_(UCC_INPROGRESS), comm_(comm), request_(request) {}
103
+ // Finalizes UCC status or exception of collective request.
104
+ void finalize(std::exception_ptr eptr = nullptr);
105
+ ucc_status_t status_;
106
+ CommBase* comm_;
107
+ ucc_coll_req_h request_;
108
+ std::unique_ptr<WorkData> data;
109
+ c10::intrusive_ptr<c10::ivalue::Future> future_;
110
+ std::exception_ptr eptr_;
111
+ };
112
+
113
+ class WorkUCC : public Work {
114
+ friend class ProcessGroupUCC;
115
+ friend class Comm;
116
+
117
+ public:
118
+ WorkUCC(
119
+ OpType opType,
120
+ uint64_t seq,
121
+ const char* prof_title,
122
+ const c10::optional<std::vector<at::Tensor>>& inputs,
123
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger)
124
+ : Work(-1, opType, prof_title, inputs), logger_(logger), seq_(seq) {}
125
+ ~WorkUCC();
126
+ void setException();
127
+ void setAndThrowException();
128
+ bool isCompleted() override;
129
+ bool isSuccess() const override;
130
+ bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
131
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
132
+ std::vector<at::Tensor> result() override;
133
+ int sourceRank() const override;
134
+ #ifdef USE_CUDA
135
+ std::unique_ptr<at::cuda::CUDAEvent> fence = nullptr;
136
+ event_pool_t* ep = nullptr;
137
+ #endif
138
+ int sourceRank_;
139
+
140
+ protected:
141
+ std::shared_ptr<ProgressEntry> entry_;
142
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger_;
143
+ uint64_t seq_;
144
+
145
+ private:
146
+ // The future returned by getFuture.
147
+ c10::intrusive_ptr<at::ivalue::Future> future_;
148
+ // Store a reference to collective's outputs, used by result
149
+ std::shared_ptr<std::vector<at::Tensor>> outputs_;
150
+ };
151
+
152
+ explicit ProcessGroupUCC(
153
+ const c10::intrusive_ptr<Store>& store,
154
+ int rank = -1,
155
+ int size = -1,
156
+ std::chrono::duration<float> timeout = kBackendDefaultTimeout);
157
+
158
+ void initComm(c10::Device dev);
159
+
160
+ ~ProcessGroupUCC() override;
161
+
162
+ const std::string getBackendName() const override {
163
+ return std::string(UCC_BACKEND_NAME);
164
+ }
165
+
166
+ #ifdef USE_CUDA
167
+ std::unique_ptr<at::cuda::CUDAEvent> getPooledEvent();
168
+ #endif
169
+
170
+ // Performs a health check by initializing dummy UCC & UCX communicators and
171
+ // then destroying them. This will help indicate and signal any
172
+ // UCC/UCX-related issues prior to the first collective. The actual
173
+ // initialization and subsequent destruction are run on a separate thread, and
174
+ // the main thread is signalled about timeouts/errors to report to the
175
+ // application.
176
+ void runHealthCheck();
177
+
178
+ template <typename PreProcess, typename PostProcess>
179
+ c10::intrusive_ptr<Work> collective_post(
180
+ OpType opType,
181
+ PreProcess preproc,
182
+ PostProcess postproc,
183
+ ucc_coll_args_t& coll,
184
+ std::unique_ptr<ProcessGroupUCC::WorkData> data,
185
+ c10::Device dev,
186
+ std::vector<at::Tensor>& inputTensors,
187
+ std::vector<at::Tensor>& outputTensors,
188
+ const char* prof_title);
189
+
190
+ c10::intrusive_ptr<Work> broadcast(
191
+ std::vector<at::Tensor>& data,
192
+ const BroadcastOptions& opts = BroadcastOptions()) override;
193
+
194
+ c10::intrusive_ptr<Work> allreduce(
195
+ std::vector<at::Tensor>& tensors,
196
+ const AllreduceOptions& opts = AllreduceOptions()) override;
197
+
198
+ c10::intrusive_ptr<Work> allreduce_coalesced(
199
+ std::vector<at::Tensor>& tensors,
200
+ const AllreduceCoalescedOptions& opts =
201
+ AllreduceCoalescedOptions()) override;
202
+
203
+ c10::intrusive_ptr<Work> reduce(
204
+ std::vector<at::Tensor>& tensors,
205
+ const ReduceOptions& opts = ReduceOptions()) override;
206
+
207
+ c10::intrusive_ptr<Work> allgather(
208
+ std::vector<std::vector<at::Tensor>>& outputTensors,
209
+ std::vector<at::Tensor>& inputTensors,
210
+ const AllgatherOptions& opts = AllgatherOptions()) override;
211
+
212
+ c10::intrusive_ptr<Work> _allgather_base(
213
+ at::Tensor& outputBuffer,
214
+ at::Tensor& inputBuffer,
215
+ const AllgatherOptions& opts = AllgatherOptions()) override;
216
+
217
+ c10::intrusive_ptr<Work> barrier(
218
+ const BarrierOptions& opts = BarrierOptions()) override;
219
+
220
+ c10::intrusive_ptr<Work> gather(
221
+ std::vector<std::vector<at::Tensor>>& outputTensors,
222
+ std::vector<at::Tensor>& inputTensors,
223
+ const GatherOptions& opts = GatherOptions()) override;
224
+
225
+ c10::intrusive_ptr<Work> scatter(
226
+ std::vector<at::Tensor>& outputTensors,
227
+ std::vector<std::vector<at::Tensor>>& inputTensors,
228
+ const ScatterOptions& opts = ScatterOptions()) override;
229
+
230
+ c10::intrusive_ptr<Work> reduce_scatter(
231
+ std::vector<at::Tensor>& outputTensors,
232
+ std::vector<std::vector<at::Tensor>>& inputTensors,
233
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
234
+
235
+ c10::intrusive_ptr<Work> alltoall_base(
236
+ at::Tensor& outputTensor,
237
+ at::Tensor& inputTensor,
238
+ std::vector<int64_t>& outputSplitSizes,
239
+ std::vector<int64_t>& inputSplitSizes,
240
+ const AllToAllOptions& opts = AllToAllOptions()) override;
241
+
242
+ c10::intrusive_ptr<Work> alltoall(
243
+ std::vector<at::Tensor>& outputTensors,
244
+ std::vector<at::Tensor>& inputTensors,
245
+ const AllToAllOptions& opts = AllToAllOptions()) override;
246
+
247
+ c10::intrusive_ptr<Work> send(
248
+ std::vector<at::Tensor>& tensors,
249
+ int dstRank,
250
+ int tag) override;
251
+
252
+ c10::intrusive_ptr<Work> recv(
253
+ std::vector<at::Tensor>& tensors,
254
+ int srcRank,
255
+ int tag) override;
256
+
257
+ // Counter for the sequence number of UCC collective_post calls.
258
+ uint64_t seq_{0};
259
+
260
+ // Agrees on an initial sequence number for the whole group by having rank 0
261
+ // create it and broadcast it to other ranks using the store.
262
+ void setSequenceNumberForGroup() override;
263
+
264
+ // Retrieves the current sequence number for the whole group, which should be
265
+ // in sync. If the returned number is not consistent across the group, it
266
+ // may indicate that there is some sort of collective desynchronization.
267
+ uint64_t getSequenceNumberForGroup() override;
268
+
269
+ static c10::intrusive_ptr<Backend> createProcessGroupUCC(
270
+ const c10::intrusive_ptr<::c10d::Store>& store,
271
+ int rank,
272
+ int size,
273
+ const std::chrono::duration<float>& timeout);
274
+
275
+ protected:
276
+ const std::chrono::duration<float> timeout_;
277
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob;
278
+ std::shared_ptr<Comm> comm = {nullptr};
279
+ uint32_t comm_id;
280
+ ucc_team_h team{nullptr};
281
+ ucc_ee_h cuda_ee{nullptr};
282
+ ucc_ee_h cuda_ee_p2p[2]{nullptr, nullptr};
283
+
284
+ #ifdef USE_CUDA
285
+ std::unique_ptr<at::cuda::CUDAStream> stream = nullptr;
286
+ std::unique_ptr<at::cuda::CUDAStream> stream_p2p[2] = {nullptr, nullptr};
287
+ event_pool_t ep;
288
+ #endif
289
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
290
+ };
291
+
292
+ class Comm {
293
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
294
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob;
295
+ CommUCC ucc_comm;
296
+ std::mutex mutex;
297
+ std::thread progress_thread;
298
+ std::condition_variable queue_produce_cv;
299
+ std::condition_variable queue_consume_cv;
300
+ std::deque<std::shared_ptr<ProcessGroupUCC::ProgressEntry>> progress_queue;
301
+ bool stop_progress_loop;
302
+ bool collective_inprogress;
303
+ torch_ucc_phase_t finalize_phase;
304
+
305
+ public:
306
+ c10::DeviceIndex cuda_device_index;
307
+ Comm(
308
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger,
309
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
310
+ c10::Device dev,
311
+ bool is_health_check);
312
+
313
+ ~Comm();
314
+
315
+ void ucc_create_team(
316
+ ucc_team_h& team,
317
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob);
318
+
319
+ void ucc_destroy_team(ucc_team_h& team);
320
+
321
+ c10::intrusive_ptr<Work> enqueue_p2p(
322
+ OpType opType,
323
+ ucc_coll_req_h request,
324
+ const char* prof_title);
325
+
326
+ #ifdef USE_CUDA
327
+ void enqueue_cuda_collective(
328
+ std::unique_ptr<ProcessGroupUCC::WorkData> data,
329
+ c10::intrusive_ptr<ProcessGroupUCC::WorkUCC> work,
330
+ ucc_coll_args_t& coll,
331
+ ucc_team_h team,
332
+ ucc_ee_h ee);
333
+ #endif
334
+
335
+ void enqueue_collective(
336
+ std::unique_ptr<ProcessGroupUCC::WorkData> data,
337
+ c10::intrusive_ptr<ProcessGroupUCC::WorkUCC> work,
338
+ ucc_coll_args_t& coll,
339
+ ucc_team_h team);
340
+
341
+ static std::shared_ptr<Comm> get_comm(
342
+ uint32_t& id,
343
+ c10::Device dev,
344
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
345
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger,
346
+ bool is_health_check = false);
347
+
348
+ void progress_loop();
349
+ };
350
+
351
+ } // namespace c10d
352
+
353
+ #endif // USE_C10D_UCC
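A minimal construction sketch, assuming USE_C10D_UCC, a rendezvous store shared by all ranks, and rank/size supplied by the launcher; the 300-second timeout is an arbitrary example:

    #include <torch/csrc/distributed/c10d/ProcessGroupUCC.hpp>

    c10::intrusive_ptr<c10d::Backend> makeUccBackend(
        const c10::intrusive_ptr<c10d::Store>& store, int rank, int size) {
      // std::chrono::seconds converts implicitly to std::chrono::duration<float>.
      return c10d::ProcessGroupUCC::createProcessGroupUCC(
          store, rank, size, std::chrono::seconds(300));
    }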
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp ADDED
@@ -0,0 +1,77 @@
1
+ #pragma once
2
+
3
+ #include <chrono>
4
+ #include <thread>
5
+ #include <vector>
6
+
7
+ #include <torch/csrc/distributed/c10d/TCPStore.hpp>
8
+ #include <torch/csrc/distributed/c10d/socket.h>
9
+
10
+ #ifdef _WIN32
11
+ #include <io.h>
12
+ #include <winsock2.h>
13
+ #else
14
+ #include <poll.h>
15
+ #include <unistd.h>
16
+ #endif
17
+
18
+ namespace c10d {
19
+ namespace detail {
20
+
21
+ // Magic number for client validation.
22
+ static const uint32_t validationMagicNumber = 0x3C85F7CE;
23
+
24
+ enum class QueryType : uint8_t {
25
+ VALIDATE,
26
+ SET,
27
+ COMPARE_SET,
28
+ GET,
29
+ ADD,
30
+ CHECK,
31
+ WAIT,
32
+ GETNUMKEYS,
33
+ DELETE_KEY,
34
+ APPEND,
35
+ MULTI_GET,
36
+ MULTI_SET,
37
+ CANCEL_WAIT,
38
+ };
39
+
40
+ enum class CheckResponseType : uint8_t { READY, NOT_READY };
41
+
42
+ enum class WaitResponseType : uint8_t { STOP_WAITING, WAIT_CANCELED };
43
+
44
+ // Abstract base class to handle thread state for TCPStoreMasterDaemon.
45
+ // Contains the windows/unix implementations to signal a
46
+ // shutdown sequence for the thread
47
+ class BackgroundThread {
48
+ public:
49
+ explicit BackgroundThread();
50
+
51
+ virtual ~BackgroundThread() = 0;
52
+ virtual std::uint16_t port() const = 0;
53
+
54
+ void start();
55
+ bool stop_requested();
56
+
57
+ protected:
58
+ void dispose();
59
+ virtual void run() = 0;
60
+ virtual void stop() = 0;
61
+ bool is_running() {
62
+ return is_running_.load();
63
+ }
64
+
65
+ private:
66
+ std::atomic<bool> is_running_;
67
+ std::thread daemonThread_{};
68
+ };
69
+
70
+ std::unique_ptr<BackgroundThread> create_tcpstore_backend(
71
+ const TCPStoreOptions& opts);
72
+ std::unique_ptr<BackgroundThread> create_libuv_tcpstore_backend(
73
+ const TCPStoreOptions& opts);
74
+ bool is_libuv_tcpstore_backend_available();
75
+
76
+ } // namespace detail
77
+ } // namespace c10d
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp ADDED
@@ -0,0 +1,180 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Store.hpp>
4
+
5
+ #include <chrono>
6
+ #include <cstdint>
7
+
8
+ #include <ATen/core/Tensor.h>
9
+ #include <ATen/core/ivalue.h>
10
+
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/intrusive_ptr.h>
13
+
14
+ namespace c10d {
15
+
16
+ // Base class for supplementary data potentially needed by ReduceOps
17
+ struct TORCH_API _SupplementBase : torch::CustomClassHolder {
18
+ ~_SupplementBase() override = default;
19
+ };
20
+
21
+ // Supplementary data specific to NCCL PREMUL_SUM
22
+ // The point of use in ProcessGroupNCCL knows how to unpack it.
23
+ struct NCCLPreMulSumSupplement : _SupplementBase {
24
+ double double_factor{0.0};
25
+ at::Tensor tensor_factor;
26
+ NCCLPreMulSumSupplement(double f) : double_factor{f} {}
27
+ NCCLPreMulSumSupplement(at::Tensor t) : tensor_factor{std::move(t)} {
28
+ TORCH_CHECK_EQ(tensor_factor.numel(), 1);
29
+ }
30
+ };
31
+
32
+ // Other ReduceOps that need different supplementary data can also
33
+ // derive from _SupplementBase.
34
+ struct TORCH_API ReduceOp : torch::CustomClassHolder {
35
+ // note(crcrpar): RedOpType could be defined outside of `ReduceOp`
36
+ enum RedOpType : uint8_t {
37
+ SUM = 0,
38
+ AVG = 1,
39
+ PRODUCT = 2,
40
+ MIN = 3,
41
+ MAX = 4,
42
+ BAND = 5, // Bitwise AND
43
+ BOR = 6, // Bitwise OR
44
+ BXOR = 7, // Bitwise XOR
45
+ PREMUL_SUM = 8, // Multiply by a user-supplied constant before summing.
46
+ UNUSED = 9
47
+ };
48
+
49
+ ReduceOp() = default;
50
+
51
+ ReduceOp(RedOpType op) : op_(op) {
52
+ TORCH_INTERNAL_ASSERT(
53
+ op_ != PREMUL_SUM,
54
+ "Use `torch.distributed._make_nccl_premul_sum` to create an instance of ReduceOp with PREMUL_SUM");
55
+ }
56
+
57
+ ReduceOp(
58
+ RedOpType op,
59
+ c10::intrusive_ptr<_SupplementBase> optional_supplement) {
60
+ if (optional_supplement.get()) {
61
+ op_ = op;
62
+ } else {
63
+ supplement_ = optional_supplement;
64
+ }
65
+ }
66
+
67
+ // The heap resource supplement_, if it exists, is managed by a
68
+ // c10::intrusive_ptr, so constructors and operator= can be simple
69
+ ReduceOp(const ReduceOp& other)
70
+ : op_(other.op_), supplement_(other.supplement_) {}
71
+
72
+ const ReduceOp& operator=(const ReduceOp& other) {
73
+ op_ = other.op_;
74
+ supplement_ = other.supplement_;
75
+ return *this;
76
+ }
77
+
78
+ operator RedOpType() const {
79
+ return op_;
80
+ }
81
+
82
+ bool operator==(const std::uint8_t other) {
83
+ TORCH_INTERNAL_ASSERT(other < 9, "Invalid other op value");
84
+ return other == op_;
85
+ }
86
+
87
+ bool operator==(const ReduceOp::RedOpType other) {
88
+ return *this == static_cast<std::uint8_t>(other);
89
+ }
90
+
91
+ // todo(crcrpar): Handle `RedOpType::PREMUL_SUM` with its scaling factor.
92
+ bool operator==(const ReduceOp& other) {
93
+ return *this == other.op_;
94
+ }
95
+
96
+ RedOpType op_ = SUM;
97
+ // supplement_ is "type-erased" storage for optional supplementary
98
+ // data the op might need.
99
+ // The point of use will know the derived type supplement_ really is,
100
+ // and downcast its pointer to extract the data as the needed type(s).
101
+ // Right now, only PREMUL_SUM needs supplementary data, but the same
102
+ // mechanism could extend to support other nontrivial reduce ops with
103
+ // different supplementary payloads.
104
+ c10::intrusive_ptr<_SupplementBase> supplement_;
105
+ };
106
+
107
+ template <typename T>
108
+ ReduceOp makeNCCLPreMulSum(const T& factor) {
109
+ ReduceOp rop;
110
+ rop.op_ = ReduceOp::PREMUL_SUM;
111
+ rop.supplement_ = c10::make_intrusive<NCCLPreMulSumSupplement>(factor);
112
+ return rop;
113
+ }
114
+
115
+ constexpr auto kUnsetTimeout = std::chrono::milliseconds(-1);
116
+
117
+ struct BroadcastOptions {
118
+ int64_t rootRank = 0;
119
+ int64_t rootTensor = 0;
120
+ std::chrono::milliseconds timeout = kUnsetTimeout;
121
+ bool asyncOp = true;
122
+ };
123
+
124
+ struct AllreduceOptions {
125
+ ReduceOp reduceOp = ReduceOp::SUM;
126
+ std::chrono::milliseconds timeout = kUnsetTimeout;
127
+ c10::optional<at::Tensor> sparseIndices = c10::nullopt;
128
+ };
129
+
130
+ struct AllreduceCoalescedOptions : AllreduceOptions {};
131
+
132
+ struct ReduceOptions {
133
+ ReduceOp reduceOp = ReduceOp::SUM;
134
+ int64_t rootRank = 0;
135
+ int64_t rootTensor = 0;
136
+ std::chrono::milliseconds timeout = kUnsetTimeout;
137
+ };
138
+
139
+ struct AllgatherOptions {
140
+ std::chrono::milliseconds timeout = kUnsetTimeout;
141
+ bool asyncOp = true;
142
+ };
143
+
144
+ struct GatherOptions {
145
+ int64_t rootRank = 0;
146
+ std::chrono::milliseconds timeout = kUnsetTimeout;
147
+ };
148
+
149
+ struct ScatterOptions {
150
+ int64_t rootRank = 0;
151
+ std::chrono::milliseconds timeout = kUnsetTimeout;
152
+ bool asyncOp = true;
153
+ };
154
+
155
+ struct ReduceScatterOptions {
156
+ ReduceOp reduceOp = ReduceOp::SUM;
157
+ std::chrono::milliseconds timeout = kUnsetTimeout;
158
+ bool asyncOp = true;
159
+ };
160
+
161
+ struct AllToAllOptions {
162
+ std::chrono::milliseconds timeout = kUnsetTimeout;
163
+ };
164
+
165
+ struct BarrierOptions {
166
+ std::vector<int64_t> device_ids;
167
+ std::chrono::milliseconds timeout = kUnsetTimeout;
168
+ c10::optional<at::Device> device;
169
+ };
170
+
171
+ struct DistributedBackendOptions {
172
+ c10::intrusive_ptr<::c10d::Store> store;
173
+ int group_rank;
174
+ int group_size;
175
+ std::chrono::duration<float> timeout;
176
+ std::string group_id;
177
+ std::vector<int64_t> global_ranks_in_group;
178
+ };
179
+
180
+ } // namespace c10d
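A short sketch of building reduction options from the types above; the 0.5 scaling factor is illustrative, and PREMUL_SUM must go through makeNCCLPreMulSum because the plain RedOpType constructor rejects it:

    #include <torch/csrc/distributed/c10d/Types.hpp>

    c10d::AllreduceOptions scaledSumOptions() {
      c10d::AllreduceOptions opts;
      // Each contribution is multiplied by 0.5 before the summation.
      opts.reduceOp = c10d::makeNCCLPreMulSum(0.5);
      opts.timeout = std::chrono::seconds(60); // converts to milliseconds
      return opts;
    }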
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/intra_node_comm.hpp ADDED
@@ -0,0 +1,121 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/cuda/CUDAEvent.h>
5
+ #include <c10/cuda/CUDAStream.h>
6
+ #include <torch/csrc/distributed/c10d/Store.hpp>
7
+ #include <torch/csrc/distributed/c10d/Work.hpp>
8
+
9
+ namespace c10d {
10
+ namespace intra_node_comm {
11
+
12
+ constexpr size_t kMaxDevices = 8;
13
+ constexpr size_t kDefaultBufferSize = 10 * 1024 * 1024;
14
+
15
+ using NvlMesh = std::array<std::array<size_t, kMaxDevices>, kMaxDevices>;
16
+ using HybridCubeMesh = std::array<std::array<int, 4>, kMaxDevices>;
17
+
18
+ enum class Topology { UNKNOWN = 0, FULLY_CONNECTED = 1, HYBRID_CUBE_MESH = 2 };
19
+
20
+ enum class AllReduceAlgo { NONE = 0, ONE_SHOT = 1, TWO_SHOT = 2, HCM = 3 };
21
+
22
+ class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target {
23
+ public:
24
+ IntraNodeComm(
25
+ Topology topology,
26
+ std::array<void*, kMaxDevices> p2pStates,
27
+ std::array<void*, kMaxDevices> buffers,
28
+ void* p2pStatesDev,
29
+ void* buffersDev,
30
+ void* topoInfo,
31
+ size_t rank,
32
+ size_t worldSize,
33
+ size_t bufferSize = kDefaultBufferSize);
34
+
35
+ ~IntraNodeComm();
36
+
37
+ /**
38
+ * Rendezvous via a c10d::Store.
39
+ * This function may return nullptr if intra-node comm is not applicable.
40
+ * It guarantees that all participants either succeed or abort.
41
+ */
42
+ static c10::intrusive_ptr<IntraNodeComm> rendezvous(
43
+ c10::intrusive_ptr<c10d::Store> store,
44
+ const std::string& prefix,
45
+ size_t rank,
46
+ size_t worldSize,
47
+ size_t bufferSize = kDefaultBufferSize);
48
+
49
+ /**
50
+ * Selects an AllReduceAlgo that we think will outperform nccl.
51
+ * Returns AllReduceAlgo::NONE if we don't think we can outperform nccl.
52
+ */
53
+ AllReduceAlgo selectAllReduceAlgo(const at::Tensor& input);
54
+
55
+ at::Tensor allReduce(const at::Tensor& input, AllReduceAlgo algo);
56
+
57
+ private:
58
+ at::Tensor oneShotAllReduce(
59
+ const at::Tensor& input,
60
+ at::cuda::CUDAStream& stream);
61
+
62
+ at::Tensor twoShotAllReduce(
63
+ const at::Tensor& input,
64
+ at::cuda::CUDAStream& stream);
65
+
66
+ at::Tensor hybridCubeMeshAllReduce(
67
+ const at::Tensor& input,
68
+ at::cuda::CUDAStream& stream);
69
+
70
+ Topology topology_;
71
+ std::array<void*, kMaxDevices> p2pStates_;
72
+ std::array<void*, kMaxDevices> buffers_;
73
+ void* p2pStatesDev_;
74
+ void* buffersDev_;
75
+ void* topoInfo_;
76
+ size_t rank_;
77
+ size_t worldSize_;
78
+ size_t bufferSize_;
79
+ };
80
+
81
+ /**
82
+ * NOTE [IntraNodeComm Stream Semantics]
83
+ *
84
+ * ProcessGroupNCCL launches kernels differently from the conventional PyTorch
85
+ * CUDA semantics: it always launches collective kernels onto a dedicated
86
+ * communication stream. Therefore, it needs to:
87
+ *
88
+ * - Synchronize the calling stream and the comm stream.
89
+ * - Ensure the memory safety of the operands (via record_stream or stashing).
90
+ * - Synchronize the waiting stream with the comm stream.
91
+ *
92
+ * Unconditionally performing these tasks makes sense when we expect most of the
93
+ * communication to benefit from compute/comm overlap. However, IntraNodeComm
94
+ * primarily aims to optimize small, latency-sensitive, blocking communication,
95
+ * in which the overhead incurred by the above steps can be quite pronounced.
96
+ *
97
+ * Thus, IntraNodeComm follows the conventional PyTorch CUDA semantics and
98
+ * launches kernels onto the stream specified by the user. Although the user
99
+ * can perform the necessary synchronization via wait_stream, to provide a UX
100
+ * consistent with that of ProcessGroupNCCL, the necessary stream
101
+ * synchronization can also be performed via IntraNodeCommWork::wait().
102
+ */
103
+ class IntraNodeCommWork : public c10d::Work {
104
+ public:
105
+ IntraNodeCommWork() : c10d::Work() {
106
+ event_.record();
107
+ }
108
+
109
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
110
+ event_.block(at::cuda::getCurrentCUDAStream());
111
+ return true;
112
+ }
113
+
114
+ private:
115
+ at::cuda::CUDAEvent event_;
116
+ };
117
+
118
+ TORCH_API int64_t getIntraNodeCommUsageCounter();
119
+
120
+ } // namespace intra_node_comm
121
+ } // namespace c10d
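A hedged sketch following the stream-semantics note above; it assumes a CUDA build, a store shared by the participating ranks, and a caller that falls back to its regular backend when the sketch returns nullopt (the prefix string is illustrative):

    #include <torch/csrc/distributed/c10d/intra_node_comm.hpp>

    c10::optional<at::Tensor> tryIntraNodeAllReduce(
        c10::intrusive_ptr<c10d::Store> store,
        const at::Tensor& input,
        size_t rank,
        size_t worldSize) {
      using namespace c10d::intra_node_comm;
      auto comm = IntraNodeComm::rendezvous(store, "demo", rank, worldSize);
      if (!comm) {
        return c10::nullopt; // intra-node comm not applicable here
      }
      auto algo = comm->selectAllReduceAlgo(input);
      if (algo == AllReduceAlgo::NONE) {
        return c10::nullopt; // nccl is expected to be faster
      }
      // Launches on the current stream, per the note above.
      return comm->allReduce(input, algo);
    }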
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/comm.hpp>
4
+
5
+ #include <ATen/ATen.h>
6
+ #include <ATen/core/ivalue.h>
7
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
8
+ #include <torch/csrc/utils/pybind.h>
9
+
10
+ namespace c10d {
11
+
12
+ class TORCH_PYTHON_API PythonCommHook : public CommHookInterface {
13
+ public:
14
+ // Takes a state and a callable hook. The inputs are Python objects.
15
+ // The state is passed to the hook in runHook method, and it can be used to
16
+ // maintain and update any state information during the execution of the hook.
17
+ // The hook performs user-specified processing and returns a future indicating
18
+ // asychronous communication of gradients.
19
+ PythonCommHook(py::object state, py::object hook)
20
+ : state_(std::move(state)), hook_(std::move(hook)) {}
21
+
22
+ ~PythonCommHook() override;
23
+
24
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
25
+
26
+ at::Tensor parseHookResult(const c10::IValue& result) override;
27
+
28
+ private:
29
+ // Only needed for stateful communication.
30
+ py::object state_;
31
+ py::object hook_;
32
+ };
33
+
34
+ } // namespace c10d
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/backend/backend_device.h>
4
+ #include <torch/csrc/lazy/core/shape.h>
5
+ #include <cstring>
6
+
7
+ namespace torch {
8
+ namespace lazy {
9
+
10
+ class TORCH_API BackendData {
11
+ public:
12
+ struct Info {
13
+ /**
14
+ * Used by Lazy Graph Executor to tag info on BackendData objs
15
+ * */
16
+ virtual ~Info() = default;
17
+ };
18
+ /**
19
+ * Represents (Tensor) data stored on a backend device
20
+ * in its native format.
21
+ * */
22
+ using Handle = int64_t;
23
+
24
+ BackendData(BackendDevice device, Shape shape)
25
+ : device_(std::move(device)), shape_(std::move(shape)) {}
26
+
27
+ virtual ~BackendData() = default;
28
+
29
+ const BackendDevice& device() const {
30
+ return device_;
31
+ }
32
+
33
+ const Shape& shape() const {
34
+ return shape_;
35
+ }
36
+
37
+ Info* info() const {
38
+ return info_.get();
39
+ }
40
+
41
+ std::shared_ptr<Info> SetInfo(std::shared_ptr<Info> info) {
42
+ std::swap(info, info_);
43
+ return info;
44
+ }
45
+
46
+ virtual Handle GetHandle() = 0;
47
+
48
+ virtual void Assign(const BackendData& data) = 0;
49
+
50
+ virtual bool HasValue() const = 0;
51
+
52
+ private:
53
+ BackendDevice device_;
54
+ Shape shape_;
55
+ std::shared_ptr<Info> info_;
56
+ };
57
+
58
+ using BackendDataPtr = std::shared_ptr<BackendData>;
59
+
60
+ } // namespace lazy
61
+ } // namespace torch
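A toy subclass sketch to illustrate the BackendData contract; a real backend would hold its native device buffer instead of the integer handle used here, and all names are illustrative:

    #include <torch/csrc/lazy/backend/backend_data.h>

    class ToyBackendData : public torch::lazy::BackendData {
     public:
      ToyBackendData(torch::lazy::BackendDevice device, torch::lazy::Shape shape)
          : BackendData(std::move(device), std::move(shape)) {}

      Handle GetHandle() override {
        return handle_; // opaque identifier for the device-side buffer
      }
      void Assign(const BackendData& data) override {
        // Take over the other object's buffer identity.
        handle_ = static_cast<const ToyBackendData&>(data).handle_;
      }
      bool HasValue() const override {
        return handle_ != 0;
      }

     private:
      Handle handle_ = 0;
    };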
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h ADDED
@@ -0,0 +1,100 @@
1
+ #pragma once
2
+
3
+ #include <memory>
4
+ #include <ostream>
5
+ #include <string>
6
+
7
+ #include <ATen/Tensor.h>
8
+ #include <c10/macros/Export.h>
9
+ #include <c10/util/Deprecated.h>
10
+ #include <c10/util/Optional.h>
11
+
12
+ namespace c10 {
13
+ struct Device;
14
+ }
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ // Backend should extend it and define their own supported hardware types.
20
+ struct TORCH_API BackendDeviceType {
21
+ int8_t type{(int8_t)at::kCPU};
22
+ // Note: the previous default value was '0', which actually maps to at::kCPU;
23
+ // at least now it is explicit. We may want to make the default/undefined
24
+ // semantics clearer, though.
25
+ BackendDeviceType() : type((int8_t)at::kCPU) {}
26
+ BackendDeviceType(int8_t type) : type(type) {}
27
+
28
+ virtual ~BackendDeviceType() = default;
29
+ virtual std::string toString() const {
30
+ return "Unknown";
31
+ }
32
+ };
33
+
34
+ class TORCH_API BackendDevice {
35
+ public:
36
+ // The default constructor will set both the device type and ordinal
37
+ // to backend specific defaults.
38
+ BackendDevice();
39
+ BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int64_t ordinal);
40
+
41
+ int8_t type() const;
42
+ int64_t ordinal() const {
43
+ return ordinal_;
44
+ }
45
+
46
+ bool operator==(const BackendDevice& other) const {
47
+ return compare(other) == 0;
48
+ }
49
+ bool operator!=(const BackendDevice& other) const {
50
+ return compare(other) != 0;
51
+ }
52
+ bool operator<(const BackendDevice& rhs) const {
53
+ return compare(rhs) < 0;
54
+ }
55
+
56
+ std::string toString() const;
57
+
58
+ private:
59
+ int compare(const BackendDevice& rhs) const;
60
+
61
+ // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied.
62
+ std::shared_ptr<BackendDeviceType> type_;
63
+ int64_t ordinal_;
64
+ };
65
+
66
+ TORCH_API std::ostream& operator<<(
67
+ std::ostream& os,
68
+ const BackendDevice& device);
69
+
70
+ // Helpers for converting a c10::Device to BackendDevice and vice versa.
71
+ TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device);
72
+ TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device);
73
+
74
+ // Tries to extract the backend device out of the lazy tensor. Returns nullopt
75
+ // if the input is not a lazy tensor.
76
+ TORCH_API c10::optional<BackendDevice> GetBackendDevice(
77
+ const at::ITensorListRef tensors);
78
+ TORCH_API c10::optional<BackendDevice> GetBackendDevice(
79
+ const at::TensorList tensors);
80
+ TORCH_API c10::optional<BackendDevice> GetBackendDevice(
81
+ const at::Tensor& tensor);
82
+ TORCH_API c10::optional<BackendDevice> GetBackendDevice(
83
+ const c10::optional<c10::Device>& device);
84
+
85
+ // For variadic template.
86
+ TORCH_API c10::optional<BackendDevice> GetBackendDevice();
87
+
88
+ template <typename T, typename... Args>
89
+ c10::optional<BackendDevice> GetBackendDevice(
90
+ const T& tensor,
91
+ const Args&... forward_tensors) {
92
+ auto optional_device = GetBackendDevice(tensor);
93
+ if (optional_device) {
94
+ return optional_device;
95
+ }
96
+ return GetBackendDevice(forward_tensors...);
97
+ }
98
+
99
+ } // namespace lazy
100
+ } // namespace torch
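A small sketch of the variadic GetBackendDevice helper declared above: it scans its arguments left to right and returns the device of the first one that resolves to a lazy tensor (the function name is illustrative):

    #include <torch/csrc/lazy/backend/backend_device.h>

    c10::optional<torch::lazy::BackendDevice> pickDevice(
        const at::Tensor& a,
        const at::Tensor& b,
        const c10::optional<c10::Device>& hint) {
      // Falls through a -> b -> hint, mirroring the recursion above;
      // returns nullopt if none of them yields a backend device.
      return torch::lazy::GetBackendDevice(a, b, hint);
    }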
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h ADDED
@@ -0,0 +1,158 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <torch/csrc/lazy/backend/backend_data.h>
5
+ #include <torch/csrc/lazy/backend/backend_device.h>
6
+ #include <torch/csrc/lazy/backend/lowering_context.h>
7
+ #include <torch/csrc/lazy/core/lazy_graph_executor.h>
8
+ #include <torch/csrc/lazy/core/shape.h>
9
+ #include <torch/csrc/lazy/core/tensor.h>
10
+ #include <atomic>
11
+
12
+ namespace torch {
13
+ namespace lazy {
14
+
15
+ struct IrBuilder;
16
+
17
+ /**
18
+ * Work in progress- don't treat this as a stable interface yet!
19
+ */
20
+ class TORCH_API BackendImplInterface {
21
+ public:
22
+ virtual ~BackendImplInterface() = default;
23
+
24
+ /**
25
+ * Initialization/Teardown
26
+ * */
27
+ // No-op by default. Allows custom functionality to be exposed through
28
+ // extension bindings.
29
+ virtual void InitializeAtenBindings() const {}
30
+
31
+ virtual void PrepareToExit() const = 0;
32
+
33
+ /**
34
+ * Configuration
35
+ * */
36
+
37
+ virtual void SetRngSeed(size_t seed) const = 0;
38
+
39
+ /**
40
+ * IR Tracing
41
+ * */
42
+
43
+ virtual const IrBuilder* GetIrBuilder() const = 0;
44
+
45
+ /**
46
+ * Data Transfer
47
+ * */
48
+
49
+ virtual BackendDataPtr MakeComputationDataFromTensor(
50
+ const at::Tensor& tensor,
51
+ const Shape& shape,
52
+ const BackendDevice& device) const = 0;
53
+ virtual BackendDataPtr MakeComputationDataFromScalar(
54
+ const at::Scalar& scalar,
55
+ const torch::lazy::BackendDevice& device) const = 0;
56
+ virtual BackendDataPtr CreateDataPlaceholder(
57
+ const BackendDevice& device,
58
+ const Shape& shape) const = 0;
59
+
60
+ // Gets backend data if the node is a device data node. Otherwise returns
61
+ // nullptr
62
+ virtual BackendDataPtr GetComputationDataFromNode(const Node*) const = 0;
63
+
64
+ virtual at::Tensor MakeTensorFromComputationData(
65
+ const BackendDataPtr data,
66
+ c10::optional<at::ScalarType> logical_scalar_type) const = 0;
67
+
68
+ /**
69
+ * Lowering, Compilation, Execution
70
+ * */
71
+
72
+ virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
73
+ const std::string& name,
74
+ BackendDevice device,
75
+ c10::ArrayRef<const torch::lazy::Node*> post_order,
76
+ Util::EmissionMap emit_status) const = 0;
77
+
78
+ virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
79
+ const std::string& name,
80
+ BackendDevice device) const = 0;
81
+
82
+ // TODO(whc) need to keep this?
83
+ virtual std::vector<std::string> GetCompilationDevices(
84
+ const std::string& device,
85
+ c10::ArrayRef<std::string> devices) const = 0;
86
+
87
+ virtual std::vector<ComputationPtr> Compile(
88
+ std::vector<ComputationPtr> instances) const = 0;
89
+
90
+ virtual std::vector<BackendDataPtr> ExecuteComputation(
91
+ torch::lazy::ComputationPtr computation,
92
+ c10::ArrayRef<BackendDataPtr> arguments,
93
+ const BackendDevice& device) const = 0;
94
+
95
+ /**
96
+ * Device Configuration
97
+ * */
98
+
99
+ // Set or get the default device type.
100
+ // For backends used with virtual c10::Devices, this configures what real
101
+ // device type the backend should use, and matters if the backend supports
102
+ // more than one type of real device.
103
+ virtual std::shared_ptr<BackendDeviceType> GetDefaultDeviceType() const = 0;
104
+ virtual void SetDefaultDeviceType(int8_t type) = 0;
105
+
106
+ // Set or get the default device ordinal.
107
+ // For backends that supports multi-device, this configures what the
108
+ // default device the backend should use.
109
+ virtual int64_t GetDefaultDeviceOrdinal() const = 0;
110
+ virtual void SetDefaultDeviceOrdinal(int64_t) = 0;
111
+
112
+ // Specify which aten device should be used for eager fallback
113
+ // may change depending on current 'Default' DeviceType
114
+ virtual at::DeviceType EagerFallbackDeviceType() const = 0;
115
+
116
+ // Query all available backend devices
117
+ virtual std::vector<BackendDevice> GetBackendDevices() const = 0;
118
+
119
+ virtual std::string CreateMetricReport() const {
120
+ return "";
121
+ }
122
+
123
+ // Map a particular c10:: device to a concrete backend device
124
+ // Note:: c10:: devices may be virtual or concrete. xla:: and lazy:: are
125
+ // virtual devices, meaning they may map to a gpu, tpu, etc. behind the
126
+ // scenes. In the future, non-virtual c10:: devices may also use lazy tensors
127
+ // through a mode, in which case these APIs should still work, but should be
128
+ // identity mappings.
129
+ virtual BackendDevice GetBackendDevice(c10::Device device) const = 0;
130
+
131
+ // TODO(whc)
132
+ // Additional APIs expected for supporting distributed training, to be
133
+ // designed
134
+
135
+ /**
136
+ * Debug/Metrics
137
+ * */
138
+
139
+ // virtual std::map<std::string, Metric> GetMetrics() const = 0;
140
+
141
+ // virtual MemoryInfo GetMemoryInfo(const std::string& device) = 0;
142
+
143
+ virtual std::string GetComputationBackendText(
144
+ const ComputationPtr computation) const = 0;
145
+ };
146
+
147
+ class TORCH_API BackendRegistrar {
148
+ public:
149
+ BackendRegistrar(const BackendImplInterface* backend_impl_interface);
150
+ };
151
+
152
+ TORCH_API bool hasBackend();
153
+ TORCH_API const BackendImplInterface* getBackend();
154
+
155
+ TORCH_API const IrBuilder* getIrBuilder();
156
+
157
+ } // namespace lazy
158
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h ADDED
@@ -0,0 +1,114 @@
1
+ #pragma once
2
+
3
+ #include <memory>
4
+ #include <string>
5
+ #include <unordered_map>
6
+ #include <utility>
7
+ #include <vector>
8
+
9
+ #include <torch/csrc/lazy/backend/backend_data.h>
10
+ #include <torch/csrc/lazy/backend/backend_device.h>
11
+ #include <torch/csrc/lazy/core/ir.h>
12
+ #include <torch/csrc/lazy/core/ir_util.h>
13
+
14
+ namespace torch {
15
+ namespace lazy {
16
+
17
+ class TORCH_API Computation {
18
+ public:
19
+ virtual int parameters_size() const = 0;
20
+
21
+ virtual const std::vector<Shape>& parameter_shapes() const = 0;
22
+
23
+ virtual const std::vector<std::string>& parameter_names() const = 0;
24
+
25
+ virtual const Shape& result_shape() const = 0;
26
+
27
+ virtual const std::string to_string() const = 0;
28
+
29
+ virtual ~Computation() = default;
30
+
31
+ // Indicates whether this computation is being executed inside a mark step
32
+ // Assume false unless set otherwise
33
+ bool in_mark_step = false;
34
+ };
35
+
36
+ using ComputationPtr = std::shared_ptr<Computation>;
37
+
38
+ // Keeps track of the code generation state.
39
+ class TORCH_API LoweringContext {
40
+ public:
41
+ LoweringContext(const std::string& name, BackendDevice device);
42
+ LoweringContext(
43
+ const std::string& name,
44
+ BackendDevice device,
45
+ c10::ArrayRef<const torch::lazy::Node*> post_order,
46
+ Util::EmissionMap emit_status);
47
+
48
+ virtual ~LoweringContext() = default;
49
+
50
+ static std::unique_ptr<LoweringContext> Create(
51
+ const std::string& name,
52
+ BackendDevice device,
53
+ c10::ArrayRef<const torch::lazy::Node*> post_order,
54
+ Util::EmissionMap emit_status);
55
+
56
+ static std::unique_ptr<LoweringContext> Create(
57
+ const std::string& name,
58
+ BackendDevice device);
59
+
60
+ const BackendDevice& device() const {
61
+ return device_;
62
+ };
63
+
64
+ // Retrieves the vector holding all the tensors associated with the parameter
65
+ // instructions which have been created.
66
+ const std::vector<BackendDataPtr>& GetParametersData() const;
67
+
68
+ // Adds a new input/output alias.
69
+ virtual void SetUpAlias(
70
+ const std::vector<int64_t>& output_index,
71
+ int64_t param_number,
72
+ const std::vector<int64_t>& param_index,
73
+ bool must_alias = false) {
74
+ // Dummy default implementation to do nothing.
75
+ }
76
+
77
+ // Check if parameter shape matches result at index.
78
+ virtual bool CheckResultShape(
79
+ const BackendDataPtr& parameter_data,
80
+ size_t result_idx) {
81
+ // Dummy default implementation to do nothing.
82
+ return false;
83
+ }
84
+
85
+ // Adds the given output as a component of the result tuple and returns its
86
+ // assigned position within the tuple.
87
+ virtual size_t AddResult(const torch::lazy::Output& output) = 0;
88
+
89
+ // Associates the given output with the input parameter of the given index and
90
+ // shape. Only used for the operator-by-operator execution, mostly for
91
+ // debugging purposes.
92
+ virtual void AddParameter(
93
+ const torch::lazy::Output& output,
94
+ size_t index,
95
+ const Shape& shape,
96
+ const std::string& name) = 0;
97
+
98
+ // Build the computation capturing all the operations created with the
99
+ // embedded builder (returned by the builder() API).
100
+ virtual ComputationPtr Build() = 0;
101
+
102
+ size_t GetEmittedNodeCount() const {
103
+ return emit_status_.size();
104
+ }
105
+
106
+ protected:
107
+ BackendDevice device_;
108
+ std::vector<BackendDataPtr> parameters_;
109
+ std::vector<size_t> parameter_sequence_;
110
+ Util::EmissionMap emit_status_;
111
+ };
112
+
113
+ } // namespace lazy
114
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h ADDED
@@ -0,0 +1,144 @@
1
+ /**
2
+ * Cache utils in this file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/cache.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <functional>
9
+ #include <list>
10
+ #include <memory>
11
+ #include <mutex>
12
+ #include <unordered_map>
13
+ #include <utility>
14
+
15
+ namespace torch {
16
+ namespace lazy {
17
+
18
+ // Generic key and object cache with LRU expiration policy. The objects of type
19
+ // T will be stored as std::shared_ptr<T> and taken and returned as such, by the
20
+ // cache API.
21
+ template <
22
+ typename K,
23
+ typename T,
24
+ typename H = std::hash<K>,
25
+ typename E = std::equal_to<K>>
26
+ class Cache {
27
+ public:
28
+ using TypePtr = std::shared_ptr<T>;
29
+ using Element = std::pair<K, TypePtr>;
30
+
31
+ explicit Cache(size_t max_size) : max_size_(max_size) {}
32
+
33
+ // Adds an object to the cache, unless it already exists. If the cache grows
34
+ // beyond the limit set during construction, the oldest used object will be
35
+ // removed from the cache.
36
+ TypePtr Add(K key, TypePtr object) {
37
+ if (!max_size_) {
38
+ return object;
39
+ }
40
+ std::lock_guard<std::mutex> slock(lock_);
41
+ element_list_.emplace_front(Element(std::move(key), std::move(object)));
42
+ auto it = element_list_.begin();
43
+ auto emplace_result = element_map_.emplace(&it->first, it);
44
+ if (!emplace_result.second) {
45
+ element_list_.erase(it);
46
+ DoLRU(emplace_result.first->second);
47
+ } else if (element_list_.size() > max_size_) {
48
+ Element* last = &element_list_.back();
49
+ element_map_.erase(&last->first);
50
+ element_list_.pop_back();
51
+ }
52
+ return emplace_result.first->second->second;
53
+ }
54
+
55
+ // Retrieves the existing object if it exists. If it does, its position in
56
+ // the LRU list gets moved to the head of the list.
57
+ // Returns nullptr if no object with the specified key is found within the
58
+ // cache.
59
+ TypePtr Get(const K& key) {
60
+ if (!max_size_) {
61
+ return nullptr;
62
+ }
63
+ std::lock_guard<std::mutex> slock(lock_);
64
+ auto it = element_map_.find(&key);
65
+ if (it == element_map_.end()) {
66
+ return nullptr;
67
+ }
68
+ DoLRU(it->second);
69
+ return it->second->second;
70
+ }
71
+
72
+ TypePtr GetLatest() {
73
+ std::lock_guard<std::mutex> g(lock_);
74
+ TORCH_CHECK(!element_list_.empty());
75
+ return element_list_.front().second;
76
+ }
77
+
78
+ bool Erase(const K& key) {
79
+ if (!max_size_) {
80
+ return false;
81
+ }
82
+ std::lock_guard<std::mutex> slock(lock_);
83
+ auto it = element_map_.find(&key);
84
+ if (it == element_map_.end()) {
85
+ return false;
86
+ }
87
+ auto lit = it->second;
88
+ element_map_.erase(it);
89
+ element_list_.erase(lit);
90
+ return true;
91
+ }
92
+
93
+ void Clear() {
94
+ if (!max_size_) {
95
+ return;
96
+ }
97
+ std::lock_guard<std::mutex> slock(lock_);
98
+ element_map_.clear();
99
+ element_list_.clear();
100
+ }
101
+
102
+ int Numel() const {
103
+ if (!max_size_) {
104
+ return 0;
105
+ }
106
+ std::lock_guard<std::mutex> g(lock_);
107
+ TORCH_CHECK(element_map_.size() == element_list_.size());
108
+ return element_map_.size();
109
+ }
110
+
111
+ private:
112
+ using ElementList = std::list<Element>;
113
+
114
+ struct Hasher {
115
+ size_t operator()(const K* key) const {
116
+ return hasher(*key);
117
+ }
118
+
119
+ H hasher;
120
+ };
121
+
122
+ struct Equaler {
123
+ bool operator()(const K* k1, const K* k2) const {
124
+ return equaler(*k1, *k2);
125
+ }
126
+
127
+ E equaler;
128
+ };
129
+
130
+ using ElementMap = std::
131
+ unordered_map<const K*, typename ElementList::iterator, Hasher, Equaler>;
132
+
133
+ void DoLRU(typename ElementList::iterator it) {
134
+ element_list_.splice(element_list_.begin(), element_list_, it);
135
+ }
136
+
137
+ mutable std::mutex lock_;
138
+ const size_t max_size_ = 0;
139
+ ElementList element_list_;
140
+ ElementMap element_map_;
141
+ };
142
+
143
+ } // namespace lazy
144
+ } // namespace torch
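A brief usage sketch of the Cache template above (illustrative only; the key and value types are arbitrary choices): with a capacity of two, retrieving "a" refreshes it in the LRU order, so adding a third entry evicts "b".

#include <cassert>
#include <memory>
#include <string>
#include <torch/csrc/lazy/core/cache.h>

int main() {
  torch::lazy::Cache<std::string, int> cache(/*max_size=*/2);
  cache.Add("a", std::make_shared<int>(1));
  cache.Add("b", std::make_shared<int>(2));
  cache.Get("a");                            // moves "a" to the front of the LRU list
  cache.Add("c", std::make_shared<int>(3));  // evicts "b", the least recently used entry
  assert(cache.Get("b") == nullptr);
  assert(*cache.GetLatest() == 3);           // "c" is the most recently touched entry
  return 0;
}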
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ #include <c10/macros/Export.h>
3
+ #include <c10/util/Flags.h>
4
+
5
+ C10_DECLARE_bool(torch_lazy_ir_debug);
6
+ C10_DECLARE_bool(torch_lazy_handle_special_scalars);
7
+ C10_DECLARE_bool(torch_lazy_all_numbers_special_scalars);
8
+ C10_DECLARE_bool(torch_lazy_param_aliasing);
9
+ C10_DECLARE_bool(torch_lazy_reuse_ir);
10
+ C10_DECLARE_bool(torch_lazy_use_thread_pool);
11
+ C10_DECLARE_bool(torch_lazy_enable_device_data_cache);
12
+
13
+ C10_DECLARE_int(torch_lazy_compilation_cache_size);
14
+ C10_DECLARE_int(torch_lazy_device_data_cache_size);
15
+ C10_DECLARE_int(torch_lazy_io_thread_pool_size);
16
+ C10_DECLARE_int(torch_lazy_metrics_samples);
17
+ C10_DECLARE_int(torch_lazy_trim_graph_check_frequency);
18
+ C10_DECLARE_int(torch_lazy_trim_graph_size);
19
+
20
+ C10_DECLARE_string(torch_lazy_metrics_percentiles);
21
+
22
+ C10_DECLARE_int(torch_lazy_shape_cache_size);
23
+
24
+ namespace torch {
25
+ namespace lazy {
26
+ TORCH_API std::string& getLTCForceFallback();
27
+ }
28
+ } // namespace torch
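These C10_DECLARE_* macros expose the options as ordinary FLAGS_-prefixed globals (the same spelling ir_builder.h below uses for FLAGS_torch_lazy_reuse_ir), so they can be toggled from C++; a small illustrative snippet:

#include <torch/csrc/lazy/core/config.h>

void EnableLazyDebugging() {
  // Toggle a few of the lazy-tensor knobs declared above.
  FLAGS_torch_lazy_ir_debug = true;
  FLAGS_torch_lazy_reuse_ir = true;
  FLAGS_torch_lazy_compilation_cache_size = 512;
}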
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/lazy/core/tensor.h>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ TORCH_API std::function<std::vector<SourceLocation>()>&
12
+ GetPythonFramesFunction();
13
+
14
+ TORCH_API std::string GetFirstUserFrameInPython();
15
+
16
+ class TORCH_API DebugUtil {
17
+ public:
18
+ enum GraphFormat {
19
+ kText,
20
+ kDot,
21
+ kBackend,
22
+ };
23
+
24
+ static GraphFormat GetDefaultGraphFormat();
25
+
26
+ // Dumps the current Python frame and the IR Graph whose roots are the IR
27
+ // values held at the tensors. If indices is not nullptr, it selects the
28
+ // indices of the tensors whose graph will be emitted.
29
+ static std::string GetTensorsGraphInfo(
30
+ c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
31
+ const std::vector<size_t>* indices,
32
+ GraphFormat format = GetDefaultGraphFormat());
33
+
34
+ // If the environment variable LTC_SAVE_TENSORS_FILE is set to the proper
35
+ // output path, an instance of the report returned by GetTensorsGraphInfo() is
36
+ // saved.
37
+ static void SaveTensorsGraphInfo(
38
+ const char* name,
39
+ c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
40
+ const std::vector<size_t>* indices,
41
+ GraphFormat format = GetDefaultGraphFormat());
42
+
43
+ static bool ExperimentEnabled(const std::string& name);
44
+ };
45
+
46
+ } // namespace lazy
47
+ } // namespace torch
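As an illustration (this helper is not part of the header), the text-format graph for a set of lazy tensors can be obtained by passing a null indices pointer, which selects all of them:

#include <string>
#include <torch/csrc/lazy/core/debug_util.h>

// Hedged sketch: render the IR graph rooted at `tensors` as text.
std::string DumpGraphAsText(
    c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors) {
  return torch::lazy::DebugUtil::GetTensorsGraphInfo(
      tensors,
      /*indices=*/nullptr,
      torch::lazy::DebugUtil::GraphFormat::kText);
}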
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h ADDED
@@ -0,0 +1,59 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/Flags.h>
16
+ #include <torch/csrc/lazy/core/hash.h>
17
+ #include <torch/csrc/lazy/core/ir.h>
18
+ #include <torch/csrc/lazy/core/ir_metadata.h>
19
+ #include <torch/csrc/lazy/ts_backend/ts_node.h>
20
+
21
+ namespace torch {
22
+ namespace lazy {
23
+
24
+ /**
25
+ * The goal of "dynamic" Nodes is to patch a hole in our tracing.
26
+ * Previously, if a user called `sizes` on a Tensor, it would leak out
27
+ * of our tracing system, as `sizes` returns a torch.Size or an int. To
28
+ * prevent this from happening, we introduce DimensionNode, a new type
29
+ * of Node that abstracts the operation of getting the dimensions of a
30
+ * Tensor.
31
+ *
32
+ * Consider the following example:
33
+ * ```
34
+ * numel = x.shape()[0] * x.shape()[1]
35
+ * ```
36
+ *
37
+ * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
38
+ * and the multiplication of the two SizeNodes will be represented by
39
+ * a SizeMul (also a subclass of DimensionNode). Through this, we can
40
+ * prevent `numel` from being represented as a Python int and thus
41
+ * burned into the Graph.
42
+ */
43
+
44
+ class TORCH_API DimensionNode {
45
+ public:
46
+ virtual bool isSymbolic() const {
47
+ return false;
48
+ };
49
+ virtual int64_t getDynamicValue() const {
50
+ TORCH_CHECK(false, "NYI");
51
+ };
52
+ virtual int64_t getStaticValue() const {
53
+ TORCH_CHECK(false, "NYI");
54
+ };
55
+ virtual ~DimensionNode() = default;
56
+ };
57
+
58
+ } // namespace lazy
59
+ } // namespace torch
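To make the DimensionNode contract concrete, here is a hypothetical subclass (not part of PyTorch) for a dimension whose extent is known statically; only the methods that have a meaningful answer are overridden:

#include <cstdint>
#include <torch/csrc/lazy/core/dynamic_ir.h>

// Illustrative only: a dimension with a fixed, statically known extent.
class StaticDim : public torch::lazy::DimensionNode {
 public:
  explicit StaticDim(int64_t extent) : extent_(extent) {}
  bool isSymbolic() const override {
    return false; // the value never depends on runtime data
  }
  int64_t getStaticValue() const override {
    return extent_;
  }
  // getDynamicValue() keeps the base implementation, which raises "NYI".

 private:
  int64_t extent_;
};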
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h ADDED
@@ -0,0 +1,238 @@
1
+ /**
2
+ * Hash utils in this file are adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/e0e5f937a0ba8d904f9608137dc8c51ba439df2d/third_party/xla_client/util.h
4
+ */
5
+ #pragma once
6
+
7
+ #include <ATen/Tensor.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <c10/util/int128.h>
10
+ #include <torch/csrc/Export.h>
11
+ #include <cstring>
12
+ #include <set>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ using size_t = std::size_t;
20
+
21
+ class TORCH_API hash_t : public c10::uint128 {
22
+ public:
23
+ // Switch from typedef hash_t = uint128 to provide explicit casters
24
+ hash_t(int8_t val) : uint128(static_cast<uint32_t>(val)) {}
25
+ hash_t(int16_t val) : uint128(static_cast<uint32_t>(val)) {}
26
+ hash_t(int32_t val) : uint128(static_cast<uint32_t>(val)) {}
27
+ hash_t(int64_t val) : uint128(static_cast<uint64_t>(val)) {}
28
+ hash_t(uint32_t val) : uint128(val) {}
29
+ hash_t(uint64_t val) : uint128(val) {}
30
+ hash_t(uint128 val) : uint128(val) {}
31
+ hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
32
+ hash_t() : uint128() {}
33
+ };
34
+
35
+ // Std* functions use 64-bit hash
36
+ size_t TORCH_API StdDataHash(const void* data, size_t size);
37
+
38
+ size_t TORCH_API StdHashCombine(uintmax_t a, uintmax_t b);
39
+
40
+ // Other functions are all 128-bit
41
+ hash_t TORCH_API HashBlock(const void* data, size_t n, const hash_t& seed);
42
+
43
+ hash_t TORCH_API DataHash(const void* data, size_t size);
44
+
45
+ hash_t TORCH_API HashCombine(const hash_t& a, const hash_t& b);
46
+
47
+ size_t TORCH_API HashReduce(const hash_t& a);
48
+
49
+ // Returns a string representation of a hash
50
+ std::string TORCH_API HashToString(const hash_t& a);
51
+
52
+ struct HashReducer {
53
+ size_t operator()(const hash_t& value) const {
54
+ return HashReduce(value);
55
+ }
56
+ };
57
+
58
+ static inline hash_t StringHash(const char* data) {
59
+ return DataHash(data, std::strlen(data));
60
+ }
61
+
62
+ // Automatic templated implementation for 'arithmetic' types
63
+ template <
64
+ typename T,
65
+ typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr>
66
+ hash_t Hash(const T& value) {
67
+ return DataHash(&value, sizeof(value));
68
+ }
69
+
70
+ // added because on macos builds the vector<bool> specialization
71
+ // breaks falling through to the templated arithmetic types above
72
+ hash_t TORCH_API Hash(const std::vector<bool>& value);
73
+
74
+ // Specialized implementations for proprietary types
75
+ static inline hash_t Hash(const c10::ScalarType& value) {
76
+ return DataHash(&value, sizeof(value));
77
+ }
78
+
79
+ static inline hash_t Hash(const c10::MemoryFormat& value) {
80
+ return DataHash(&value, sizeof(value));
81
+ }
82
+
83
+ static inline hash_t Hash(const c10::DeviceType& value) {
84
+ return DataHash(&value, sizeof(value));
85
+ }
86
+
87
+ static inline hash_t Hash(const c10::Device& value) {
88
+ return HashCombine(Hash(value.type()), Hash(value.index()));
89
+ }
90
+
91
+ static inline hash_t Hash(const c10::Layout& value) {
92
+ return DataHash(&value, sizeof(value));
93
+ }
94
+
95
+ static inline hash_t Hash(const c10::Scalar& value) {
96
+ switch (value.type()) {
97
+ case c10::ScalarType::ComplexDouble:
98
+ return Hash(value.toComplexDouble());
99
+ case c10::ScalarType::Double:
100
+ return Hash(value.toDouble());
101
+ case c10::ScalarType::Long:
102
+ return Hash(value.toLong());
103
+ case c10::ScalarType::Bool:
104
+ return Hash(value.toBool());
105
+ default:
106
+ TORCH_INTERNAL_ASSERT(false, "Unknown scalar type.", value.type());
107
+ }
108
+ }
109
+
110
+ static inline hash_t TensorHash(const at::Tensor& tensor) {
111
+ at::Tensor ctensor = tensor.contiguous();
112
+ int64_t size = ctensor.numel() * ctensor.element_size();
113
+ switch (ctensor.scalar_type()) {
114
+ case at::ScalarType::Bool:
115
+ return DataHash(ctensor.const_data_ptr<bool>(), size);
116
+ case at::ScalarType::Byte:
117
+ return DataHash(ctensor.const_data_ptr<uint8_t>(), size);
118
+ case at::ScalarType::Char:
119
+ return DataHash(ctensor.const_data_ptr<int8_t>(), size);
120
+ case at::ScalarType::Short:
121
+ return DataHash(ctensor.const_data_ptr<int16_t>(), size);
122
+ case at::ScalarType::Int:
123
+ return DataHash(ctensor.const_data_ptr<int32_t>(), size);
124
+ case at::ScalarType::Long:
125
+ return DataHash(ctensor.const_data_ptr<int64_t>(), size);
126
+ case at::ScalarType::Float:
127
+ return DataHash(ctensor.const_data_ptr<float>(), size);
128
+ case at::ScalarType::Double:
129
+ return DataHash(ctensor.const_data_ptr<double>(), size);
130
+ case at::ScalarType::BFloat16:
131
+ return DataHash(ctensor.const_data_ptr<at::BFloat16>(), size);
132
+ case at::ScalarType::Half:
133
+ return DataHash(ctensor.const_data_ptr<at::Half>(), size);
134
+ case at::ScalarType::ComplexFloat:
135
+ return DataHash(ctensor.const_data_ptr<c10::complex<float>>(), size);
136
+ case at::ScalarType::ComplexDouble:
137
+ return DataHash(ctensor.const_data_ptr<c10::complex<double>>(), size);
138
+ default:
139
+ TORCH_INTERNAL_ASSERT(
140
+ false, "Unsupported scalar type:", ctensor.scalar_type());
141
+ }
142
+ }
143
+
144
+ static inline hash_t Hash(const std::string& value) {
145
+ return DataHash(value.data(), value.size());
146
+ }
147
+
148
+ static inline hash_t Hash(const c10::string_view& value) {
149
+ return DataHash(value.data(), value.size());
150
+ }
151
+
152
+ static inline hash_t Hash(const at::Generator& value) {
153
+ return TensorHash(value.get_state());
154
+ }
155
+
156
+ // Taken from glibc's implementation of hashing optionals,
157
+ // we want to include a contribution to the hash to distinguish
158
+ // cases where one or another option was null, but we hope it doesn't
159
+ // collide with an actual scalar value.
160
+ //
161
+ // Use an arbitrary randomly-selected 64-bit integer rather than a
162
+ // small constant that we then hash at runtime so we don't have to
163
+ // repeatedly hash a constant at runtime.
164
+ static const int64_t kNullOpt = 0x8655d738f3678dda;
165
+
166
+ // Hashing for c10::optional types contributes to hash
167
+ // for optionals with null value, important to distinguish
168
+ // between <nullopt, non-nullopt> and <non-nullopt, nullopt> cases
169
+ template <typename T>
170
+ hash_t Hash(const c10::optional<T>& value) {
171
+ if (value.has_value()) {
172
+ return Hash(value.value());
173
+ } else {
174
+ return kNullOpt;
175
+ }
176
+ }
177
+
178
+ // Hashing of containers
179
+ // Forward declare to allow hashes of vectors of vectors to work.
180
+ template <typename T>
181
+ hash_t ContainerHash(const T& values);
182
+
183
+ template <typename T>
184
+ hash_t Hash(const std::vector<T>& values) {
185
+ return ContainerHash(values);
186
+ }
187
+
188
+ // Need a special case for optional<container>?
189
+ template <typename T>
190
+ hash_t Hash(const c10::optional<std::vector<T>>& value) {
191
+ if (value.has_value()) {
192
+ return ContainerHash(value.value());
193
+ } else {
194
+ return kNullOpt;
195
+ }
196
+ }
197
+
198
+ template <typename T>
199
+ hash_t Hash(const std::set<T>& values) {
200
+ return ContainerHash(values);
201
+ }
202
+
203
+ template <typename T, typename S>
204
+ hash_t Hash(const std::pair<T, S>& values) {
205
+ return HashCombine(Hash(values.first), Hash(values.second));
206
+ }
207
+
208
+ static inline hash_t Hash(const hash_t& value) {
209
+ return value;
210
+ }
211
+
212
+ template <typename T>
213
+ hash_t Hash(c10::ArrayRef<T> values) {
214
+ return ContainerHash(values);
215
+ }
216
+
217
+ template <typename T>
218
+ hash_t ContainerHash(const T& values) {
219
+ hash_t h(static_cast<uint64_t>(0x85ebca77c2b2ae63));
220
+ for (const auto& value : values) {
221
+ h = HashCombine(h, Hash(value));
222
+ }
223
+ return h;
224
+ }
225
+
226
+ // Varargs hashing
227
+ template <typename T = void>
228
+ hash_t MHash() {
229
+ return hash_t(static_cast<uint64_t>(0x165667b19e3779f9));
230
+ }
231
+
232
+ template <typename T, typename... Targs>
233
+ hash_t MHash(T value, Targs... Fargs) {
234
+ return HashCombine(Hash(value), MHash(Fargs...));
235
+ }
236
+
237
+ } // namespace lazy
238
+ } // namespace torch
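A short illustration of how these pieces compose: MHash folds heterogeneous arguments through Hash and HashCombine, while HashToString and HashReduce convert the 128-bit result for logging and for use as std::unordered_map keys. The particular argument values below are arbitrary.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
#include <torch/csrc/lazy/core/hash.h>

int main() {
  using namespace torch::lazy;
  std::vector<int64_t> sizes = {2, 4};
  // 128-bit hash over an op name, a dtype and a shape-like vector.
  hash_t h = MHash(std::string("aten::relu"), c10::ScalarType::Float, sizes);
  std::cout << HashToString(h) << "\n";  // hex string of the 128-bit hash
  std::size_t bucket = HashReduce(h);    // 64-bit reduction for std containers
  std::cout << bucket << "\n";
  return 0;
}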
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/helpers.h ADDED
@@ -0,0 +1,72 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Scalar.h>
4
+ #include <c10/util/BFloat16.h>
5
+ #include <c10/util/Half.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <torch/csrc/lazy/core/permutation_util.h>
8
+ #include <torch/csrc/lazy/core/shape.h>
9
+ #include <torch/csrc/lazy/core/util.h>
10
+
11
+ #include <complex>
12
+ #include <functional>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+ // TODO: Consolidate this file with util.h
17
+
18
+ namespace torch {
19
+ namespace lazy {
20
+
21
+ // Converts an iterable container to a vector of int64's.
22
+ template <typename S>
23
+ static std::vector<int64_t> ToI64Vector(const S& input) {
24
+ return ToVector<int64_t>(input);
25
+ }
26
+
27
+ // Creates a set of dimensions by dropping the drop_dims ones.
28
+ TORCH_API std::vector<int64_t> DropDimensions(
29
+ c10::ArrayRef<int64_t> sizes,
30
+ c10::ArrayRef<int64_t> drop_dims);
31
+
32
+ // Get the canonical dimension index in the [0, rank) interval. Negative
33
+ // indices are interpreted as follows: -1 is rank-1, -2 is rank-2 etc.
34
+ TORCH_API int64_t GetCanonicalDimensionIndex(int64_t dim, int64_t rank);
35
+
36
+ // Same as above, for multiple dimensions.
37
+ TORCH_API std::vector<int64_t> GetCanonicalDimensionIndices(
38
+ c10::ArrayRef<int64_t> dimensions,
39
+ int64_t rank);
40
+
41
+ // Returns the canonical position in the dim dimension, handling negative
42
+ // values for the position.
43
+ TORCH_API int64_t GetCanonicalPosition(
44
+ c10::ArrayRef<int64_t> dimensions,
45
+ int64_t dim,
46
+ int64_t pos);
47
+
48
+ // Creates a transposition from the given input and dimensions.
49
+ TORCH_API std::vector<int64_t> MakeTransposePermutation(
50
+ int64_t dim0,
51
+ int64_t dim1,
52
+ int64_t rank);
53
+
54
+ // Calculates the promoted shape to which the input shapes should be
55
+ // broadcasted for an elementwise operation. The size of the common dimensions
56
+ // (2,3,4 for shape1, and 0,1,2 for shape2) must either match, or either one
57
+ // of the two be 1.
58
+ // Example:
59
+ // shape1 = [9, 7, 6, 1, 2]
60
+ // shape2 = [6, 5, 2]
61
+ // result_shape = [9, 7, 6, 5, 2]
62
+ TORCH_API std::vector<int64_t> GetPromotedShape(
63
+ c10::ArrayRef<int64_t> shape1_dims,
64
+ c10::ArrayRef<int64_t> shape2_dims);
65
+
66
+ TORCH_API Shape
67
+ GetPromotedBinaryOpShape(const Shape& shape1, const Shape& shape2);
68
+
69
+ TORCH_API std::vector<std::string> StrSplit(c10::string_view text, char delim);
70
+
71
+ } // namespace lazy
72
+ } // namespace torch
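The GetPromotedShape example in the comment above can be exercised directly (illustrative sketch):

#include <cassert>
#include <cstdint>
#include <vector>
#include <torch/csrc/lazy/core/helpers.h>

int main() {
  // Example from the comment: trailing dimensions must match or be 1.
  std::vector<int64_t> promoted =
      torch::lazy::GetPromotedShape({9, 7, 6, 1, 2}, {6, 5, 2});
  assert((promoted == std::vector<int64_t>{9, 7, 6, 5, 2}));
  return 0;
}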
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/ir.h>
4
+
5
+ #include <c10/util/CallOnce.h>
6
+
7
+ #include <mutex>
8
+ #include <string>
9
+
10
+ namespace torch {
11
+ namespace lazy {
12
+
13
+ class TORCH_API OpKindWrapper {
14
+ public:
15
+ explicit OpKindWrapper(const char* name) : name_(name) {}
16
+
17
+ const OpKind& operator*() const {
18
+ return get();
19
+ }
20
+
21
+ operator OpKind() const {
22
+ return get();
23
+ }
24
+
25
+ private:
26
+ const OpKind& get() const {
27
+ c10::call_once(once_, [this]() { op_kind_ = OpKind::Get(name_); });
28
+ return op_kind_;
29
+ }
30
+
31
+ const char* name_;
32
+ mutable OpKind op_kind_;
33
+ mutable c10::once_flag once_;
34
+ };
35
+
36
+ const OpKindWrapper ltc_all_to_all("lazy_tensors::all_to_all");
37
+ const OpKindWrapper ltc_cast("lazy_tensors::cast");
38
+ const OpKindWrapper ltc_collective_permute("lazy_tensors::collective_permute");
39
+ const OpKindWrapper ltc_cross_replica_sum("lazy_tensors::cross_replica_sum");
40
+ const OpKindWrapper ltc_device_data("lazy_tensors::device_data");
41
+ const OpKindWrapper ltc_get_dimensions_size(
42
+ "lazy_tensors::ltc_get_dimensions_size");
43
+ const OpKindWrapper ltc_moving_average("lazy_tensors::moving_average");
44
+ const OpKindWrapper ltc_nms("lazy_tensors::nms");
45
+ const OpKindWrapper ltc_not_supported("lazy_tensors::not_supported");
46
+ const OpKindWrapper ltc_replication_pad("lazy_tensors::replication_pad");
47
+ const OpKindWrapper ltc_replication_pad_backward(
48
+ "lazy_tensors::replication_pad_backward");
49
+ const OpKindWrapper ltc_tensor_data("lazy_tensors::tensor_data");
50
+
51
+ } // namespace lazy
52
+ } // namespace torch
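Each OpKindWrapper interns its symbol lazily on first access; call sites typically compare a node's kind against the wrapper, e.g. (illustrative helper, not part of the header):

#include <torch/csrc/lazy/core/internal_ops/ltc_ops.h>

// Illustrative helper: does this node carry device data?
bool IsDeviceData(const torch::lazy::Node& node) {
  // The wrapper converts implicitly to OpKind (interned on first access).
  return node.op() == *torch::lazy::ltc_device_data;
}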
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/ArrayRef.h>
16
+ #include <c10/util/Flags.h>
17
+ #include <torch/csrc/lazy/core/hash.h>
18
+ #include <torch/csrc/lazy/core/ir_metadata.h>
19
+ #include <torch/csrc/lazy/core/shape.h>
20
+
21
+ C10_DECLARE_bool(ltc_enable_dynamic_shapes);
22
+
23
+ namespace torch {
24
+ namespace lazy {
25
+
26
+ static const hash_t kHashSeed(static_cast<uint32_t>(0x5a2d296e9));
27
+
28
+ class Node;
29
+ struct Output;
30
+ struct Value;
31
+
32
+ using NodePtr = std::shared_ptr<Node>;
33
+
34
+ // The Kind of operation a Node can be associated to.
35
+ struct TORCH_API OpKind {
36
+ OpKind() = default;
37
+ explicit OpKind(c10::Symbol op) : op(op) {}
38
+
39
+ bool operator==(const OpKind& rhs) const {
40
+ return op == rhs.op;
41
+ }
42
+ bool operator!=(const OpKind& rhs) const {
43
+ return !operator==(rhs);
44
+ }
45
+ bool operator<(const OpKind& rhs) const {
46
+ return c10::unique_t(op) < c10::unique_t(rhs.op);
47
+ }
48
+
49
+ hash_t hash() const;
50
+
51
+ std::string ToString() const {
52
+ return op.toQualString();
53
+ }
54
+
55
+ // Retrieves an existing operation object, or creates a new one. Operations
56
+ // that are specific to lazy tensors should live within the 'lazy_tensors::'
57
+ // namespace.
58
+ static OpKind Get(const std::string& name);
59
+
60
+ c10::Symbol op;
61
+ };
62
+
63
+ inline std::ostream& operator<<(std::ostream& stream, const OpKind& op) {
64
+ stream << op.ToString();
65
+ return stream;
66
+ }
67
+
68
+ using OpList = c10::ArrayRef<Value>;
69
+
70
+ hash_t OperandHashes(
71
+ const OpList& operands,
72
+ const hash_t& seed,
73
+ bool bakeInSizes);
74
+ // A node in the graph. Nodes for operations which require extra data to be
75
+ // stored for lowering should inherit from this class and add an operation
76
+ // specific member there. For example, a constant might create a new
77
+ // NodeConstant class (inheriting from Node) with an extra lazy_tensors::Literal
78
+ // field, or a tensor value might create a new NodeTensor with a computation
79
+ // client data handle in it.
80
+ class TORCH_API Node {
81
+ public:
82
+ static bool enableDynamicShape();
83
+
84
+ // Creates a new node with the given op name. The op is a unique identifier
85
+ // for the operation. The num_outputs tells how many outputs a given operation
86
+ // generates.
87
+ //
88
+ // A non-leaf node's node_hash does not always contain shape information.
89
+ // So we pass in the hash value rather than a function.
90
+ Node(OpKind op, size_t num_outputs);
91
+
92
+ // Construct node with operands and shapes
93
+ Node(
94
+ OpKind op,
95
+ OpList operands,
96
+ std::vector<Shape>&& shapes,
97
+ size_t num_outputs = 1);
98
+
99
+ // Construct node with operands and shape generated from a function
100
+ Node(
101
+ OpKind op,
102
+ OpList operands,
103
+ const std::function<Shape()>& shape_fn,
104
+ size_t num_outputs = 1);
105
+
106
+ // Construct node with operands and no shape
107
+ Node(OpKind op, OpList operands, size_t num_outputs = 1);
108
+
109
+ // Construct node with shape and no operands
110
+ Node(OpKind op, Shape shape, size_t num_outputs = 1);
111
+
112
+ virtual ~Node();
113
+
114
+ const OpKind& op() const {
115
+ return op_;
116
+ }
117
+
118
+ size_t num_outputs() const {
119
+ return num_outputs_;
120
+ }
121
+
122
+ // Retrieves the full shape of the IR Node.
123
+ virtual c10::ArrayRef<Shape> shapes() const;
124
+
125
+ virtual const Shape& shape(size_t output_index = 0) const;
126
+
127
+ // Add the shape computed by the shape_fn
128
+ void addComputedShape(const std::function<Shape()>& shape_fn);
129
+
130
+ // Compute the shape using the provided shape_fn if not previously cached
131
+ Shape computeShape(const std::function<Shape()>& shape_fn);
132
+
133
+ virtual const std::vector<Output>& operands() const;
134
+
135
+ virtual const Output& operand(size_t i) const;
136
+
137
+ // Gets operand at index i if index is valid, or kNullOutput otherwise.
138
+ virtual const Output& nullable_operand(size_t i) const;
139
+
140
+ // Returns the hash of the dag used to look up the compiled graph
141
+ virtual hash_t hash() const = 0;
142
+
143
+ // Returns the hash of the dag used for shape caching
144
+ virtual hash_t shapeHash() const = 0;
145
+
146
+ const MetaData& metadata() const {
147
+ return metadata_;
148
+ }
149
+
150
+ UserMetaData* user_metadata() const {
151
+ return user_metadata_.get();
152
+ }
153
+
154
+ std::shared_ptr<UserMetaData> SetUserMetadata(
155
+ std::shared_ptr<UserMetaData> user_meta) {
156
+ std::swap(user_metadata_, user_meta);
157
+ return user_meta;
158
+ }
159
+
160
+ virtual std::string ToString() const;
161
+
162
+ private:
163
+ // The ID of the operation captured by this node.
164
+ OpKind op_;
165
+ size_t num_outputs_ = 1;
166
+
167
+ // The IR specific metadata attached to the IR node.
168
+ MetaData metadata_;
169
+ // The IR framework user can attach a user defined metadata object deriving
170
+ // from UserMetaData.
171
+ std::shared_ptr<UserMetaData> user_metadata_;
172
+
173
+ protected:
174
+ // Adds the output at the given index of `node` as an operand.
175
+ void AddOperand(NodePtr node, size_t index = 0);
176
+
177
+ std::vector<Shape> shapes_;
178
+ // A node holds a real reference to its operands.
179
+ std::vector<NodePtr> operands_;
180
+ // Outputs do not hold references on the nodes, and neither do the uses, since
181
+ // otherwise we get into circular reference counting.
182
+ std::vector<Output> operands_as_outputs_;
183
+ };
184
+
185
+ inline std::ostream& operator<<(std::ostream& stream, const Node& node) {
186
+ stream << node.ToString();
187
+ return stream;
188
+ }
189
+
190
+ // Note: Keep this version of NodeCast for smooth PyTorch/XLA migration, and
191
+ // clean up once the migration is done.
192
+ template <typename T>
193
+ const T* NodeCast(const Node* node, OpKind op) {
194
+ if (op != node->op()) {
195
+ return nullptr;
196
+ }
197
+ #ifdef NDEBUG
198
+ return static_cast<const T*>(node);
199
+ #else
200
+ return &dynamic_cast<const T&>(*node);
201
+ #endif
202
+ }
203
+
204
+ template <typename T>
205
+ const T* NodeCast(const Node* node) {
206
+ if (T::ClassOpKind() != node->op()) {
207
+ return nullptr;
208
+ }
209
+ // TODO: Some IR classes share the same opkind, such as Mean and MeanDim, so
210
+ // static_cast is not safe here. Unless we have opkind unique for each class,
211
+ // we have to use dynamic_cast here.
212
+ return dynamic_cast<const T*>(node);
213
+ }
214
+
215
+ // Represents a specific output produced by a node. Since the output of a node
216
+ // can be composed of multiple outputs, the node+index coordinates fully qualify
217
+ // each single output.
218
+ struct TORCH_API Output {
219
+ struct Hasher {
220
+ size_t operator()(const Output& output) const;
221
+ };
222
+
223
+ Output() = default;
224
+ explicit Output(const Node* node, size_t index = 0)
225
+ : node(node), index(index) {}
226
+
227
+ hash_t hash() const;
228
+ hash_t shapeHash() const;
229
+
230
+ bool operator==(const Output& rhs) const {
231
+ return node == rhs.node && index == rhs.index;
232
+ }
233
+
234
+ // Used to compare the operands of a to-be-constructed node and a to-be-reused node
235
+ bool operator==(const Value& rhs) const;
236
+
237
+ bool operator!=(const Output& rhs) const {
238
+ return !operator==(rhs);
239
+ }
240
+
241
+ const Shape& shape() const {
242
+ return node->shape(index);
243
+ }
244
+
245
+ std::string ToString() const;
246
+
247
+ // The node providing the output.
248
+ const Node* node{nullptr};
249
+ // The index in the node's output this output refers to.
250
+ size_t index{0};
251
+ };
252
+
253
+ inline std::ostream& operator<<(std::ostream& stream, const Output& output) {
254
+ stream << output.ToString();
255
+ return stream;
256
+ }
257
+
258
+ template <typename T>
259
+ using OutputMap = std::unordered_map<Output, T, Output::Hasher>;
260
+
261
+ // Represents an input/operand for a Node object.
262
+ struct TORCH_API Value {
263
+ Value() = default;
264
+ /* implicit */ Value(NodePtr&& node, size_t index = 0)
265
+ : node(std::move(node)), index(index) {}
266
+ /* implicit */ Value(const NodePtr& node, size_t index = 0)
267
+ : node(node), index(index) {}
268
+
269
+ hash_t hash() const;
270
+ hash_t shapeHash() const;
271
+
272
+ operator bool() const {
273
+ return node != nullptr;
274
+ }
275
+
276
+ operator Output() const {
277
+ return Output(node.get(), index);
278
+ }
279
+
280
+ const Shape& shape() const {
281
+ return node->shape(index);
282
+ }
283
+
284
+ Node* operator->() const {
285
+ return node.get();
286
+ }
287
+
288
+ NodePtr node;
289
+ size_t index = 0;
290
+ };
291
+
292
+ } // namespace lazy
293
+ } // namespace torch
294
+
295
+ namespace c10 {
296
+ // Explicit template instantiation to make ArrayRef<Value> work
297
+ template class at::ArrayRef<torch::lazy::Value>;
298
+ } // namespace c10
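Since Node is abstract and several IR classes may share an OpKind, NodeCast is the usual way to recover the concrete type; a small generic helper built on it might look like this (illustrative only):

#include <torch/csrc/lazy/core/ir.h>

// Illustrative: true when `node` is an instance of the IR class T
// (T is any Node subclass exposing a static ClassOpKind(), as NodeCast expects).
template <typename T>
bool IsA(const torch::lazy::Node* node) {
  return node != nullptr && torch::lazy::NodeCast<T>(node) != nullptr;
}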
venv/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_builder.h ADDED
@@ -0,0 +1,150 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <torch/csrc/lazy/backend/backend_interface.h>
6
+ #include <torch/csrc/lazy/core/config.h>
7
+ #include <torch/csrc/lazy/core/ir.h>
8
+ #include <torch/csrc/lazy/core/tensor.h>
9
+ #include <torch/csrc/lazy/core/trie.h>
10
+ #include <vector>
11
+
12
+ // This file is part of the backend interface. So, ops shouldn't be added or
13
+ // removed without due process The exception to this being the view ops which
14
+ // will be removed soon pending functionalization
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ template <typename T, typename... Args>
20
+ NodePtr ReuseNode(Args&&... args) {
21
+ if (FLAGS_torch_lazy_reuse_ir) {
22
+ return LookupNodeFromTrieCache<T>(std::forward<Args>(args)...);
23
+ }
24
+ return nullptr;
25
+ }
26
+
27
+ // Caching an IR node into TrieCache
28
+ static inline void CacheNode(NodePtr node) {
29
+ if (FLAGS_torch_lazy_reuse_ir) {
30
+ TrieCache::Get()->Insert(std::move(node));
31
+ }
32
+ }
33
+
34
+ template <typename T, typename... Args>
35
+ NodePtr MakeNode(Args&&... args) {
36
+ return std::make_shared<T>(std::forward<Args>(args)...);
37
+ }
38
+
39
+ // op is passed in for a more efficient node casting, see the implementation of
40
+ // NodeCast
41
+ template <typename T, typename... Args>
42
+ NodePtr ReuseOrMakeNode(Args&&... args) {
43
+ NodePtr node = ReuseNode<T>(std::forward<Args>(args)...);
44
+ if (!node) {
45
+ node = MakeNode<T>(std::forward<Args>(args)...);
46
+ CacheNode(node);
47
+ }
48
+ return node;
49
+ }
50
+
51
+ struct IrBuilder {
52
+ virtual NodePtr MakeDeviceData(
53
+ const std::shared_ptr<BackendData>& data) const = 0;
54
+ virtual NodePtr MakeScalar(
55
+ const at::Scalar& value,
56
+ const at::ScalarType& type) const = 0;
57
+ virtual NodePtr MakeExpand(
58
+ const Value& input0,
59
+ const std::vector<int64_t>& size,
60
+ const bool& is_scalar_expand) const = 0;
61
+ virtual NodePtr MakeCast(
62
+ const Value& input0,
63
+ const at::ScalarType& dtype,
64
+ const c10::optional<at::ScalarType>& stype = c10::nullopt) const = 0;
65
+ virtual NodePtr MakeTensorList(const OpList& inputs) const = 0;
66
+ virtual NodePtr MakeGeneric(
67
+ const OpKind& op,
68
+ const OpList& operands,
69
+ const Shape& shape,
70
+ const size_t& num_outputs = 1,
71
+ const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) const = 0;
72
+
73
+ // dynamic ir nodes
74
+ virtual NodePtr MakeSizeNode(const Value& input, size_t dim) const = 0;
75
+ virtual NodePtr MakeSizeAdd(const Value& a, const Value& b) const = 0;
76
+ virtual NodePtr MakeSizeMul(const Value& a, const Value& b) const = 0;
77
+ virtual NodePtr MakeSizeDiv(const Value& a, const Value& b) const = 0;
78
+
79
+ virtual ~IrBuilder() = default;
80
+ };
81
+
82
+ static inline NodePtr MakeDeviceData(const std::shared_ptr<BackendData>& data) {
83
+ return getIrBuilder()->MakeDeviceData(data);
84
+ }
85
+ static inline NodePtr MakeScalar(
86
+ const at::Scalar& value,
87
+ const at::ScalarType& type) {
88
+ return getIrBuilder()->MakeScalar(value, type);
89
+ }
90
+ static inline NodePtr MakeExpand(
91
+ const Value& input0,
92
+ const std::vector<int64_t>& size,
93
+ const bool& is_scalar_expand) {
94
+ return getIrBuilder()->MakeExpand(input0, size, is_scalar_expand);
95
+ }
96
+ static inline NodePtr MakeCast(
97
+ const Value& input0,
98
+ const at::ScalarType& dtype,
99
+ const c10::optional<at::ScalarType>& stype = c10::nullopt) {
100
+ return getIrBuilder()->MakeCast(input0, dtype, stype);
101
+ }
102
+ static inline NodePtr MakeTensorList(const OpList& inputs) {
103
+ return getIrBuilder()->MakeTensorList(inputs);
104
+ }
105
+ static inline NodePtr MakeGeneric(
106
+ const OpKind& op,
107
+ const OpList& operands,
108
+ const Shape& shape,
109
+ const size_t& num_outputs = 1,
110
+ const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) {
111
+ return getIrBuilder()->MakeGeneric(
112
+ op, operands, shape, num_outputs, hash_seed);
113
+ }
114
+
115
+ // dynamic ir nodes
116
+ static inline NodePtr MakeSizeNode(const Value& input, size_t dim) {
117
+ return getIrBuilder()->MakeSizeNode(input, dim);
118
+ }
119
+ static inline NodePtr MakeSizeAdd(const Value& a, const Value& b) {
120
+ return getIrBuilder()->MakeSizeAdd(a, b);
121
+ }
122
+ static inline NodePtr MakeSizeMul(const Value& a, const Value& b) {
123
+ return getIrBuilder()->MakeSizeAdd(a, b);
124
+ }
125
+ static inline NodePtr MakeSizeDiv(const Value& a, const Value& b) {
126
+ return getIrBuilder()->MakeSizeDiv(a, b);
127
+ }
128
+
129
+ inline Value GetSymIntValue(c10::SymInt a) {
130
+ if (auto ma = a.maybe_as_int()) {
131
+ return Value(MakeScalar(*ma, at::kLong), 0);
132
+ } else {
133
+ return Value(
134
+ dynamic_cast<torch::lazy::SymNodeImpl*>(a.toSymNodeImplUnowned())
135
+ ->node_,
136
+ 0);
137
+ }
138
+ }
139
+
140
+ // TODO: this should return Value
141
+ inline std::vector<int64_t> GetSymIntArrayRefValue(c10::SymIntArrayRef arr) {
142
+ std::vector<int64_t> r;
143
+ for (const auto& a : arr) {
144
+ r.emplace_back(a.guard_int(__FILE__, __LINE__));
145
+ }
146
+ return r;
147
+ }
148
+
149
+ } // namespace lazy
150
+ } // namespace torch
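Tying this back to the dynamic-shape discussion in dynamic_ir.h, an IR value for size(0) * size(1) of a traced tensor could be assembled with these helpers roughly as follows (illustrative sketch; `input` is assumed to be a Value wrapping the traced tensor, with at least two dimensions):

#include <torch/csrc/lazy/core/ir_builder.h>

// Illustrative sketch: build an IR node for size(0) * size(1) of `input`.
torch::lazy::NodePtr MakeNumelOfFirstTwoDims(const torch::lazy::Value& input) {
  using namespace torch::lazy;
  NodePtr d0 = MakeSizeNode(input, /*dim=*/0);
  NodePtr d1 = MakeSizeNode(input, /*dim=*/1);
  return MakeSizeMul(Value(d0), Value(d1));
}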